MongoDBAtlasVectorSearch seems to return "empty response"

Hi, I’m working on an AI chatbot that uses MongoDB as its database. It suddenly stopped working properly and started returning “empty response” for every query I make.
I’m using llama-index with Python, and I’m fairly sure llama-index itself isn’t the problem. Here is my code:

@app.route("/query", methods=["POST"])
def QueryRoute():
    """POST /query — run a RAG query against a MongoDB Atlas vector index.

    Validates the ``token`` query parameter, builds Azure OpenAI chat and
    embedding clients from the requested model, queries the vector index with
    the user's prompt, and returns ``(flask_response, http_status)``.

    Returns 400 on an invalid token, a missing system template, or any
    exception raised while querying; 200 with the generated answer and its
    source nodes otherwise.
    """

    # Initialisation
    requestDTO = None
    responseDTO = None

    # Token check — reject the request outright when it does not match.
    token = request.args.get("token", None)

    if token != _token:
        responseDTO = ServiceQueryResponse.ServiceQueryResponseDTO(True, "Le jeton est invalide.", None)
        return GenerateQueryResponse(requestDTO, responseDTO), 400

    # NOTE(review): requestDTO is never populated from the request body in
    # this snippet — the parsing step appears to be missing from the paste,
    # yet everything below dereferences it. TODO confirm against the full file.
    # (A duplicated qa_template check that ran here, *before* requestDTO
    # exists, was removed; the real check happens after the index is built.)

    # Normalise parameters.
    # BUG FIX: the old clamp only caught LinkNumber < 0, so LinkNumber == 0
    # slipped through and produced similarity_top_k=0 — zero retrieved nodes,
    # which is exactly an "empty response" for every query. Clamp to [1, 5].
    if requestDTO.LinkNumber < 1:
        requestDTO.LinkNumber = 1
    elif requestDTO.LinkNumber > 5:
        requestDTO.LinkNumber = 5

    try:
        # Resolve model configuration.
        model = GetModel(requestDTO.Model)

        # Azure OpenAI chat client.
        llm = AzureOpenAI(
            model=model.ChatModel.Model,
            deployment_name=model.ChatModel.Name,
            temperature=requestDTO.Temperature,
            api_key=model.Key1,
            azure_endpoint=model.Server,
            api_version=model.ChatModel.ApiVersion,
            logprobs=None,
            default_headers={},
        )

        # Azure OpenAI embedding client (must match the model used at
        # ingestion time, otherwise retrieval similarity is meaningless).
        embed_model = AzureOpenAIEmbedding(
            model=model.LearningModel.Model,
            deployment_name=model.LearningModel.Name,
            api_key=model.Key1,
            azure_endpoint=model.Server,
            api_version=model.LearningModel.ApiVersion,
        )

        # Global llama-index settings for this process.
        Settings.llm = llm
        Settings.embed_model = embed_model
        Settings.context_window = _contextWindow
        Settings.num_output = _numOutput

        # Chat history.
        # NOTE(review): `messages` is built but never passed to the query
        # engine below, so the history currently has no effect — TODO confirm
        # whether it should be wired into a chat engine instead.
        messages = []
        if requestDTO.History is not None:
            for item in requestDTO.History:
                messages.append(ChatMessage(role=item.Type.lower(), content=item.Prompt))
        messages.append(ChatMessage(role="user", content=requestDTO.Prompt))

        indexName = requestDTO.Index

        # MongoDB Atlas vector store.
        # NOTE(review): only db_name is supplied, so the collection name and
        # the Atlas Search index name fall back to the library defaults
        # ("default"/"default"). If ingestion used different names, every
        # retrieval silently returns zero nodes — the other classic cause of
        # "empty response". TODO confirm collection_name / index name match
        # the ingestion side.
        mongodb_client = pymongo.MongoClient(_mongoURI)
        store = MongoDBAtlasVectorSearch(mongodb_client, db_name=indexName)
        index = VectorStoreIndex.from_vector_store(store)

        # Mandatory system-prompt template ('System' history entry).
        qa_template = Prompt(requestDTO.get_system_template())
        if not qa_template:
            responseDTO = ServiceQueryResponse.ServiceQueryResponseDTO(True, "Un historique de type 'System' est obligatoire.", None)
            return GenerateQueryResponse(requestDTO, responseDTO), 400

        # Query engine. (The hand-built VectorIndexRetriever +
        # get_response_synthesizer + RetrieverQueryEngine from the original
        # was dead code: it was immediately overwritten by as_query_engine,
        # so it has been removed.)
        query_engine = index.as_query_engine(
            similarity_top_k=requestDTO.LinkNumber,
            text_qa_template=qa_template,
        )
        gpt_result = query_engine.query(requestDTO.Prompt)

        # Build the result DTO, attaching each source node's metadata.
        resultDTO = ServiceQueryResponse.ServiceQueryResponseResultDTO(gpt_result.response, [])
        for item in gpt_result.source_nodes:
            node = ServiceQueryResponse.ServiceQueryResponseNodeDTO(
                item.node.extra_info.get("file_name"),
                item.node.extra_info.get("page_label"),
                item.node.text,
                item.score,
            )
            resultDTO.Nodes.append(node)

        # Final successful response.
        responseDTO = ServiceQueryResponse.ServiceQueryResponseDTO(False, None, resultDTO)
        return GenerateQueryResponse(requestDTO, responseDTO), 200

    except Exception as error:
        # Broad catch kept deliberately: any failure during querying is
        # reported back to the caller as a 400 with the error text.
        return str(error), 400