Added the evaluator. On my side the score is 2/71... because most of the answers are: "⚠️ Limite de requêtes atteinte. Attendez 1 minute et réessayez." ("Request limit reached. Wait 1 minute and try again.") We should try a model that allows more requests, possibly one running locally.
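The 2/71 score suggests the evaluator is hitting the provider's per-minute request quota rather than producing genuinely wrong answers. A minimal sketch of one possible mitigation, assuming the rate-limited call can be wrapped in a single function (call_with_backoff, RATE_LIMIT_MARKER and the 60-second wait below are illustrative assumptions, not part of this commit), is to pause and retry whenever the rate-limit message comes back:

import time

# Assumed marker: the rate-limit message quoted in the commit description.
RATE_LIMIT_MARKER = "Limite de requêtes atteinte"

def call_with_backoff(fn, *args, retries=3, wait_seconds=60, **kwargs):
    """Call fn, pausing and retrying when the answer is a rate-limit message."""
    answer = fn(*args, **kwargs)
    for _ in range(retries):
        if RATE_LIMIT_MARKER not in str(answer):
            return answer
        time.sleep(wait_seconds)  # wait out the per-minute quota before retrying
        answer = fn(*args, **kwargs)
    return answer

Throttling each evaluation call this way, or switching to a local model as suggested above, should keep the score from being dominated by rate-limit responses.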

2025-11-13 16:07:34 +01:00
parent 3fc4020316
commit f94c7d4086
4 changed files with 476 additions and 27 deletions

main.py (15 changed lines)

@@ -1,24 +1,27 @@
 import os
 import glob
 from typing import List
+import json
 from src.impl.datastore import Datastore, DataItem
 from src.impl.indexer import Indexer
 from src.impl.retriever import Retriever
 from src.impl.response_generator import ResponseGenerator
+from src.impl.evaluator import Evaluator
 from src.RAG_pipeline import RAGpipeline
 from create_parser import create_parser
 DEFAULT_SOURCE_PATH = "data/source/"
-DEFAULT_EVAL_PATH =""
+DEFAULT_EVAL_PATH ="data/eval/sample_questions.json"
 def create_pipeline() -> RAGpipeline:
     indexer = Indexer()
     datastore = Datastore()
     retriever = Retriever(datastore= datastore)
-    response_generator = ResponseGenerator()
-    return RAGpipeline(indexer = indexer, datastore = datastore, retriever= retriever, response_generator= response_generator)
+    response_generator = ResponseGenerator()
+    evaluator = Evaluator()
+    return RAGpipeline(indexer = indexer, datastore = datastore, retriever= retriever, response_generator= response_generator, evaluator= evaluator)
 def main():
@@ -29,14 +32,14 @@ def main():
     source_path = getattr(args, "path", DEFAULT_SOURCE_PATH) or DEFAULT_SOURCE_PATH
     documents_path = get_files_in_directory(source_path=source_path)
-    #eval_path = args.eval_file if args.eval_file else DEFAULT_EVAL_PATH
-    #sample_questions = json.load(open(eval_path, "r"))
+    eval_path = args.eval_file if args.eval_file else DEFAULT_EVAL_PATH
+    sample_questions = json.load(open(eval_path, "r"))
     commands = {
         "run": lambda: pipeline.run(documents_path = documents_path),
         "reset": lambda: pipeline.reset(),
         "add": lambda: pipeline.add_documents(documents_path=documents_path),
-        "evaluate": lambda: pipeline.evaluate(),
+        "evaluate": lambda: pipeline.evaluate(sample_questions= sample_questions),
         "query": lambda: print(pipeline.process_query(args.prompt)),
     }
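For reference, a minimal sketch of how the evaluation set could be loaded with the file handle closed properly; the JSON structure shown is an assumption, since the actual schema of data/eval/sample_questions.json and what Evaluator expects are not visible in this diff:

import json

def load_sample_questions(eval_path: str) -> list:
    """Load the evaluation questions, closing the file explicitly."""
    # Assumed shape: a list of objects such as
    # [{"question": "...", "expected_answer": "..."}, ...]
    with open(eval_path, "r", encoding="utf-8") as f:
        return json.load(f)

# Usage (hypothetical, mirroring the "evaluate" command above):
# sample_questions = load_sample_questions("data/eval/sample_questions.json")
# pipeline.evaluate(sample_questions=sample_questions)

Using a with block avoids the unclosed file handle left by json.load(open(eval_path, "r")) in the committed code.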