diff --git a/src/README.md b/src/README.md
index 95f7035..8dbc4c2 100644
--- a/src/README.md
+++ b/src/README.md
@@ -30,8 +30,11 @@ sentence-transformers (torch version), sentencepiece and Pandas. To install a wo
 
 #### Install QuestEVal
 
 To install QuestEval, one needs to install a modified version of the QuestEval codebase that fixes PyPi broken build
-with
-SpaCy. To install a working LENS version, use `pip install git+https://github.com/davebulaval/QuestEval`.
+with SpaCy. To install a working QuestEval version, use `pip install git+https://github.com/davebulaval/QuestEval`.
+
+#### Install BLEURT
+
+To install a working BLEURT version, use `pip install git+https://github.com/google-research/bleurt`.
 
 ## Execution
diff --git a/src/requirements.txt b/src/requirements.txt
index 11dfb01..bbc5485 100644
--- a/src/requirements.txt
+++ b/src/requirements.txt
@@ -1,7 +1,7 @@
 # In a Python 3.11 environment
 sentence-transformers
 evaluate
-datasets==2.10
+datasets
 textstat
 tqdm
 sacremoses
@@ -16,12 +16,14 @@ python2latex
 poutyne
 torchmetrics
 torch>=1.6.0,<2
-git+https://github.com/google-research/bleurt.git
 
 # Install a modified version of the LENS codebase by fixing PyPi broken build
-# with sentence-transformers (torch version), sentencepiece and Pandas
+# with sentence-transformers (torch version), sentencepiece and Pandas.
 # pip install git+https://github.com/davebulaval/LENS
 
 # Install a modified version of the QuestEval codebase by fixing PyPi broken build
-# with SpaCy
-# pip install git+https://github.com/davebulaval/QuestEval
\ No newline at end of file
+# with SpaCy.
+# pip install git+https://github.com/davebulaval/QuestEval
+
+# Install BLEURT
+# pip install git+https://github.com/google-research/bleurt
\ No newline at end of file
diff --git a/src/training/few_shot_training.py b/src/training/few_shot_training.py
index a56cc2a..f551de4 100644
--- a/src/training/few_shot_training.py
+++ b/src/training/few_shot_training.py
@@ -12,7 +12,6 @@
     Trainer,
 )
 
-from evaluate_metrics import compute_other_metrics_performance
 from metrics.metrics import compute_metrics, eval_compute_metrics_identical, eval_compute_metrics_unrelated
 from tools import (
     bool_parse,
@@ -125,6 +124,12 @@ def tokenize_function(example):
     metric_key_prefix="test/unrelated_sentences",
 )
 
+# Delete the model and trainer before the local import below to reduce GPU memory usage
+del model
+del trainer
+
+from evaluate_metrics import compute_other_metrics_performance
+
 print("----------Test Set Evaluation start of Other Metrics----------")
 compute_other_metrics_performance(
     test_set=tokenized_csmd_dataset["test"],
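
Note on the `few_shot_training.py` change: deferring the `evaluate_metrics` import until after `del model` / `del trainer` keeps the heavyweight metric models (BLEURT, LENS, QuestEval) from being constructed while the fine-tuning objects still occupy the GPU. A minimal sketch of the pattern follows; the placeholder objects and the `gc.collect()` / `torch.cuda.empty_cache()` calls are illustrative assumptions, not part of this diff:

```python
import gc

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical stand-ins for the objects built earlier in
# few_shot_training.py, just so this sketch is self-contained.
model = torch.nn.Linear(8, 2).to(device)
trainer = {"model": model}  # the HF Trainer also keeps a reference to the model

# Drop the last Python references (the two `del` lines this diff adds).
# Deleting only `model` would free nothing, because `trainer` still
# points at it; both names have to go.
del model
del trainer

# Assumption, not in the diff: collect lingering reference cycles and
# let the CUDA caching allocator return unused blocks to the driver.
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()

# Only now import the metric code (a project-local module in this repo),
# so its models are loaded into the memory just released.
from evaluate_metrics import compute_other_metrics_performance  # noqa: E402
```

The same effect could arguably be achieved by loading the metric models lazily inside `compute_other_metrics_performance`; moving the import is the lighter-touch fix.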