From 13c98950783be6deba047792a1a05b6b6606d6a1 Mon Sep 17 00:00:00 2001
From: Kai Xiong
Date: Tue, 29 Mar 2022 20:43:36 +0800
Subject: [PATCH] Update evaluation scripts

---
 evaluation_metrics_causal_reasoning.py                 | 1 +
 evalution_metrics_conceptual_explanation_generation.py | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/evaluation_metrics_causal_reasoning.py b/evaluation_metrics_causal_reasoning.py
index cd464b7..6bfe73b 100644
--- a/evaluation_metrics_causal_reasoning.py
+++ b/evaluation_metrics_causal_reasoning.py
@@ -26,6 +26,7 @@ def evaluation_metrics(gold, predictions):
 
     return accuracy
 
+
 def main():
     prediction_file = sys.argv[1]
     gold_file = sys.argv[2]
diff --git a/evalution_metrics_conceptual_explanation_generation.py b/evalution_metrics_conceptual_explanation_generation.py
index 85b311d..a1d7a67 100644
--- a/evalution_metrics_conceptual_explanation_generation.py
+++ b/evalution_metrics_conceptual_explanation_generation.py
@@ -27,7 +27,7 @@ def evaluation_bleu(gold, predictions):
 
 
 def evaluation_rouge(golds, predictions):
-    rougel = 0
+    rouge_l = 0
     rouge = Rouge()
 
     for key in predictions:
@@ -39,11 +39,11 @@ def evaluation_rouge(golds, predictions):
 
         try:
             scores = rouge.get_scores(prediction, gold)
-            rougel += scores[0]['rouge-l']['r']
+            rouge_l += scores[0]['rouge-l']['r']
         except:
             continue
 
-    avg_rougel = rougel / len(golds)
+    avg_rougel = rouge_l / len(golds)
 
     return avg_rougel