metrics.py
# Calculate ROUGE and BLEU scores for a file of candidate sentences
# against a file of reference sentences (one sentence per line).
from nltk.translate.bleu_score import corpus_bleu
from rouge import FilesRouge
class Metrics:

    @staticmethod
    def calculate_bleu(candidates_path, references_path):
        """Compute corpus-level BLEU (cumulative and individual 1- to 4-gram) for two files."""
        # corpus_bleu expects tokenized hypotheses and, for each hypothesis,
        # a list of tokenized references (here a single reference per line).
        with open(candidates_path, 'r') as f:
            preds = [line.split() for line in f]
        with open(references_path, 'r') as f:
            refs = [[line.split()] for line in f]
        Ba = corpus_bleu(refs, preds)
        B1 = corpus_bleu(refs, preds, weights=(1, 0, 0, 0))
        B2 = corpus_bleu(refs, preds, weights=(0, 1, 0, 0))
        B3 = corpus_bleu(refs, preds, weights=(0, 0, 1, 0))
        B4 = corpus_bleu(refs, preds, weights=(0, 0, 0, 1))
        Ba = round(Ba * 100, 2)
        B1 = round(B1 * 100, 2)
        B2 = round(B2 * 100, 2)
        B3 = round(B3 * 100, 2)
        B4 = round(B4 * 100, 2)
        ret = ''
        ret += ('for %s functions\n' % (len(preds)))
        ret += ('Ba %s\n' % (Ba))
        ret += ('B1 %s\n' % (B1))
        ret += ('B2 %s\n' % (B2))
        ret += ('B3 %s\n' % (B3))
        ret += ('B4 %s\n' % (B4))
        return ret

    @staticmethod
    def calculate_rouge(candidates_path, references_path):
        """Compute average ROUGE scores between candidate and reference files."""
        files_rouge = FilesRouge()
        # FilesRouge takes the hypothesis (candidate) path first, then the reference path.
        scores = files_rouge.get_scores(candidates_path, references_path, avg=True)
        return scores

    @staticmethod
    def calculate_results(true_positive, false_positive, false_negative):
        """Compute precision, recall, and F1 from raw counts, guarding against division by zero."""
        if true_positive + false_positive > 0:
            precision = true_positive / (true_positive + false_positive)
        else:
            precision = 0
        if true_positive + false_negative > 0:
            recall = true_positive / (true_positive + false_negative)
        else:
            recall = 0
        if precision + recall > 0:
            f1 = 2 * precision * recall / (precision + recall)
        else:
            f1 = 0
        return precision, recall, f1
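

# A minimal usage sketch, not part of the original module: the file names below are
# hypothetical placeholders, and both files are assumed to hold one sentence per line.
if __name__ == "__main__":
    print(Metrics.calculate_bleu('candidates.txt', 'references.txt'))
    print(Metrics.calculate_rouge('candidates.txt', 'references.txt'))
    # Illustrative counts only, chosen to show the precision/recall/F1 helper.
    precision, recall, f1 = Metrics.calculate_results(true_positive=40,
                                                      false_positive=10,
                                                      false_negative=10)
    print('precision %.2f recall %.2f f1 %.2f' % (precision, recall, f1))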