JanviMl committed
Commit 5bab557 · verified · 1 parent: ca75f71

Update metrics.py

Files changed (1)
  1. metrics.py +0 -24
metrics.py CHANGED
@@ -1,12 +1,6 @@
  # metrics.py
- import nltk
- #from nltk.translate.bleu_score import sentence_bleu
- from rouge_score import rouge_scorer
  from model_loader import metrics_models
 
- # Download required NLTK data
- nltk.download('punkt')
-
  def compute_semantic_similarity(original, paraphrased):
      """
      Compute semantic similarity between the original and paraphrased comment using Sentence-BERT.
@@ -35,22 +29,4 @@ def compute_empathy_score(paraphrased):
          return round(score, 2)
      except Exception as e:
          print(f"Error computing empathy score: {str(e)}")
-         return None
-
-
- def compute_rouge_score(original, paraphrased):
-     """
-     Compute ROUGE scores (ROUGE-1, ROUGE-2, ROUGE-L) between the original and paraphrased comment.
-     Returns a dictionary with ROUGE scores.
-     """
-     try:
-         scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
-         scores = scorer.score(original, paraphrased)
-         return {
-             'rouge1': round(scores['rouge1'].fmeasure, 2),
-             'rouge2': round(scores['rouge2'].fmeasure, 2),
-             'rougeL': round(scores['rougeL'].fmeasure, 2)
-         }
-     except Exception as e:
-         print(f"Error computing ROUGE scores: {str(e)}")
          return None
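This commit removes the rouge_score dependency together with the compute_rouge_score helper (and the now-unused nltk import and punkt download). If ROUGE-1/2/L scores are still wanted downstream, they can be computed directly with the rouge-score package; the sketch below mirrors the deleted helper (the function name and rounding are carried over from the removed code, not from the current file).

# Standalone sketch mirroring the removed compute_rouge_score helper.
# Requires the rouge-score package (pip install rouge-score).
from rouge_score import rouge_scorer

def rouge_f1_scores(original: str, paraphrased: str) -> dict:
    """Return ROUGE-1/2/L F1 scores, rounded to two decimals."""
    scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
    scores = scorer.score(original, paraphrased)  # score(target, prediction)
    return {name: round(s.fmeasure, 2) for name, s in scores.items()}

# Example usage:
# rouge_f1_scores("The service was slow.", "The service took far too long.")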
 
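The surviving compute_semantic_similarity helper appears here only through its docstring; its body and the model_loader.metrics_models loader are outside this diff. As a rough illustration of what a Sentence-BERT cosine-similarity check typically looks like (the model name and structure below are assumptions, not the repository's actual code):

# Hedged sketch of a Sentence-BERT similarity check; the model choice and
# structure are assumptions, since model_loader is not shown in this diff.
from sentence_transformers import SentenceTransformer, util

_model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model

def semantic_similarity(original: str, paraphrased: str) -> float:
    """Cosine similarity between sentence embeddings, rounded to two decimals."""
    embeddings = _model.encode([original, paraphrased], convert_to_tensor=True)
    return round(util.cos_sim(embeddings[0], embeddings[1]).item(), 2)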