openfree committed on
Commit a00a5a2 · verified · 1 Parent(s): e12f2d0

Update app.py

Files changed (1)
  1. app.py +227 -144
app.py CHANGED
@@ -1,7 +1,7 @@
  import gradio as gr
  import spaces
  import torch
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch.nn.functional as F
  import torch.nn as nn
  import re
@@ -9,75 +9,113 @@ import requests
  from urllib.parse import urlparse
  import xml.etree.ElementTree as ET

- # Model repository and device setup
- model_path = 'ssocean/NAIP'
  device = 'cuda' if torch.cuda.is_available() else 'cpu'

- # Globals
  model = None
  tokenizer = None

  def fetch_arxiv_paper(arxiv_input):
-     """Fetch title & abstract from arXiv URL or ID."""
      try:
          if 'arxiv.org' in arxiv_input:
              parsed = urlparse(arxiv_input)
-             arxiv_id = parsed.path.split('/')[-1].replace('.pdf', '')
          else:
              arxiv_id = arxiv_input.strip()
          api_url = f'http://export.arxiv.org/api/query?id_list={arxiv_id}'
-         resp = requests.get(api_url)
-         if resp.status_code != 200:
-             return {"title":"", "abstract":"", "success":False, "message":"arXiv API error"}
-         root = ET.fromstring(resp.text)
-         ns = {'atom':'http://www.w3.org/2005/Atom'}
-         entry = root.find('.//atom:entry', ns)
          if entry is None:
-             return {"title":"", "abstract":"", "success":False, "message":"Paper not found"}
-         title = entry.find('atom:title', ns).text.strip()
-         abstract = entry.find('atom:summary', ns).text.strip()
-         return {"title":title, "abstract":abstract, "success":True, "message":"Fetched successfully"}
      except Exception as e:
-         return {"title":"", "abstract":"", "success":False, "message":f"Error: {e}"}

  @spaces.GPU(duration=60, enable_queue=True)
  def predict(title, abstract):
-     """Predict a normalized impact score (0–1) from title & abstract."""
      global model, tokenizer

      if model is None:
-         # Try loading full-precision on GPU first
-         try:
-             model = AutoModelForSequenceClassification.from_pretrained(
-                 model_path,
-                 num_labels=1,
-                 torch_dtype=torch.float32,
-                 device_map="auto"
-             )
-         except RuntimeError:
-             # Fallback to CPU-only
-             model = AutoModelForSequenceClassification.from_pretrained(
-                 model_path,
-                 num_labels=1,
-                 torch_dtype=torch.float32,
-                 device_map="cpu"
-             )
          tokenizer = AutoTokenizer.from_pretrained(model_path)
          model.eval()
-
-     prompt = (
          f"Given a certain paper,\n"
-         f"Title: {title.strip()}\n"
-         f"Abstract: {abstract.strip()}\n"
          f"Predict its normalized academic impact (0~1):"
      )
-     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
-     inputs = {k: v.to(device) for k, v in inputs.items()}
-     with torch.no_grad():
-         logits = model(**inputs).logits
-     prob = torch.sigmoid(logits).item()
-     score = min(1.0, prob + 0.05) # +0.05 adjustment
-     return round(score, 4)

  def get_grade_and_emoji(score):
      if score >= 0.900: return "AAA 🌟"
@@ -90,32 +128,55 @@ def get_grade_and_emoji(score):
      if score >= 0.300: return "CC ✏️"
      return "C 📑"

  def validate_input(title, abstract):
-     """Ensure title ≥3 words, abstract ≥50 words, and ASCII-only."""
-     non_ascii = re.compile(r'[^\x00-\x7F]')
      if len(title.split()) < 3:
-         return False, "Title must be at least 3 words."
      if len(abstract.split()) < 50:
-         return False, "Abstract must be at least 50 words."
-     if non_ascii.search(title):
-         return False, "Title contains non-ASCII characters."
-     if non_ascii.search(abstract):
-         return False, "Abstract contains non-ASCII characters."
-     return True, "Inputs look good."

  def update_button_status(title, abstract):
-     valid, msg = validate_input(title, abstract)
      if not valid:
-         return gr.update(value="Error: " + msg), gr.update(interactive=False)
-     return gr.update(value=msg), gr.update(interactive=True)

  def process_arxiv_input(arxiv_input):
      if not arxiv_input.strip():
          return "", "", "Please enter an arXiv URL or ID"
-     res = fetch_arxiv_paper(arxiv_input)
-     if res["success"]:
-         return res["title"], res["abstract"], res["message"]
-     return "", "", res["message"]

  css = """
  .gradio-container {
@@ -130,11 +191,17 @@ css = """
      -webkit-background-clip: text;
      -webkit-text-fill-color: transparent;
  }
  .input-section {
-     background: #ffffff;
      padding: 2rem;
      border-radius: 1rem;
-     box-shadow: 0 4px 6px rgba(0,0,0,0.1);
  }
  .result-section {
      background: #f8fafc;
@@ -142,7 +209,13 @@ css = """
      border-radius: 1rem;
      margin-top: 2rem;
  }
- .methodology-section, .example-section {
      background: #fff7ed;
      padding: 2rem;
      border-radius: 1rem;
@@ -154,134 +227,144 @@ css = """
      margin: 1rem 0;
  }
  .arxiv-input {
-     background: #f3f4f6;
      padding: 1rem;
      border-radius: 0.5rem;
-     margin-bottom: 1.5rem;
  }
  .arxiv-link {
      color: #2563eb;
      text-decoration: underline;
      font-size: 0.9em;
  }
  .arxiv-note {
-     color: #666666;
      font-size: 0.9em;
      margin-top: 0.5em;
      margin-bottom: 0.5em;
  }
  """

- example_papers = [
-     {
-         "title": "Attention Is All You Need",
-         "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train.",
-         "score": 0.982,
-         "note": "Revolutionary paper introducing the Transformer architecture."
-     },
-     {
-         "title": "Language Models are Few-Shot Learners",
-         "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches.",
-         "score": 0.956,
-         "note": "Groundbreaking GPT-3 paper on few-shot learning."
-     },
-     {
-         "title": "An Empirical Study of Neural Network Training Protocols",
-         "abstract": "This paper presents a comparative analysis of different training protocols for neural networks across various architectures. We examine the effects of learning rate schedules, batch size selection, and optimization algorithms on model convergence and final performance. Our experiments span multiple datasets and model sizes, providing practical insights for deep learning practitioners.",
-         "score": 0.623,
-         "note": "Solid empirical comparison of training protocols."
-     }
- ]
-
  with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
-     gr.Markdown("<div class='main-title'>Papers Impact: AI-Powered Research Impact Predictor</div>")
-     gr.HTML("""
-     <a href="https://visitorbadge.io/status?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space">
-         <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space&countColor=%23263759" />
-     </a>
-     """)

      with gr.Row():
          with gr.Column(elem_classes="input-section"):
              with gr.Group(elem_classes="arxiv-input"):
-                 gr.Markdown("### Import from arXiv")
                  arxiv_input = gr.Textbox(
                      lines=1,
-                     placeholder="e.g. 2504.11651",
-                     label="arXiv URL or ID",
                      value="2504.11651"
                  )
                  gr.Markdown("""
                  <p class="arxiv-note">
-                 Click to use the default example or visit <a href="https://arxiv.org" class="arxiv-link" target="_blank">arxiv.org</a>
                  </p>
                  """)
                  fetch_button = gr.Button("🔍 Fetch Paper Details", variant="secondary")
-
-             gr.Markdown("### Or Enter Paper Details Manually")
              title_input = gr.Textbox(
                  lines=2,
-                 placeholder="Enter paper title (minimum 3 words)...",
                  label="Paper Title"
              )
              abstract_input = gr.Textbox(
                  lines=5,
-                 placeholder="Enter paper abstract (minimum 50 words)...",
                  label="Paper Abstract"
              )
-             validation_status = gr.Textbox(label="Validation Status", interactive=False)
              submit_button = gr.Button("🎯 Predict Impact", interactive=False, variant="primary")
-
          with gr.Column(elem_classes="result-section"):
-             score_output = gr.Number(label="Impact Score")
-             grade_output = gr.Textbox(label="Grade", elem_classes="grade-display")

      with gr.Row(elem_classes="methodology-section"):
-         gr.Markdown("""
-         ### Scientific Methodology
-         - **Training Data**: Papers from CS.CV, CS.CL (NLP), and CS.AI fields
-         - **Optimization**: NDCG optimization with Sigmoid activation & MSE loss
-         - **Validation**: Cross-validated on historical citation data
-         - **Architecture**: Transformer-based text encoder
-         - **Metrics**: Citation-pattern analysis & research influence
-         """)

-     gr.Markdown("""
-     ### Rating Scale
-     | Grade | Score Range | Description | Emoji |
-     |-------|-------------|---------------------|-------|
-     | AAA | 0.900–1.000 | Exceptional Impact | 🌟 |
-     | AA | 0.800–0.899 | Very High Impact | ⭐ |
-     | A | 0.650–0.799 | High Impact | ✨ |
-     | BBB | 0.600–0.649 | Above Average | 🔵 |
-     | BB | 0.550–0.599 | Moderate Impact | 📘 |
-     | B | 0.500–0.549 | Average Impact | 📖 |
-     | CCC | 0.400–0.499 | Below Average | 📝 |
-     | CC | 0.300–0.399 | Low Impact | ✏️ |
-     | C | <0.300 | Limited Impact | 📑 |
-     """)

      with gr.Row(elem_classes="example-section"):
-         gr.Markdown("### Example Papers")
          for paper in example_papers:
-             gr.Markdown(f"""
-             #### {paper['title']}
-             **Score**: {paper['score']} | **Grade**: {get_grade_and_emoji(paper['score'])}
-             {paper['abstract']}
-             *{paper['note']}*
-             ---
-             """)

-     # Event handlers
-     title_input.change(update_button_status, [title_input, abstract_input], [validation_status, submit_button])
-     abstract_input.change(update_button_status, [title_input, abstract_input], [validation_status, submit_button])
-     fetch_button.click(process_arxiv_input, [arxiv_input], [title_input, abstract_input, validation_status])

-     def run_prediction(t, a):
-         s = predict(t, a)
-         return s, get_grade_and_emoji(s)

-     submit_button.click(run_prediction, [title_input, abstract_input], [score_output, grade_output])

  if __name__ == "__main__":
      iface.launch()
 
  import gradio as gr
  import spaces
  import torch
+ from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification
  import torch.nn.functional as F
  import torch.nn as nn
  import re

  from urllib.parse import urlparse
  import xml.etree.ElementTree as ET

+ model_path = r'ssocean/NAIP'
  device = 'cuda' if torch.cuda.is_available() else 'cpu'

+ global model, tokenizer
  model = None
  tokenizer = None

  def fetch_arxiv_paper(arxiv_input):
+     """Fetch paper details from arXiv URL or ID using requests."""
      try:
+         # Extract arXiv ID from URL or use directly
          if 'arxiv.org' in arxiv_input:
              parsed = urlparse(arxiv_input)
+             path = parsed.path
+             arxiv_id = path.split('/')[-1].replace('.pdf', '')
          else:
              arxiv_id = arxiv_input.strip()
+
+         # Fetch metadata using arXiv API
          api_url = f'http://export.arxiv.org/api/query?id_list={arxiv_id}'
+         response = requests.get(api_url)
+
+         if response.status_code != 200:
+             return {
+                 "title": "",
+                 "abstract": "",
+                 "success": False,
+                 "message": "Error fetching paper from arXiv API"
+             }
+
+         # Parse the response XML
+         root = ET.fromstring(response.text)
+
+         # ArXiv API uses Atom namespace
+         ns = {'arxiv': 'http://www.w3.org/2005/Atom'}
+
+         # Extract title and abstract
+         entry = root.find('.//arxiv:entry', ns)
          if entry is None:
+             return {
+                 "title": "",
+                 "abstract": "",
+                 "success": False,
+                 "message": "Paper not found"
+             }
+
+         title = entry.find('arxiv:title', ns).text.strip()
+         abstract = entry.find('arxiv:summary', ns).text.strip()
+
+         return {
+             "title": title,
+             "abstract": abstract,
+             "success": True,
+             "message": "Paper fetched successfully!"
+         }
      except Exception as e:
+         return {
+             "title": "",
+             "abstract": "",
+             "success": False,
+             "message": f"Error fetching paper: {e}"
+         }

  @spaces.GPU(duration=60, enable_queue=True)
  def predict(title, abstract):
+     """Predict a normalized academic impact score (0–1) from title & abstract."""
+     title = title.replace("\n", " ").strip().replace("''", "'")
+     abstract = abstract.replace("\n", " ").strip().replace("''", "'")
      global model, tokenizer

      if model is None:
+         # Load config and disable any quantization
+         config = AutoConfig.from_pretrained(model_path)
+         config.quantization_config = None
+
+         # Load model in full float32, then move to device
+         model = AutoModelForSequenceClassification.from_pretrained(
+             model_path,
+             config=config,
+             num_labels=1,
+             torch_dtype=torch.float32,
+             device_map=None,
+             low_cpu_mem_usage=False
+         )
+         model.to(device)
+
          tokenizer = AutoTokenizer.from_pretrained(model_path)
          model.eval()
+
+     text = (
          f"Given a certain paper,\n"
+         f"Title: {title}\n"
+         f"Abstract: {abstract}\n"
          f"Predict its normalized academic impact (0~1):"
      )
+
+     try:
+         inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024)
+         inputs = {k: v.to(device) for k, v in inputs.items()}
+         with torch.no_grad():
+             outputs = model(**inputs)
+         prob = torch.sigmoid(outputs.logits).item()
+         score = min(1.0, prob + 0.05)
+         return round(score, 4)
+     except Exception as e:
+         print(f"Prediction error: {e}")
+         return 0.0 # default on error

  def get_grade_and_emoji(score):
      if score >= 0.900: return "AAA 🌟"

      if score >= 0.300: return "CC ✏️"
      return "C 📑"

+ example_papers = [
+     {
+         "title": "Attention Is All You Need",
+         "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train.",
+         "score": 0.982,
+         "note": "💫 Revolutionary paper that introduced the Transformer architecture, fundamentally changing NLP and deep learning."
+     },
+     {
+         "title": "Language Models are Few-Shot Learners",
+         "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches.",
+         "score": 0.956,
+         "note": "🚀 Groundbreaking GPT-3 paper that demonstrated the power of large language models."
+     },
+     {
+         "title": "An Empirical Study of Neural Network Training Protocols",
+         "abstract": "This paper presents a comparative analysis of different training protocols for neural networks across various architectures. We examine the effects of learning rate schedules, batch size selection, and optimization algorithms on model convergence and final performance. Our experiments span multiple datasets and model sizes, providing practical insights for deep learning practitioners.",
+         "score": 0.623,
+         "note": "📚 Solid research paper with useful findings but more limited scope and impact."
+     }
+ ]
+
  def validate_input(title, abstract):
+     title = title.replace("\n", " ").strip().replace("''", "'")
+     abstract = abstract.replace("\n", " ").strip().replace("''", "'")
+     non_latin_pattern = re.compile(r'[^\u0000-\u007F]')
      if len(title.split()) < 3:
+         return False, "The title must be at least 3 words long."
      if len(abstract.split()) < 50:
+         return False, "The abstract must be at least 50 words long."
+     if non_latin_pattern.search(title):
+         return False, "The title contains invalid characters. Only English letters and symbols are allowed."
+     if non_latin_pattern.search(abstract):
+         return False, "The abstract contains invalid characters. Only English letters and symbols are allowed."
+     return True, "Inputs are valid!"

  def update_button_status(title, abstract):
+     valid, message = validate_input(title, abstract)
      if not valid:
+         return gr.update(value="Error: " + message), gr.update(interactive=False)
+     return gr.update(value=message), gr.update(interactive=True)

  def process_arxiv_input(arxiv_input):
      if not arxiv_input.strip():
          return "", "", "Please enter an arXiv URL or ID"
+     result = fetch_arxiv_paper(arxiv_input)
+     if result["success"]:
+         return result["title"], result["abstract"], result["message"]
+     else:
+         return "", "", result["message"]

  css = """
  .gradio-container {

      -webkit-background-clip: text;
      -webkit-text-fill-color: transparent;
  }
+ .sub-title {
+     text-align: center;
+     color: #4b5563;
+     font-size: 1.5rem !important;
+     margin-bottom: 2rem !important;
+ }
  .input-section {
+     background: white;
      padding: 2rem;
      border-radius: 1rem;
+     box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
  }
  .result-section {
      background: #f8fafc;

      border-radius: 1rem;
      margin-top: 2rem;
  }
+ .methodology-section {
+     background: #ecfdf5;
+     padding: 2rem;
+     border-radius: 1rem;
+     margin-top: 2rem;
+ }
+ .example-section {
      background: #fff7ed;
      padding: 2rem;
      border-radius: 1rem;

      margin: 1rem 0;
  }
  .arxiv-input {
+     margin-bottom: 1.5rem;
      padding: 1rem;
+     background: #f3f4f6;
      border-radius: 0.5rem;
  }
  .arxiv-link {
      color: #2563eb;
      text-decoration: underline;
      font-size: 0.9em;
+     margin-top: 0.5em;
  }
  .arxiv-note {
+     color: #666;
      font-size: 0.9em;
      margin-top: 0.5em;
      margin-bottom: 0.5em;
  }
  """

  with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
+     gr.Markdown(
+         """
+         # Papers Impact: AI-Powered Research Impact Predictor
+         ## https://discord.gg/openfreeai
+         """
+     )
+     gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space">
+     <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2FVIDraft-PaperImpact.hf.space&countColor=%23263759" />
+     </a>""")

      with gr.Row():
          with gr.Column(elem_classes="input-section"):
              with gr.Group(elem_classes="arxiv-input"):
+                 gr.Markdown("### 📑 Import from arXiv")
                  arxiv_input = gr.Textbox(
                      lines=1,
+                     placeholder="Enter arXiv URL or ID (e.g., 2504.11651)",
+                     label="arXiv Paper URL/ID",
                      value="2504.11651"
                  )
                  gr.Markdown("""
                  <p class="arxiv-note">
+                 Click input field to use example paper or browse papers at
+                 <a href="https://arxiv.org" target="_blank" class="arxiv-link">arxiv.org</a>
                  </p>
                  """)
                  fetch_button = gr.Button("🔍 Fetch Paper Details", variant="secondary")
+
+             gr.Markdown("### 📝 Or Enter Paper Details Manually")
+
              title_input = gr.Textbox(
                  lines=2,
+                 placeholder="Enter Paper Title (minimum 3 words)...",
                  label="Paper Title"
              )
              abstract_input = gr.Textbox(
                  lines=5,
+                 placeholder="Enter Paper Abstract (minimum 50 words)...",
                  label="Paper Abstract"
              )
+             validation_status = gr.Textbox(label="✔️ Validation Status", interactive=False)
              submit_button = gr.Button("🎯 Predict Impact", interactive=False, variant="primary")
+
          with gr.Column(elem_classes="result-section"):
+             with gr.Group():
+                 score_output = gr.Number(label="🎯 Impact Score")
+                 grade_output = gr.Textbox(label="🏆 Grade", value="", elem_classes="grade-display")

      with gr.Row(elem_classes="methodology-section"):
+         gr.Markdown(
+             """
+             ### 🔬 Scientific Methodology
+             - **Training Data**: Model trained on extensive dataset of published papers from CS.CV, CS.CL(NLP), and CS.AI fields
+             - **Optimization**: NDCG optimization with Sigmoid activation and MSE loss function
+             - **Validation**: Cross-validated against historical paper impact data
+             - **Architecture**: Advanced transformer-based deep textual analysis
+             - **Metrics**: Quantitative analysis of citation patterns and research influence
+             """
+         )

+     with gr.Row():
+         gr.Markdown(
+             """
+             ### 📊 Rating Scale
+             | Grade | Score Range | Description | Indicator |
+             |-------|-------------|-------------|-----------|
+             | AAA | 0.900-1.000 | Exceptional Impact | 🌟 |
+             | AA | 0.800-0.899 | Very High Impact | ⭐ |
+             | A | 0.650-0.799 | High Impact | ✨ |
+             | BBB | 0.600-0.649 | Above Average Impact | 🔵 |
+             | BB | 0.550-0.599 | Moderate Impact | 📘 |
+             | B | 0.500-0.549 | Average Impact | 📖 |
+             | CCC | 0.400-0.499 | Below Average Impact | 📝 |
+             | CC | 0.300-0.399 | Low Impact | ✏️ |
+             | C | < 0.299 | Limited Impact | 📑 |
+             """
+         )

      with gr.Row(elem_classes="example-section"):
+         gr.Markdown("### 📋 Example Papers")
          for paper in example_papers:
+             gr.Markdown(
+                 f"""
+                 #### {paper['title']}
+                 **Score**: {paper.get('score', 'N/A')} | **Grade**: {get_grade_and_emoji(paper.get('score', 0))}
+                 {paper['abstract']}
+                 *{paper['note']}*
+                 ---
+                 """
+             )

+     title_input.change(
+         update_button_status,
+         inputs=[title_input, abstract_input],
+         outputs=[validation_status, submit_button]
+     )
+     abstract_input.change(
+         update_button_status,
+         inputs=[title_input, abstract_input],
+         outputs=[validation_status, submit_button]
+     )
+
+     fetch_button.click(
+         process_arxiv_input,
+         inputs=[arxiv_input],
+         outputs=[title_input, abstract_input, validation_status]
+     )

+     def process_prediction(title, abstract):
+         score = predict(title, abstract)
+         grade = get_grade_and_emoji(score)
+         return score, grade

+     submit_button.click(
+         process_prediction,
+         inputs=[title_input, abstract_input],
+         outputs=[score_output, grade_output]
+     )

  if __name__ == "__main__":
      iface.launch()
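
For reference, the updated functions can also be exercised outside the Gradio UI. The following is a minimal sketch, not part of the commit: it assumes the file above is saved as app.py, that its dependencies (gradio, spaces, torch, transformers, requests) are installed, and that the ssocean/NAIP weights can be downloaded; importing app builds the Blocks UI but does not launch it.

# Hypothetical smoke test for the updated app.py (assumes it is importable as `app`)
from app import fetch_arxiv_paper, predict, get_grade_and_emoji

paper = fetch_arxiv_paper("2504.11651")  # same default arXiv ID used in the UI
if paper["success"]:
    score = predict(paper["title"], paper["abstract"])  # rounded to 4 decimals; 0.0 on error
    print(score, get_grade_and_emoji(score))
else:
    print("Fetch failed:", paper["message"])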