Runtime error
Update app.py
app.py
CHANGED
@@ -6,7 +6,7 @@ from transformers import pipeline, BloomForCausalLM, BloomTokenizerFast
 from huggingface_hub import login
 import requests
 import os
-from models import evaluate_with_gpt,
+from models import evaluate_with_gpt,evaluate_with_bloom
 # from models import evaluate_with_llama
 
 
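The new import line pulls evaluate_with_gpt and evaluate_with_bloom from the Space's models module; evaluate_with_jabir, which the next hunk still calls, is not on this line, so it presumably comes from elsewhere in app.py or still needs to be added. The diff never shows models.py itself; the stubs below are only an assumed sketch of the contract app.py relies on: each function takes the uploaded PDF and the job-description text and returns a printable result string.

# models.py -- illustrative stubs only; the real module is not part of this commit
def evaluate_with_gpt(pdf_file, job_description):
    """Score the resume PDF against the job description with GPT-4o; return a text verdict."""
    raise NotImplementedError

def evaluate_with_bloom(pdf_file, job_description):
    """Same contract, backed by BLOOM (app.py already imports BloomForCausalLM / BloomTokenizerFast)."""
    raise NotImplementedError

def evaluate_with_jabir(pdf_file, job_description):
    """Same contract, backed by the jabir model; called by app.py but not imported on the changed line."""
    raise NotImplementedError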
@@ -23,21 +23,21 @@ def extract_text_from_pdf(pdf_file):
 
 def evaluate_all_models(pdf_file, job_description):
     gpt_result = evaluate_with_gpt(pdf_file, job_description)
-    gemma_result = evaluate_with_gemma(pdf_file, job_description)
+    # gemma_result = evaluate_with_gemma(pdf_file, job_description)
     bloom_result = evaluate_with_bloom(pdf_file, job_description)
     jabir_result = evaluate_with_jabir(pdf_file, job_description)
     # llama_result=evaluate_with_llama(pdf_file, job_description)
-    return f"GPT-4o Result:\n{gpt_result}\n\
+    return f"GPT-4o Result:\n{gpt_result}\n\nBloom Result:\n{bloom_result}\n\njabir Result:\n{jabir_result}"
     # return f"\n\nllama Result:\n{llam_result}"
 
 iface = gr.Interface(
-    fn=lambda pdf, jd, model: evaluate_with_gpt(pdf, jd) if model == "GPT-4o" else
+    fn=lambda pdf, jd, model: evaluate_with_gpt(pdf, jd) if model == "GPT-4o" else evaluate_with_bloom(pdf, jd) if model == "Bloom" else evaluate_with_jabir(pdf, jd) if model == "jabir" else evaluate_all_models(pdf, jd),
     # fn=lambda pdf, jd, model: evaluate_with_llama(pdf, jd),
 
     inputs=[
         gr.File(label="Upload Resume PDF"),
         gr.Textbox(lines=10, label="Job Description"),
-        gr.Radio(choices=["GPT-4o", "
+        gr.Radio(choices=["GPT-4o", "Bloom", "jabir", "All"], label="Choose Model")
         # gr.Radio(choices=["llama"], label="Choose Model")
     ],
     outputs="text",
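Read together, the added lines give the following picture of the changed region after this commit. This is a reconstruction, not the literal file: indentation is inferred, the long return string and lambda are re-wrapped for readability, and the evaluate_with_jabir import plus the final iface.launch() are assumptions about parts of app.py the diff does not show.

import gradio as gr
# evaluate_with_jabir is assumed to be exported by models as well; the diffed line lists only gpt and bloom
from models import evaluate_with_gpt, evaluate_with_bloom, evaluate_with_jabir

def evaluate_all_models(pdf_file, job_description):
    # Run every available evaluator and concatenate their verdicts
    gpt_result = evaluate_with_gpt(pdf_file, job_description)
    bloom_result = evaluate_with_bloom(pdf_file, job_description)
    jabir_result = evaluate_with_jabir(pdf_file, job_description)
    return (
        f"GPT-4o Result:\n{gpt_result}\n\n"
        f"Bloom Result:\n{bloom_result}\n\n"
        f"jabir Result:\n{jabir_result}"
    )

iface = gr.Interface(
    # Route the request to the evaluator picked in the radio button, or to all of them
    fn=lambda pdf, jd, model: (
        evaluate_with_gpt(pdf, jd) if model == "GPT-4o"
        else evaluate_with_bloom(pdf, jd) if model == "Bloom"
        else evaluate_with_jabir(pdf, jd) if model == "jabir"
        else evaluate_all_models(pdf, jd)
    ),
    inputs=[
        gr.File(label="Upload Resume PDF"),
        gr.Textbox(lines=10, label="Job Description"),
        gr.Radio(choices=["GPT-4o", "Bloom", "jabir", "All"], label="Choose Model"),
    ],
    outputs="text",
)

iface.launch()  # assumed; the launch call sits outside the diffed region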
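The chained conditional inside fn= works, but it grows by one else per model and silently falls through to evaluate_all_models for any unmatched choice. A common alternative, sketched here under the same assumed function names and not part of the commit, is a small dispatch dictionary keyed by the radio value:

# Hypothetical refactor of the fn= lambda above
MODEL_DISPATCH = {
    "GPT-4o": evaluate_with_gpt,
    "Bloom": evaluate_with_bloom,
    "jabir": evaluate_with_jabir,
    "All": evaluate_all_models,
}

def evaluate_selected(pdf, jd, model):
    # Fall back to running every model if the choice is missing or unknown
    handler = MODEL_DISPATCH.get(model, evaluate_all_models)
    return handler(pdf, jd)

iface = gr.Interface(
    fn=evaluate_selected,
    inputs=[
        gr.File(label="Upload Resume PDF"),
        gr.Textbox(lines=10, label="Job Description"),
        gr.Radio(choices=list(MODEL_DISPATCH), label="Choose Model"),
    ],
    outputs="text",
)

With this shape, supporting another model means adding one dictionary entry instead of extending the lambda, and the radio choices stay in sync with the handlers automatically.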