Fariddvp committed
Commit 5b5955f · verified · 1 Parent(s): 56cd4cb

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -6,8 +6,8 @@ from transformers import pipeline, BloomForCausalLM, BloomTokenizerFast
  from huggingface_hub import login
  import requests
  import os
- from models import evaluate_with_gpt,evaluate_with_bloom
- # from models import evaluate_with_llama
+ from models import evaluate_with_gpt,evaluate_with_bloom,evaluate_with_llama
+



@@ -26,18 +26,18 @@ def evaluate_all_models(pdf_file, job_description):
      # gemma_result = evaluate_with_gemma(pdf_file, job_description)
      bloom_result = evaluate_with_bloom(pdf_file, job_description)
      jabir_result = evaluate_with_jabir(pdf_file, job_description)
-     # llama_result=evaluate_with_llama(pdf_file, job_description)
-     return f"GPT-4o Result:\n{gpt_result}\n\nBloom Result:\n{bloom_result}\n\njabir Result:\n{jabir_result}"
+     llama_result=evaluate_with_llama(pdf_file, job_description)
+     return f"GPT-4o Result:\n{gpt_result}\n\nBloom Result:\n{bloom_result}\n\njabir Result:\n{jabir_result}\n\nllama Result:\n{llama_result}"
      # return f"\n\nllama Result:\n{llam_result}"

  iface = gr.Interface(
-     fn=lambda pdf, jd, model: evaluate_with_gpt(pdf, jd) if model == "GPT-4o" else evaluate_with_bloom(pdf, jd) if model == "Bloom" else evaluate_with_jabir(pdf, jd) if model == "jabir" else evaluate_all_models(pdf, jd),
+     fn=lambda pdf, jd, model: evaluate_with_gpt(pdf, jd) if model == "GPT-4o" else evaluate_with_bloom(pdf, jd) if model == "Bloom" else evaluate_with_jabir(pdf, jd) if model == "jabir" else evaluate_with_llama(pdf, jd) if model == "llama" else evaluate_all_models(pdf, jd),
      # fn=lambda pdf, jd, model: evaluate_with_llama(pdf, jd),

      inputs=[
          gr.File(label="Upload Resume PDF"),
          gr.Textbox(lines=10, label="Job Description"),
-         gr.Radio(choices=["GPT-4o", "Bloom", "jabir", "All"], label="Choose Model")
+         gr.Radio(choices=["GPT-4o", "Bloom", "jabir","llama", "All"], label="Choose Model")
          # gr.Radio(choices=["llama"], label="Choose Model")
      ],
      outputs="text",