import streamlit as st
from huggingface_hub import InferenceClient
from dotenv import load_dotenv
import os
import PyPDF2 as pdf

# Load the Hugging Face API token from the local .env file
load_dotenv()
api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Hugging Face model served via the Nebius inference provider
MODEL = "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"

# Set up the Streamlit page
st.set_page_config(page_title="JD Matcher by Jishnu Setia", page_icon="📄")
st.title("📌 Job Description Matcher")
st.text("Find out if your resume matches the job you're targeting!")

# Input fields
jd = st.text_area("📝 Paste the Job Description here:")
uploaded_file = st.file_uploader("📎 Upload Your Resume (PDF only):", type="pdf")
submit = st.button("🚀 Submit")


# Extract plain text from every page of the uploaded PDF
def input_pdf_text(uploaded_file):
    reader = pdf.PdfReader(uploaded_file)
    text = ""
    for page in reader.pages:
        # extract_text() can return None for pages with no extractable text
        text += page.extract_text() or ""
    return text


# System prompt that frames the model as a strict ATS evaluator
system_prompt = {
    "role": "system",
    "content": (
        "You are a highly experienced ATS (Applicant Tracking System). Evaluate the resume based on the given job description. "
        "Be strict, accurate, and helpful. The job market is very competitive. Return your response in this format:\n\n"
        "1. JD Match Percentage: \"%\"\n"
        "2. Matching Feedback: (e.g., 'Great match!' or 'Needs improvement')\n"
        "3. Missing Keywords: [list]\n"
        "4. Tips to Improve the Resume:"
    ),
}

# When submit is clicked
if submit:
    if uploaded_file and jd:
        with st.spinner("Analyzing your resume..."):
            resume_text = input_pdf_text(uploaded_file)

            # Build the chat context: system prompt plus resume and job description
            context = [
                system_prompt,
                {"role": "user", "content": f"Resume:\n{resume_text}\n\nJob Description:\n{jd}"},
            ]

            try:
                client = InferenceClient(
                    model=MODEL,
                    provider="nebius",
                    api_key=api_key,
                )
                completion = client.chat.completions.create(
                    model=MODEL,
                    messages=context,
                    max_tokens=2048,
                )
                response = completion.choices[0].message.content

                st.subheader("📊 ATS Evaluation Result")
                st.markdown(response)
            except Exception as e:
                st.error(f"❌ Error: {e}")
    else:
        st.warning("Please upload a resume and paste a job description!")
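
# Minimal local setup sketch (assumes this script is saved as app.py and a .env
# file next to it contains HUGGINGFACEHUB_API_TOKEN=<your token>):
#   pip install streamlit huggingface_hub python-dotenv PyPDF2
#   streamlit run app.py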