import streamlit as st
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
# Load resume data
resume_data = {
"name": "Pradeep Singh Sengar",
"linkedin": "www.linkedin.com/in/ipradeepsengarr",
"email": "[email protected]",
"github": "github.com/pradeepsengar",
"mobile": "+91-7898367211",
"education": "Bachelor of Engineering (Hons.) - Information Technology; CGPA: 8.31 (Oriental College Of Technology, Bhopal, 2019-2023)",
"skills": "Python, HTML/CSS, Django, Reactjs, Node.js, Git, Web Scraping, Generative AI, Machine Learning (LLM)",
"experience": "Graduate Engineer Trainee at Jio Platform Limited (Dec. 2023 - Present). Implemented chatbots with Docker, used Git/GitHub, worked with LLM concepts and Hugging Face.",
"projects": "Room Rental System, Text to Image Generator, Fitness Tracker, Movie Recommendation System",
"honors_awards": "Qualified for Round 1B of SnackDown (CodeChef), Startup Challenge (Top 10 teams)",
"certifications": "Web Development (Internshala), The Complete Python Pro Bootcamp (Udemy), Data Science (LinkedIn Learning), Web Scraping (LinkedIn Learning)"
}
# Convert data to list of sentences for retrieval
resume_values = list(resume_data.values())
# Load embedding model
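# all-MiniLM-L6-v2 is a small sentence-transformers model that maps each
# string to a 384-dimensional dense vector.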
model = SentenceTransformer('all-MiniLM-L6-v2')
embeddings = model.encode(resume_values)
# Store embeddings in FAISS index
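# IndexFlatL2 does exact (brute-force) L2 nearest-neighbour search, which is
# fine for a handful of resume entries like this.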
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(np.array(embeddings))
def get_response(query):
    # Embed the query and return the closest-matching resume entry
    query_embedding = model.encode([query])
    D, I = index.search(query_embedding, 1)
    return resume_values[I[0][0]]
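# Optional sketch (an assumption, not part of the original app): return the
# top-k nearest resume entries instead of a single one for broader answers.
# The name get_response_topk and the top_k parameter are illustrative.
def get_response_topk(query, top_k=2):
    # Embed the query and retrieve the top_k closest entries from the index
    query_embedding = model.encode([query])
    D, I = index.search(np.array(query_embedding), top_k)
    # Join the retrieved entries into one answer string
    return " | ".join(resume_values[i] for i in I[0])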
# Streamlit UI
st.title("📝 Resume Chatbot")
st.write("Ask anything about Pradeep's resume!")
user_input = st.text_input("Your question:")
if user_input:
    response = get_response(user_input)
    st.success(f"**Answer:** {response}")
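# To run the app locally (assuming the file is saved as app.py and the
# packages streamlit, faiss-cpu, sentence-transformers and numpy are installed):
#   streamlit run app.py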