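"""Streamlit chatbot for Manyue's portfolio.

Answers questions about Manyue's ML/AI background, skills, and projects using a
local knowledge_base.json file, and can analyze pasted job descriptions.
"""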
import streamlit as st
import json
from typing import Dict, List, Any
import re

def format_project_response(project: dict, indent_level: int = 0) -> str:
    """Format project details with proper indentation and spacing."""
    indent = " " * indent_level
    response = [f"\n{indent}• {project['name']}:"]

    # Add description with proper indentation
    description_lines = project['description'].split('. ')
    response.extend([f"{indent} {line.strip()}." for line in description_lines])

    # Add technologies with proper line break
    if 'skills_used' in project:
        response.append(f"\n{indent} Technologies: {', '.join(project['skills_used'])}")

    # Add status and notes
    if 'status' in project:
        status = project['status']
        if 'development' in status.lower() or 'progress' in status.lower():
            response.append(f"\n{indent} Status: {status}")
    if 'confidentiality_note' in project:
        response.append(f"{indent} Note: {project['confidentiality_note']}")

    return '\n'.join(response) + '\n'

def analyze_job_requirements(text: str, knowledge_base: dict) -> Dict[str, List[str]]:
    """Analyze job requirements and match them against skills in the knowledge base."""
    text_lower = text.lower()

    # Common ML/AI-related keywords, grouped by category
    tech_keywords = {
        'machine learning': ['ml', 'machine learning', 'deep learning', 'neural networks'],
        'data science': ['data science', 'data analysis', 'analytics', 'statistics'],
        'programming': ['python', 'sql', 'programming', 'coding'],
        'tools': ['tableau', 'powerbi', 'visualization', 'git'],
        'cloud': ['aws', 'azure', 'cloud', 'deployment']
    }

    # Extract matches from the knowledge base
    matches = {category: [] for category in tech_keywords}
    my_skills = {
        skill.lower()
        for skill_type in knowledge_base['skills']['technical_skills'].values()
        for skill_list in skill_type.values()
        for skill in skill_list
    }

    # Find matching skills in each category
    for category, keywords in tech_keywords.items():
        for keyword in keywords:
            if keyword in text_lower and any(skill in keyword or keyword in skill for skill in my_skills):
                matches[category].append(keyword)

    return matches

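# generate_response() below calls find_relevant_projects(), which is not defined in
# this section of the file. The version here is a minimal sketch, assuming each
# project dict uses the same 'name'/'description'/'skills_used' keys seen above;
# adjust it to the actual knowledge_base.json schema if it differs.
def find_relevant_projects(text: str, projects: List[dict]) -> List[dict]:
    """Return projects whose name or listed skills appear in the given text."""
    text_lower = text.lower()
    relevant = []
    for project in projects:
        skills = [skill.lower() for skill in project.get('skills_used', [])]
        # Keep the project if any of its skills, or its name, is mentioned in the text
        if any(skill in text_lower for skill in skills) or project.get('name', '').lower() in text_lower:
            relevant.append(project)
    return relevant
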
def handle_perspective_query(query: str, knowledge_base: dict) -> str:
    """Handle philosophical or perspective-based queries."""
    query_lower = query.lower()
    perspectives = knowledge_base.get('perspectives', {})

    # Market-related queries
    if any(word in query_lower for word in ['market', 'opportunity', 'job', 'hiring']):
        if any(word in query_lower for word in ['down', 'bad', 'difficult', 'tough']):
            response_parts = [
                "• My Perspective on the Current Market:",
                f" {perspectives['market_outlook']['job_market']}",
                "",
                "• My Strategic Approach:",
                f" {perspectives['market_outlook']['strategy']}",
                "",
                "• My Unique Value Proposition:",
                f" {perspectives['market_outlook']['value_proposition']}"
            ]
            return '\n'.join(response_parts)

    # Learning and growth queries
    elif any(word in query_lower for word in ['learn', 'study', 'growth']):
        return f"• My Learning Philosophy:\n {perspectives['learning_philosophy']}"

    # Handle non-portfolio queries gracefully
    return knowledge_base['common_queries']['general']

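# The three helpers below are also referenced by generate_response() but not defined
# in this section. They are minimal placeholder sketches: the 'links' and 'story'
# keys are assumptions for illustration, while 'common_queries'['general'] matches
# the key used in handle_perspective_query() above.
def add_relevant_links(response: str, query: str, knowledge_base: dict) -> str:
    """Append portfolio links (if any are present in the knowledge base) to a response."""
    links = knowledge_base.get('links', {})  # assumed key; adjust to the real schema
    if not links:
        return response
    link_lines = [f" - {label}: {url}" for label, url in links.items()]
    return response + "\n\nRelevant links:\n" + '\n'.join(link_lines)


def format_story_response(knowledge_base: dict) -> str:
    """Format the background/career-transition story (assumes a 'story' text entry)."""
    return f"• My Journey:\n {knowledge_base.get('story', '')}"


def format_default_response(knowledge_base: dict) -> str:
    """Fallback response when no specific handler matches the query."""
    return knowledge_base.get('common_queries', {}).get(
        'general',
        "I can tell you about my projects, skills, background, or how I fit a specific role."
    )
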
def generate_response(query: str, knowledge_base: dict) -> str:
    """Generate enhanced responses using the knowledge base."""
    query_lower = query.lower()

    # Handle project listing requests
    if any(word in query_lower for word in ['list', 'project', 'portfolio', 'built', 'created', 'developed']):
        response_parts = ["Here are my key projects:"]

        # Major projects (under development)
        response_parts.append("\nMajor Projects (In Development):")
        for project in knowledge_base['projects']['major_projects']:
            response_parts.append(format_project_response(project, indent_level=1))

        # Algorithm implementation projects
        response_parts.append("\nCompleted Algorithm Implementation Projects:")
        for project in knowledge_base['projects']['algorithm_practice_projects']:
            response_parts.append(format_project_response(project, indent_level=1))

        response = '\n'.join(response_parts)
        return add_relevant_links(response, query, knowledge_base)

    # Handle job description analysis
    elif len(query.split()) > 20 and any(
        phrase in query_lower
        for phrase in ['requirements', 'qualifications', 'looking for', 'job description']
    ):
        matches = analyze_job_requirements(query, knowledge_base)
        relevant_projects = find_relevant_projects(query, knowledge_base['projects']['major_projects'])
        response_parts = ["Based on the job requirements, here's how my profile aligns:\n"]

        # Technical skills match
        if any(matches.values()):
            response_parts.append("• Technical Skills Alignment:")
            for category, skills in matches.items():
                if skills:
                    response_parts.append(f" - Strong {category} skills: {', '.join(skills)}")
            response_parts.append("")

        # Project experience
        if relevant_projects:
            response_parts.append("• Relevant Project Experience:")
            for project in relevant_projects:
                desc = f" - {project['name']}: {project['description']}"
                response_parts.append(desc)
            response_parts.append("")

        # Education and background
        response_parts.extend([
            "• Education and Background:",
            " - Advanced AI/ML education in Canada",
            " - Unique commerce background providing business perspective",
            " - Strong foundation in practical ML implementation",
            ""
        ])

        return '\n'.join(response_parts)

    # Handle perspective/philosophical queries
    elif any(word in query_lower for word in ['market', 'think', 'believe', 'opinion', 'weather']):
        return handle_perspective_query(query, knowledge_base)

    # Handle story/background queries
    elif any(word in query_lower for word in ['background', 'journey', 'story', 'transition']):
        return format_story_response(knowledge_base)

    # Default response
    return format_default_response(knowledge_base)

def main():
    st.title("💬 Chat with Manyue's AI Assistant")

    # Initialize session state
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "knowledge_base" not in st.session_state:
        try:
            with open('knowledge_base.json', 'r', encoding='utf-8') as f:
                st.session_state.knowledge_base = json.load(f)
        except FileNotFoundError:
            st.error("Knowledge base file not found.")
            return

    # Display welcome message
    if "displayed_welcome" not in st.session_state:
        st.write("""
Hi! I'm Manyue's AI assistant. I can tell you about:
- My journey from commerce to ML/AI
- My technical skills and projects
- My fit for ML/AI roles
- My perspective on the tech industry
- You can also paste job descriptions to see how my profile matches!
""")
        st.session_state.displayed_welcome = True

    # Create two columns
    col1, col2 = st.columns([3, 1])

    with col1:
        # Display existing messages
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        # Handle input - IMPORTANT: this needs to be outside any button handlers
        if prompt := st.chat_input("Ask me anything or paste a job description..."):
            with st.chat_message("user"):
                st.markdown(prompt)
            # Add user message to session state
            st.session_state.messages.append({"role": "user", "content": prompt})

            # Generate and display response
            with st.chat_message("assistant"):
                try:
                    response = generate_response(prompt, st.session_state.knowledge_base)
                    st.markdown(response)
                    st.session_state.messages.append({"role": "assistant", "content": response})
                except Exception as e:
                    st.error(f"An error occurred: {str(e)}")

    with col2:
        st.subheader("Quick Questions")
        example_questions = [
            "Tell me about your ML projects",
            "What are your technical skills?",
            "What makes you stand out?",
            "What's your journey into ML?",
            "Your view on the current market?"
        ]

        # Handle quick question buttons (unique keys avoid duplicate-widget errors)
        for question in example_questions:
            if st.button(question, key=f"btn_{question}"):
                with st.chat_message("user"):
                    st.markdown(question)
                st.session_state.messages.append({"role": "user", "content": question})

                with st.chat_message("assistant"):
                    try:
                        response = generate_response(question, st.session_state.knowledge_base)
                        st.markdown(response)
                        st.session_state.messages.append({"role": "assistant", "content": response})
                    except Exception as e:
                        st.error(f"An error occurred: {str(e)}")
                st.rerun()

        st.markdown("---")
        if st.button("Clear Chat", key="clear_chat"):
            st.session_state.messages = []
            st.rerun()

if __name__ == "__main__":
    main()