# Email Writing AI Agent with ReAct — Gradio Space entry point.
import gradio as gr
import requests
import os
from bs4 import BeautifulSoup  # For scraping company and role info

# Load API keys securely from environment variables (never hard-code secrets).
proxycurl_api_key = os.getenv("PROXYCURL_API_KEY")  # Proxycurl API key
groq_api_key = os.getenv("GROQ_CLOUD_API_KEY")  # Groq Cloud API key
class EmailAgent:
    """ReAct-style agent that gathers candidate/company context and drafts a job-application email.

    Pipeline (see run()): reason about missing inputs -> fetch the LinkedIn
    profile via Proxycurl -> fetch company info via Proxycurl -> scrape the
    company careers page for the role -> reflect -> generate the email with
    Groq Cloud's chat-completions API. Every network step degrades to a
    sensible default instead of crashing, so the UI always gets a result.
    """

    # Timeout (seconds) for every outbound HTTP call so the UI never hangs.
    REQUEST_TIMEOUT = 10

    def __init__(self, linkedin_url, company_name, role, word_limit, user_name, email, phone, linkedin):
        self.linkedin_url = linkedin_url
        self.company_name = company_name
        self.role = role
        self.word_limit = word_limit
        self.user_name = user_name
        self.email = email
        self.phone = phone
        self.linkedin = linkedin
        # Filled in later by the fetch/scrape steps.
        self.bio = None
        self.skills = []
        self.experiences = []
        self.company_info = None
        self.role_description = None

    # Reason: Decide what information is needed and if we need to take additional steps
    def reason_about_data(self):
        """Reason step: log which inputs are missing so later steps fall back to defaults."""
        print("Reasoning: Deciding what data we need...")
        if not self.linkedin_url:
            print("Warning: LinkedIn URL missing. Proceeding with default bio.")
        if not self.company_name:
            print("Warning: Company name missing. Proceeding with default company info.")
        if not self.role:
            print("Warning: Role missing. We will use general logic for the role.")

    def _use_default_profile(self):
        """Fallback candidate profile used whenever the LinkedIn lookup is skipped or fails."""
        self.bio = "A professional with diverse experience."
        self.skills = ["Adaptable", "Hardworking"]
        self.experiences = ["Worked across various industries"]

    # Action: Fetch LinkedIn data via Proxycurl (acting based on reasoning)
    def fetch_linkedin_data(self):
        """Action: populate bio/skills/experiences from Proxycurl, or use defaults."""
        if not self.linkedin_url:
            print("Action: No LinkedIn URL provided, using default bio.")
            self._use_default_profile()
            return
        print("Action: Fetching LinkedIn data via Proxycurl.")
        headers = {"Authorization": f"Bearer {proxycurl_api_key}"}
        try:
            # params= lets requests URL-encode the profile URL correctly
            # (the original f-string interpolation left it unescaped).
            response = requests.get(
                "https://nubela.co/proxycurl/api/v2/linkedin",
                headers=headers,
                params={"url": self.linkedin_url},
                timeout=self.REQUEST_TIMEOUT,
            )
        except requests.RequestException as e:
            print(f"Error: LinkedIn request failed ({e}). Using default bio.")
            self._use_default_profile()
            return
        if response.status_code == 200:
            data = response.json()
            self.bio = data.get("summary", "No bio available")
            self.skills = data.get("skills", [])
            self.experiences = data.get("experiences", [])
        else:
            print("Error: Unable to fetch LinkedIn profile. Using default bio.")
            self._use_default_profile()

    # Action: Fetch company information via Proxycurl or use defaults
    def fetch_company_info(self):
        """Action: populate company_info from Proxycurl, or use a default blurb."""
        if not self.company_name:
            print("Action: No company name provided, using default company info.")
            self.company_info = "A leading company in its field."
            return
        print(f"Action: Fetching company info for {self.company_name}.")
        headers = {"Authorization": f"Bearer {proxycurl_api_key}"}
        try:
            response = requests.get(
                "https://nubela.co/proxycurl/api/v2/linkedin/company",
                headers=headers,
                params={"company_name": self.company_name},
                timeout=self.REQUEST_TIMEOUT,
            )
        except requests.RequestException as e:
            print(f"Error: Company lookup failed ({e}). Using default info.")
            self.company_info = "A leading company in its field."
            return
        if response.status_code == 200:
            data = response.json()
            self.company_info = data.get("description", "No detailed company info available.")
        else:
            print(f"Error: Unable to fetch company info for {self.company_name}. Using default info.")
            self.company_info = "A leading company in its field."

    # Action: Scrape the company's website for role-specific information or use defaults
    def scrape_role_from_website(self):
        """Action: scrape <company>.com/careers for text mentioning the role.

        Returns:
            True when a matching snippet was found and stored in
            self.role_description; False otherwise (caller then falls back
            to use_default_role_description()).
        """
        print(f"Action: Scraping role description from the company's website for {self.role}.")
        if not self.company_name:
            print("Error: No company name or URL provided for scraping.")
            return False
        if not self.role:
            # Bug fix: an empty/None role previously matched every text node
            # on the page (or crashed on None.lower()).
            print("Error: No role provided for scraping.")
            return False
        try:
            response = requests.get(
                f"https://{self.company_name}.com/careers",
                timeout=self.REQUEST_TIMEOUT,
            )
            if response.status_code != 200:
                print(f"Error: Unable to reach company's website at {self.company_name}.com.")
                return False
            soup = BeautifulSoup(response.text, 'html.parser')
            role_needle = self.role.lower()  # hoisted out of the per-node lambda
            role_descriptions = soup.find_all(string=lambda text: role_needle in text.lower())
            if role_descriptions:
                self.role_description = role_descriptions[0]
                print(f"Found role description: {self.role_description}")
                return True
            print(f"No specific role description found on the website for {self.role}.")
            return False
        except Exception as e:
            # Scraping is best-effort: any parse/network failure just means
            # "not found" and the caller uses the default description.
            print(f"Error during scraping: {e}")
            return False

    # Action: Use default logic for role description if no role is available
    def use_default_role_description(self):
        """Action: synthesize a generic role description when scraping found nothing."""
        print(f"Action: Using default logic for the role of {self.role}.")
        self.role_description = f"The role of {self.role} at {self.company_name} involves leadership and management."

    # Reflection: Check if we have enough data to generate the email
    def reflect_on_data(self):
        """Reflection step: warn about gaps but always proceed (defaults cover them)."""
        print("Reflection: Do we have enough data?")
        if not self.bio or not self.skills or not self.company_info:
            print("Warning: Some critical information is missing. Proceeding with default values.")
        return True

    def _experience_titles(self):
        """Normalize self.experiences into a list of title strings.

        Proxycurl returns experiences as dicts carrying a "title" key, while
        the offline fallback uses plain strings; the original code indexed
        exp['title'] unconditionally and raised TypeError on the fallback.
        """
        titles = []
        for exp in self.experiences:
            if isinstance(exp, dict):
                titles.append(str(exp.get("title", "Unknown role")))
            else:
                titles.append(str(exp))
        return titles

    # Final Action: Generate the email using Groq Cloud LLM based on gathered data
    def generate_email(self):
        """Final action: ask the Groq Cloud LLM to draft the email from gathered data."""
        print("Action: Generating the email with the gathered information.")
        # Fully dynamic LLM prompt built from everything collected above.
        prompt = f"""
        Write a professional email applying for the {self.role} position at {self.company_name}.
        Use the following information:
        - The candidate’s LinkedIn bio: {self.bio}.
        - The candidate’s most relevant skills: {', '.join(self.skills)}.
        - The candidate’s professional experience: {', '.join(self._experience_titles())}.
        Please research the company's public information. If no company-specific information is available, use general knowledge about the company's industry.
        Tailor the email dynamically to the role of **{self.role}** at {self.company_name}, aligning the candidate's skills and experiences with the expected responsibilities of the role and the company’s operations.
        End the email with this signature:
        Best regards,
        {self.user_name}
        Email: {self.email}
        Phone: {self.phone}
        LinkedIn: {self.linkedin}
        The email should not exceed {self.word_limit} words.
        """
        url = "https://api.groq.com/openai/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {groq_api_key}",
            "Content-Type": "application/json",
        }
        data = {
            "messages": [{"role": "user", "content": prompt}],
            "model": "llama3-8b-8192"
        }
        try:
            response = requests.post(url, headers=headers, json=data, timeout=self.REQUEST_TIMEOUT)
        except requests.RequestException as e:
            print(f"Error: Groq request failed ({e}).")
            return "Error generating email. Please check your API key or try again later."
        if response.status_code == 200:
            return response.json()["choices"][0]["message"]["content"].strip()
        print(f"Error: {response.status_code}, {response.text}")
        return "Error generating email. Please check your API key or try again later."

    # Main loop following ReAct pattern
    def run(self):
        """Execute the full ReAct loop and return the generated email text."""
        self.reason_about_data()   # Reasoning step
        self.fetch_linkedin_data() # Fetch LinkedIn data
        self.fetch_company_info()  # Fetch company data
        # Scrape the company's website for role-specific information or use defaults
        if not self.scrape_role_from_website():
            self.use_default_role_description()
        # Reflect on whether the data is sufficient
        if self.reflect_on_data():
            return self.generate_email()  # Final action: generate email
        else:
            return "Error: Not enough data to generate the email."
# Define the Gradio interface and the main app logic | |
def gradio_ui():
    """Build and launch the Gradio front-end for the email-writing agent."""

    def create_email(name, company_name, role, email, phone, linkedin_url, word_limit):
        # Wire the form values into an EmailAgent and run the full ReAct loop.
        agent = EmailAgent(linkedin_url, company_name, role, word_limit, name, email, phone, linkedin_url)
        return agent.run()

    # Form inputs, in the same order create_email expects its arguments.
    form_inputs = [
        gr.Textbox(label="Your Name", placeholder="Enter your name"),
        gr.Textbox(label="Company Name or URL", placeholder="Enter the company name or website URL"),
        gr.Textbox(label="Role Applying For", placeholder="Enter the role you are applying for"),
        gr.Textbox(label="Your Email Address", placeholder="Enter your email address"),
        gr.Textbox(label="Your Phone Number", placeholder="Enter your phone number"),
        gr.Textbox(label="Your LinkedIn URL", placeholder="Enter your LinkedIn profile URL"),
        gr.Slider(minimum=50, maximum=300, step=10, label="Email Word Limit", value=150),
    ]

    # Single output box holding the drafted email.
    result_box = gr.Textbox(label="Generated Email", placeholder="Your generated email will appear here", lines=10)

    app = gr.Interface(
        fn=create_email,
        inputs=form_inputs,
        outputs=[result_box],
        title="Email Writing AI Agent with ReAct",
        description="Generate a professional email for a job application using LinkedIn data, company info, and role description.",
        allow_flagging="never"
    )
    app.launch()
# Start the Gradio app only when executed as a script (keeps imports side-effect free).
if __name__ == "__main__":
    gradio_ui()