import json

import yaml
from smolagents import CodeAgent, LiteLLMModel, tool

from tools.final_answer import FinalAnswerTool
from tools.visit_webpage import VisitWebpageTool
from tools.web_search import DuckDuckGoSearchTool

from Gradio_UI import GradioUI
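
# provide_my_information (below) reads its answers from info/info.json. That file
# is not part of this snippet; a minimal schema consistent with the keys the tool
# accesses (an assumption; adapt it to your own data) looks like:
#
# {
#   "name": "...",
#   "introduction": "...",
#   "location": "...",
#   "occupation": "...",
#   "education": "...",
#   "skills": ["...", "..."],
#   "hobbies": ["...", "..."],
#   "contact": {
#     "email": "...",
#     "linkedin": "...",
#     "github": "...",
#     "website": "..."
#   }
# }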

@tool
def provide_my_information(query: str) -> str:
    """
    Provide information about me (Tianqing LIU) based on the user's query.

    Args:
        query: The user's question or request for information.

    Returns:
        str: A response containing the requested information.
    """
    # Convert the query to lowercase for case-insensitive matching
    query = query.lower()
    with open("info/info.json", "r") as file:
        my_info = json.load(file)
    # Check for specific keywords in the query and return the corresponding information
    if "who" in query or "about" in query or "introduce" in query or "presentation" in query:
        return my_info["introduction"]
    elif "name" in query:
        return f"My name is {my_info['name']}."
    elif "location" in query:
        return f"I am located in {my_info['location']}."
    elif "occupation" in query or "job" in query or "work" in query:
        return f"I work as a {my_info['occupation']}."
    elif "education" in query or "educational" in query:
        return f"I have a {my_info['education']}."
    elif "skills" in query or "what can you do" in query:
        return f"My skills include: {', '.join(my_info['skills'])}."
    elif "hobbies" in query or "interests" in query:
        return f"My hobbies are: {', '.join(my_info['hobbies'])}."
    elif "contact" in query or "email" in query or "linkedin" in query or "github" in query or "cv" in query or "resume" in query:
        contact_info = my_info["contact"]
        return (
            f"You can contact me via email at {contact_info['email']}, "
            f"connect with me on LinkedIn at {contact_info['linkedin']}, "
            f"check out my GitHub profile at {contact_info['github']}, "
            f"or visit my website at {contact_info['website']}."
        )
    else:
        return (
            "I'm sorry, I don't have information on that. Please ask about my name, "
            "location, occupation, education, skills, hobbies, or contact details."
        )


final_answer = FinalAnswerTool()
visit_webpage = VisitWebpageTool()
web_search = DuckDuckGoSearchTool()

# If the agent does not answer, the model may be overloaded; use another model
# or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model_id = "Qwen/QwQ-32B"  # used by the HfApiModel alternative below

# Alternative backends (uncomment one and re-add the matching smolagents import):
# model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct", max_tokens=1025)
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
#     model_id=model_id,
#     # it is possible that this model may be overloaded
#     custom_role_conversions=None,
# )

# A CodeAgent needs enough output budget to write a full code block; a max_tokens
# as low as 10 would truncate every completion before a tool call is emitted.
model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=2048)
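# LiteLLM's Anthropic provider reads the ANTHROPIC_API_KEY environment variable;
# on a Hugging Face Space, set it as a repository secret.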

# Import a tool from the Hub (requires `load_tool` from smolagents):
# image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
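# prompts.yaml carries the agent's prompt templates (in the agents-course
# template it includes a system_prompt entry, among others); edit it there
# rather than hard-coding prompts here.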
    
agent = CodeAgent(
    model=model,
    tools=[final_answer, provide_my_information],  # add your tools here (don't remove final_answer)
    max_steps=1,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
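
# web_search and visit_webpage are instantiated above but not yet passed to the
# agent; to let it browse, extend the list (and consider raising max_steps), e.g.:
# tools=[final_answer, provide_my_information, web_search, visit_webpage]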


GradioUI(agent).launch()