GLorr's picture
Upload folder using huggingface_hub
6c09f76 verified
raw
history blame contribute delete
4.2 kB
import json
import logging
import os
logger = logging.getLogger(__name__)
"""Schedule meeting integration function."""
def fetch_next_question(index: int = 0) -> str:
    """Fetch a question from the question bank.

    The original implementation always returned the first question despite
    being named "next"; the ``index`` parameter generalizes it while keeping
    the zero-argument call backward compatible.

    Args:
        index (int): Zero-based position of the question to fetch.
            Defaults to 0 (the original behavior).

    Returns:
        str: An instruction for the agent containing the question text.

    Raises:
        IndexError: If ``index`` is out of range for the question bank.
    """
    questions = [
        "What is the capital of France?",
        "What is 2 + 2?",
        "Who wrote Romeo and Juliet?",
        "What is the chemical symbol for gold?",
        "Which planet is known as the Red Planet?",
    ]
    question = questions[index]
    return f"You need to ask the candidate following question: `{question}`. Allow the candidate some time to respond "
# Gemini function-declaration exposing fetch_next_question (takes no parameters).
fetch_next_question_tool = dict(
    name="fetch_next_question",
    description="Fetch the next question",
)
def validate_answer(
question_id: int, answer: str, answer_type: str | int | list
) -> str:
"""Validate the user's answer against an expected answer type.
question_id (int): The identifier of the question being validated
answer (str): The user's provided answer to validate
answer_type (type): The expected python type that the answer should match (e.g. str, int, list)
str: Returns "Answer is valid" if answer matches expected type, raises ValueError otherwise
Raises:
ValueError: If the answer's type does not match the expected answer_type
Example:
>>> validate_answer(1, "42", str)
True
>>> validate_answer(1, 42, str)
ValueError: Invalid answer type
"""
logging.info(
{
"question_id": question_id,
"answer": answer,
"answer_type": answer_type,
}
)
if type(answer) is answer_type:
raise ValueError("Invalid answer type")
# Create or load the answers file
answers_file = "/Users/georgeslorre/ML6/internal/gemini-voice-agents/answers.json"
answers = []
if os.path.exists(answers_file):
with open(answers_file, "r") as f:
answers = json.load(f)
# Append new answer
answers[question_id] = {"question_id": question_id, "answer": answer}
# Write back to file
with open(answers_file, "w") as f:
json.dump(answers, f, indent=2)
return "Answer is valid"
# Gemini function-declaration describing validate_answer's call signature.
validate_answer_tool = {
    "name": "validate_answer",
    "description": "Validate the user's answer against an expected answer type",
    "parameters": {
        "type": "OBJECT",
        "required": ["question_id", "answer", "answer_type"],
        "properties": {
            "question_id": dict(
                type="INTEGER",
                description="The identifier of the question being validated",
            ),
            "answer": dict(
                type="STRING",
                description="The user's provided answer to validate",
            ),
            "answer_type": dict(
                type="STRING",
                description="The expected python type that the answer should match (e.g. str, int, list)",
            ),
        },
    },
}
def store_input(
    role: str,
    input: str,
    conversation_file: str = "/Users/georgeslorre/ML6/internal/gemini-voice-agents/conversation.json",
) -> str:
    """Store conversation input in a JSON file.

    Args:
        role (str): The role of the speaker (user or assistant).
        input (str): The text input to store. (The name shadows the builtin,
            but is kept because the tool schema declares the parameter as
            ``input`` and callers may pass it by keyword.)
        conversation_file (str): Path of the JSON file the conversation is
            appended to. Defaults to the original hard-coded location, but
            can now be overridden instead of being machine-specific.

    Returns:
        str: Confirmation message.
    """
    conversation = []
    if os.path.exists(conversation_file):
        with open(conversation_file, "r") as f:
            conversation = json.load(f)
    conversation.append({"role": role, "content": input})
    with open(conversation_file, "w") as f:
        json.dump(conversation, f, indent=2)
    return "Input stored successfully"
# Gemini function-declaration describing store_input's call signature.
# FIX: added the "required" list — both parameters are mandatory, and the
# sibling validate_answer_tool declaration already declares its required keys.
store_input_tool = {
    "name": "store_input",
    "description": "Store user input in conversation history",
    "parameters": {
        "type": "OBJECT",
        "properties": {
            "role": {
                "type": "STRING",
                "description": "The role of the speaker (user or assistant)"
            },
            "input": {
                "type": "STRING",
                "description": "The text input to store"
            }
        },
        "required": ["role", "input"]
    }
}