import os
import random
import time  # Import time for retry delay

from openai import OpenAI
from dotenv import load_dotenv

from utils import read_config

load_dotenv()

client = OpenAI(
    base_url="https://text.pollinations.ai/openai",
    api_key="YOUR_API_KEY"  # Add if needed
)


def pre_process():
    # Read the configuration and substitute the character placeholder
    config = read_config()
    system_prompt = config['llm']['system_prompt']
    char = config['llm']['char']
    return system_prompt.replace("{char}", char)


def generate_llm(prompt, model="openai-large", max_tokens=8000):
    system_prompt = pre_process()
    while True:  # Keep retrying indefinitely
        try:
            # Use OpenAI's ChatCompletion API
            random_seed = random.randint(0, 9999999)
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=max_tokens,
                seed=random_seed
            )
            # Return the generated text
            return response.choices[0].message.content.strip()
        except Exception as e:
            print(f"Error occurred: {str(e)}. Retrying in 5 seconds...")
            time.sleep(5)  # Wait before retrying
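
# The loop above retries indefinitely with a fixed 5-second delay. A minimal
# sketch of a bounded alternative with exponential backoff follows;
# `max_retries` and `base_delay` are illustrative assumptions, not part of
# the original module.
def generate_llm_with_backoff(prompt, model="openai-large", max_tokens=8000,
                              max_retries=5, base_delay=2.0):
    system_prompt = pre_process()
    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=max_tokens,
                seed=random.randint(0, 9999999)
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            if attempt == max_retries - 1:
                raise  # Give up after the final attempt
            # Delay doubles after each failure: 2s, 4s, 8s, ...
            delay = base_delay * (2 ** attempt)
            print(f"Error occurred: {e}. Retrying in {delay:.0f} seconds...")
            time.sleep(delay)
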
# Example usage (can be removed or commented out in production):
if __name__ == "__main__":
    sample_prompt = "search for free image generation api"
    print("Response:", generate_llm(sample_prompt))