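"""Generate text responses via the Pollinations text API.

Reads the system prompt template and character name from the project
configuration (via utils.read_config), substitutes the {char} placeholder,
and sends the user prompt to https://text.pollinations.ai with a GET request.
"""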
import os
import random
import urllib.parse

import requests
from dotenv import load_dotenv

from utils import read_config

load_dotenv()

def pre_process():
    # Read the configuration and substitute the character placeholder
    config = read_config()
    system_prompt = config['llm']['system_prompt']
    char = config['llm']['char']
    return system_prompt.replace("{char}", char)

def generate_llm(prompt):
    system_prompt = pre_process()
    # Encode the user prompt and system prompt for URL safety
    encoded_prompt = urllib.parse.quote(prompt)
    encoded_system = urllib.parse.quote(system_prompt)
    # Build the GET request URL for Pollinations' text API
    # Use a random seed so repeated identical prompts can yield varied responses
    random_seed = random.randint(0, 9999999)
    print(f"DEBUG: Random seed: {random_seed}")
    url = (
        f"https://text.pollinations.ai/{encoded_prompt}"
        f"?model=openai-large&private=true&system={encoded_system}&seed={random_seed}"
    )
    
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        # Return the generated text (stripping any extra whitespace)
        return response.text.strip()
    except requests.RequestException as e:
        # Network errors and non-2xx responses are reported back as a string
        return f"Error: {str(e)}"

# Example usage (can be removed or commented out in production):
if __name__ == "__main__":
    sample_prompt = "What is the capital of France?"
    print("Response:", generate_llm(sample_prompt))