# NOTE: Hugging Face Spaces page metadata (runtime status, file size, commit
# hashes, and the line-number gutter) had been scraped into the top of this
# file; it was removed so the module parses as valid Python.
import os
from openai import OpenAI
from datetime import datetime
import gradio as gr
import time
import openai # Redundant import
# --- Constants ---
# Default model must align with what client.responses.create supports.
DEFAULT_MODEL = "gpt-4.1"
MAX_HISTORY_LENGTH = 5  # History formatting is manual and limited (see helper below).

# --- API Key and Client Initialization ---
API_KEY = os.getenv("OPENAI_API_KEY")
if not API_KEY:
    # Warn but keep going: the UI still builds, and the client will fail
    # loudly on the first API call instead of crashing at import time.
    print("Error: OPENAI_API_KEY environment variable not set.")
client = OpenAI(api_key=API_KEY)
# --- Helper Functions ---
def get_openai_response_simplified(prompt, model=DEFAULT_MODEL, system_prompt="", chat_history=None):
    """Get a single, NON-STREAMING completion via ``client.responses.create``.

    The Responses-API call shape used here takes one flat ``input`` string,
    so the system prompt and prior turns are crudely concatenated into it.
    Sampling parameters (temperature, top_p, max_tokens, ...) are not part
    of this call shape and are therefore not sent.

    Args:
        prompt: The current user message.
        model: Model name, passed straight through to the API.
        system_prompt: Optional instructions, prepended as a "System:" line
            together with the current date/time.
        chat_history: Optional list of ``(user, assistant)`` tuples.

    Returns:
        The model's reply text, or a human-readable error string on failure.
    """
    today_day = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # --- Manually flatten system prompt + history + new prompt into 'input' ---
    formatted_input = ""
    effective_system_prompt = f"Today's date is: {today_day}. {system_prompt}".strip()
    if effective_system_prompt:
        # Best-effort convention: no documented tag format is known for this
        # call shape, so the system text is simply prepended as a "System:" line.
        formatted_input += f"System: {effective_system_prompt}\n\n"

    if chat_history:
        for turn in chat_history:
            # Only include complete (user, assistant) pairs; skip partial turns.
            if len(turn) == 2 and turn[0] is not None and turn[1] is not None:
                formatted_input += f"User: {turn[0]}\nAssistant: {turn[1]}\n"

    # Cue the model to produce the next assistant turn.
    formatted_input += f"User: {prompt}\nAssistant:"

    try:
        # NOTE(review): assumes client.responses.create exists and accepts
        # exactly this (model, input) shape — confirm against the SDK version.
        response = client.responses.create(
            model=model,
            input=formatted_input
        )
        # Assumes the response object exposes the text as 'output_text'.
        return response.output_text
    # Error handling may need adjustment depending on how responses.create fails.
    except openai.APIConnectionError as e:
        print(f"OpenAI API request failed: {e}")
        return f"Error: Could not connect to OpenAI API. {e}"
    except openai.RateLimitError as e:
        print(f"OpenAI API request failed: {e}")
        return f"Error: Rate limit exceeded. Please try again later. {e}"
    except openai.AuthenticationError as e:
        print(f"OpenAI API request failed: {e}")
        return f"Error: Authentication failed. Check your API key. {e}"
    except openai.APIStatusError as e:
        print(f"OpenAI API request failed: {e}")
        return f"Error: OpenAI API returned an error (Status: {e.status_code}). {e}"
    except AttributeError as e:
        # Raised if 'responses', 'create', or 'output_text' is missing.
        print(f"Error accessing response or client method: {e}")
        return f"Error: The API call structure 'client.responses.create' or its response format might be incorrect or not available. {e}"
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        return f"An unexpected error occurred: {e}"
# !!! WARNING: This update function is NON-STREAMING !!!
def update_ui_simplified(message, chat_history, model, system_prompt, history_length):
    """Handle one chat turn for the Gradio UI, WITHOUT streaming.

    Args:
        message: Text from the input box; an empty message is a no-op.
        chat_history: Current chatbot value — a list of (user, bot) tuples.
        model: Model name selected in the dropdown.
        system_prompt: Contents of the system-prompt textbox.
        history_length: Slider value; caps both the turns sent to the API
            and the turns kept visible in the chatbot.

    Returns:
        ("", history): clears the textbox and replaces the chatbot value.
    """
    if not message:
        # Nothing to send; keep the existing history untouched.
        return "", chat_history

    # Trim to the most recent turns before calling the API.
    history_for_api = chat_history[-int(history_length):] if history_length > 0 else []

    # Single blocking call; the full response arrives at once.
    bot_response = get_openai_response_simplified(
        prompt=message,
        model=model,
        system_prompt=system_prompt,
        chat_history=history_for_api
    )

    # Append the completed turn, then trim what the UI displays.  Because the
    # returned value becomes the chatbot's new state, this also bounds growth.
    chat_history.append((message, bot_response))
    visible_history = chat_history[-int(history_length):] if history_length > 0 else []
    return "", visible_history
# --- Gradio Interface (Modified for Simplified API Call) ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Header / explanatory text.
    gr.Markdown("# Chat (Simplified API Demo)")
    gr.Markdown("---")
    gr.Markdown("Using a simplified, non-streaming API call structure.")
    gr.Markdown("---")
    gr.Markdown("Chat below (Note: Responses will appear all at once): π")

    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                label="Chat Window",
                show_label=False,
                avatar_images=(
                    "https://cdn-icons-png.flaticon.com/512/1077/1077114.png",  # User
                    "https://cdn-icons-png.flaticon.com/512/8649/8649540.png"   # AI
                ),
                render_markdown=True,
                height=500,
                bubble_full_width=False
            )
            msg = gr.Textbox(
                label="Your Message",
                placeholder="Type your message here and press Enter...",
                scale=4,
                show_label=False,
                container=False
            )

    # Advanced options; most controls are display-only for the simplified API.
    with gr.Accordion("Advanced Options (May Not Apply to Simplified API)", open=False):
        model_select = gr.Dropdown(
            label="Model",
            # Ensure gpt-4.1 is a valid choice if used.
            choices=["gpt-4.1", "gpt-4o-mini-2024-07-18", "gpt-3.5-turbo-0125", "gpt-4o"],
            value=DEFAULT_MODEL,
            interactive=True
        )
        # These sliders are kept for UI parity but are NOT passed to the
        # simplified API call (its call shape does not accept them).
        temperature_slider = gr.Slider(label="Temperature (Not Used)", minimum=0.0, maximum=2.0, value=1.0, step=0.1, interactive=True)
        top_p_slider = gr.Slider(label="Top P (Not Used)", minimum=0.0, maximum=1.0, value=1.0, step=0.05, interactive=True)
        frequency_penalty_slider = gr.Slider(label="Frequency Penalty (Not Used)", minimum=-2.0, maximum=2.0, value=0.0, step=0.1, interactive=True)
        presence_penalty_slider = gr.Slider(label="Presence Penalty (Not Used)", minimum=-2.0, maximum=2.0, value=0.0, step=0.1, interactive=True)
        system_prompt_textbox = gr.Textbox(label="System Prompt", placeholder="e.g., You are a helpful assistant.", lines=3, interactive=True)
        history_length_slider = gr.Slider(label="Chat History Length (Affects Input & Display)", minimum=1, maximum=20, value=MAX_HISTORY_LENGTH, step=1, interactive=True)

    with gr.Row():
        clear = gr.Button("Clear Chat")
        send = gr.Button("Send Message", variant="primary")

    # --- Event Handlers (Using Simplified Functions) ---
    # Inputs exclude the sampling sliders the simplified function cannot use.
    inputs_simplified = [
        msg, chatbot, model_select, system_prompt_textbox,
        history_length_slider
    ]
    outputs = [msg, chatbot]

    # Send button and Enter key both route to the non-streaming handler.
    send.click(
        update_ui_simplified,
        inputs=inputs_simplified,
        outputs=outputs,
        queue=True
    )
    msg.submit(
        update_ui_simplified,
        inputs=inputs_simplified,
        outputs=outputs,
        queue=True
    )
    # Clear resets both the textbox and the chat history.
    clear.click(lambda: (None, []), None, outputs=[msg, chatbot], queue=False)

    gr.Examples(
        examples=["Tell me about the latest AI developments", "Write a short story about a friendly robot", "Explain black holes simply"],
        inputs=msg,
        label="Example Prompts"
    )
# --- Launch ---
if __name__ == "__main__":
    # Enable queueing so the blocking, non-streaming handler doesn't stall
    # concurrent users, then start the server.
    # (A stray trailing '|' scrape artifact after demo.launch() was removed —
    # it was a syntax error.)
    demo.queue()
    demo.launch()