ReallyFloppyPenguin committed
Commit 2c79988 · verified · Parent: 5103a80

Update app.py

Files changed (1): app.py (+395, −29)
app.py CHANGED
@@ -1,15 +1,60 @@
 import gradio as gr
- from synthgen import generate_synthetic_text, api_key  # Import from our modified synthgen

- # Check if the API key was loaded successfully (provides feedback in Gradio UI)
- api_key_loaded = True

 def run_generation(prompt: str, model: str, num_samples: int) -> str:
     """
-     Wrapper function for Gradio interface to generate multiple samples.
     """
-     if not api_key_loaded:
-         return "Error: OPENROUTER_API_KEY not configured in Space secrets."
     if not prompt:
         return "Error: Please enter a prompt."
     if num_samples <= 0:
@@ -18,39 +63,360 @@ def run_generation(prompt: str, model: str, num_samples: int) -> str:
     output = f"Generating {num_samples} samples using model '{model}'...\n"
     output += "="*20 + "\n\n"

     for i in range(num_samples):
         generated_text = generate_synthetic_text(prompt, model)
         output += f"--- Sample {i+1} ---\n"
-         output += generated_text + "\n\n"

-     output += "="*20 + "\nGeneration complete."
     return output

 # --- Gradio Interface Definition ---
 with gr.Blocks() as demo:
-     gr.Markdown("# Synthetic Text Generator using OpenRouter")
     gr.Markdown(
-         "Generate multiple text samples based on a prompt using various models available on OpenRouter. "
-         "Ensure you have added your `OPENROUTER_API_KEY` to the Space secrets."
-     )
-     if not api_key_loaded:
-         gr.Markdown("**Warning:** `OPENROUTER_API_KEY` not found. Please add it to the Space secrets.")
-
-     with gr.Row():
-         prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here (e.g., Generate a short product description for a sci-fi gadget)")
-     with gr.Row():
-         model_input = gr.Textbox(label="OpenRouter Model ID", value="deepseek/deepseek-chat-v3-0324:free", placeholder="e.g., openai/gpt-3.5-turbo, google/gemini-flash-1.5")
-         num_samples_input = gr.Number(label="Number of Samples", value=3, minimum=1, step=1)
-
-     generate_button = gr.Button("Generate Text")
-     output_text = gr.Textbox(label="Generated Samples", lines=15)
-
-     generate_button.click(
-         fn=run_generation,
-         inputs=[prompt_input, model_input, num_samples_input],
-         outputs=output_text
     )

 # Launch the Gradio app
 if __name__ == "__main__":
-     demo.launch()
 import gradio as gr
+ import json
+ import tempfile
+ import os
+ import re  # For parsing conversation
+ from typing import Union, Optional  # Add Optional
+ # Import the actual functions from synthgen
+ from synthgen import (
+     generate_synthetic_text,
+     generate_prompts,
+     generate_synthetic_conversation
+ )
+ # We no longer need to import api_key here or check it directly in app.py


+ # --- Helper Functions for JSON Generation ---
+
+ # Use Union for Python < 3.10 compatibility
+ def create_json_file(data: object, base_filename: str) -> Union[str, None]:
+     """Creates a temporary JSON file and returns its path."""
+     try:
+         # Create a temporary file with a .json extension
+         with tempfile.NamedTemporaryFile(mode='w', suffix=".json", delete=False, encoding='utf-8') as temp_file:
+             json.dump(data, temp_file, indent=4, ensure_ascii=False)
+             return temp_file.name  # Return the path to the temporary file
+     except Exception as e:
+         print(f"Error creating JSON file {base_filename}: {e}")
+         return None
+
+ def parse_conversation_string(text: str) -> list[dict]:
+     """Parses a multi-line conversation string into a list of message dictionaries."""
+     messages = []
+     # Regex to capture "User:" or "Assistant:" at the start of a line, followed by content
+     pattern = re.compile(r"^(User|Assistant):\s*(.*)$", re.IGNORECASE | re.MULTILINE)
+     matches = pattern.finditer(text)
+     for match in matches:
+         role = match.group(1).lower()
+         content = match.group(2).strip()
+         messages.append({"role": role, "content": content})
+     # Fallback: if the regex matched nothing but the text is non-empty, warn and
+     # return an empty list rather than guessing at a structure.
+     if not messages and text:
+         print(f"Warning: Could not parse conversation structure for: '{text[:100]}...'")
+     return messages
+
+
+ # Wrapper for text generation (largely unchanged; error handling now lives in synthgen)
 def run_generation(prompt: str, model: str, num_samples: int) -> str:
     """
+     Wrapper function for the Gradio interface to generate multiple text samples.
+     Relies on generate_synthetic_text for API calls and error handling.
     """
     if not prompt:
         return "Error: Please enter a prompt."
     if num_samples <= 0:

     output = f"Generating {num_samples} samples using model '{model}'...\n"
     output += "="*20 + "\n\n"

+     # generate_synthetic_text now handles API errors internally
     for i in range(num_samples):
+         # The function returns the text or an error string starting with "Error:"
         generated_text = generate_synthetic_text(prompt, model)
         output += f"--- Sample {i+1} ---\n"
+         output += generated_text + "\n\n"  # Append result directly
+
+     output += "="*20 + "\nGeneration complete (check results above for errors)."
+     return output
+
+
+ # Removed the placeholder backend functions (generate_prompts_backend, generate_single_conversation)
+
+
+ # Handles multiple conversation prompts using the real backend
+ def run_conversation_generation(system_prompts_text: str, model: str, num_turns: int) -> str:
+     """
+     Wrapper function for the Gradio interface to generate multiple conversations
+     from a list of prompts, calling generate_synthetic_conversation.
+     """
+     if not system_prompts_text:
+         return "Error: Please enter or generate at least one system prompt/topic."
+     if num_turns <= 0:
+         return "Error: Number of turns must be positive."
+
+     prompts = [p.strip() for p in system_prompts_text.strip().split('\n') if p.strip()]
+     if not prompts:
+         return "Error: No valid prompts found in the input."
+
+     output = f"Generating {len(prompts)} conversations ({num_turns} turns each) using model '{model}'...\n"
+     output += "="*40 + "\n\n"
+
+     for i, prompt in enumerate(prompts):
+         # generate_synthetic_conversation handles the API call and returns either
+         # the conversation (with its own title line) or an error string, so no
+         # try/except is needed here.
+         conversation_text = generate_synthetic_conversation(prompt, model, num_turns)

+         output += f"--- Conversation {i+1}/{len(prompts)} ---\n"
+         output += conversation_text + "\n\n"  # Append result directly
+
+     output += "="*40 + "\nGeneration complete (check results above for errors)."
     return output

+ # Helper for the Gradio UI to generate prompts using the real backend
+ def generate_prompts_ui(
+     num_prompts: int,
+     model: str,
+     temperature: float,  # Optional model settings
+     top_p: float,
+     max_tokens: int
+ ) -> str:
+     """UI wrapper that calls the generate_prompts backend and formats the result for a Textbox."""
+     # Handle optional settings: 0 means "use the API default"
+     temp_val = temperature if temperature > 0 else None
+     top_p_val = top_p if 0 < top_p <= 1 else None
+     # max_tokens is passed from the UI; fall back to 200 if the UI value is 0
+     max_tokens_val = max_tokens if max_tokens > 0 else 200
+
+     if not model:
+         return "Error: Please select a model for prompt generation."
+     if num_prompts <= 0:
+         return "Error: Number of prompts to generate must be positive."
+     if num_prompts > 50:
+         return "Error: Cannot generate more than 50 prompts at a time."
+
+     print(f"Generating prompts with settings: Temp={temp_val}, Top-P={top_p_val}, MaxTokens={max_tokens_val}")  # Debug print
+
+     try:
+         # Call the actual function from synthgen.py, passing the settings through
+         prompts_list = generate_prompts(
+             num_prompts,
+             model,
+             temperature=temp_val,
+             top_p=top_p_val,
+             max_tokens=max_tokens_val
+         )
+         return "\n".join(prompts_list)
+     except ValueError as e:
+         # Raised by generate_prompts for API or parsing errors
+         return f"Error generating prompts: {e}"
+     except Exception as e:
+         # Any other unexpected error
+         print(f"Unexpected error in generate_prompts_ui: {e}")
+         return f"An unexpected error occurred: {e}"
+
+
+ # --- Modified Generation Wrappers ---
+
+ # Wrapper for text generation + JSON preparation
+ def run_generation_and_prepare_json(
+     prompt: str,
+     model: str,
+     num_samples: int,
+     temperature: float,  # Optional model settings
+     top_p: float,
+     max_tokens: int
+ ):
+     """Generates text samples and prepares a JSON file for download."""
+     # Handle optional settings: OpenRouter expects temperature > 0, so map the
+     # slider's 0 to None and let the API use its default.
+     temp_val = temperature if temperature > 0 else None
+     top_p_val = top_p if 0 < top_p <= 1 else None  # top_p must be > 0 and <= 1
+     max_tokens_val = max_tokens if max_tokens > 0 else None  # Max tokens must be positive
+
+     if not prompt:
+         return "Error: Please enter a prompt.", None
+     if num_samples <= 0:
+         return "Error: Number of samples must be positive.", None
+
+     output_str = f"Generating {num_samples} samples using model '{model}'...\n"
+     output_str += f"(Settings: Temp={temp_val}, Top-P={top_p_val}, MaxTokens={max_tokens_val})\n"
+     output_str += "="*20 + "\n\n"
+     results_list = []
+
+     for i in range(num_samples):
+         # Pass settings to the backend function
+         generated_text = generate_synthetic_text(
+             prompt,
+             model,
+             temperature=temp_val,
+             top_p=top_p_val,
+             max_tokens=max_tokens_val
+         )
+         output_str += f"--- Sample {i+1} ---\n"
+         output_str += generated_text + "\n\n"
+         # Only successful generations go into the downloadable JSON
+         if not generated_text.startswith("Error:"):
+             results_list.append(generated_text)
+
+     output_str += "="*20 + "\nGeneration complete (check results above for errors)."
+     json_filepath = create_json_file(results_list, "text_samples.json")
+     return output_str, json_filepath
+
+
+ # Wrapper for conversation generation + JSON preparation
+ def run_conversation_generation_and_prepare_json(
+     system_prompts_text: str,
+     model: str,
+     num_turns: int,
+     temperature: float,  # Optional model settings
+     top_p: float,
+     max_tokens: int
+ ):
+     """Generates conversations and prepares a JSON file for download."""
+     temp_val = temperature if temperature > 0 else None
+     top_p_val = top_p if 0 < top_p <= 1 else None
+     max_tokens_val = max_tokens if max_tokens > 0 else None
+
+     if not system_prompts_text:
+         return "Error: Please enter or generate at least one system prompt/topic.", None
+     if num_turns <= 0:
+         return "Error: Number of turns must be positive.", None
+
+     prompts = [p.strip() for p in system_prompts_text.strip().split('\n') if p.strip()]
+     if not prompts:
+         return "Error: No valid prompts found in the input.", None
+
+     output_str = f"Generating {len(prompts)} conversations ({num_turns} turns each) using model '{model}'...\n"
+     output_str += f"(Settings: Temp={temp_val}, Top-P={top_p_val}, MaxTokens={max_tokens_val})\n"
+     output_str += "="*40 + "\n\n"
+     results_list_structured = []
+
+     for i, prompt in enumerate(prompts):
+         # Pass settings to the backend function
+         conversation_text = generate_synthetic_conversation(
+             prompt,
+             model,
+             num_turns,
+             temperature=temp_val,
+             top_p=top_p_val,
+             max_tokens=max_tokens_val
+         )
+
+         output_str += f"--- Conversation {i+1}/{len(prompts)} ---\n"
+         output_str += conversation_text + "\n\n"
+
+         # generate_synthetic_conversation prefixes a title line (e.g., "Generated
+         # conversation for ..."), so strip everything up to the first blank line
+         # before parsing the body into a JSON structure.
+         core_conversation_text = conversation_text
+         if "\n\n" in conversation_text:
+             if not conversation_text.startswith("Error:"):
+                 parts = conversation_text.split("\n\n", 1)
+                 if len(parts) > 1:
+                     core_conversation_text = parts[1]
+                 else:  # Title without a double newline; fall back to the full text
+                     core_conversation_text = conversation_text
+             else:
+                 core_conversation_text = None  # Don't try to parse errors
+         elif conversation_text.startswith("Error:"):
+             core_conversation_text = None  # Don't try to parse errors
+         # Otherwise: no double newline, so treat the whole text as the conversation
+
+         if core_conversation_text:
+             messages = parse_conversation_string(core_conversation_text)
+             if messages:  # Add only if parsing succeeded
+                 results_list_structured.append({
+                     "prompt": prompt,
+                     "messages": messages
+                 })
+             else:  # Parsing failed; keep the raw text alongside an error marker
+                 results_list_structured.append({
+                     "prompt": prompt,
+                     "error": "Failed to parse conversation structure.",
+                     "raw_text": core_conversation_text
+                 })
+         elif conversation_text.startswith("Error:"):
+             results_list_structured.append({
+                 "prompt": prompt,
+                 "error": conversation_text  # Error message from generation
+             })
+         else:  # core_conversation_text became None unexpectedly, or the text was only a title
+             results_list_structured.append({
+                 "prompt": prompt,
+                 "error": "Could not extract conversation content for parsing.",
+                 "raw_text": conversation_text
+             })
+
+     output_str += "="*40 + "\nGeneration complete (check results above for errors)."
+
+     # Create the JSON file from the structured list
+     json_filepath = create_json_file(results_list_structured, "conversations.json")
+
+     return output_str, json_filepath
+
+
 # --- Gradio Interface Definition ---
 with gr.Blocks() as demo:
+     gr.Markdown("# Synthetic Data Generator using OpenRouter")
     gr.Markdown(
+         "Generate synthetic text samples or conversations using various models available on OpenRouter."
     )
+     # Removed the api_key_loaded check and warning Markdown
+
+     # Define model choices (shared across tabs).
+     # Consider fetching these dynamically from OpenRouter in the future.
+     model_choices = [
+         "deepseek/deepseek-chat-v3-0324:free",  # Example free model
+         "meta-llama/llama-3.3-70b-instruct:free",
+         "deepseek/deepseek-r1:free",
+         "google/gemini-2.5-pro-exp-03-25:free",
+         "qwen/qwen-2.5-72b-instruct:free",
+         "featherless/qwerky-72b:free",
+         "google/gemma-3-27b-it:free",
+         "mistralai/mistral-small-24b-instruct-2501:free",
+         "deepseek/deepseek-r1-distill-llama-70b:free",
+         "sophosympatheia/rogue-rose-103b-v0.2:free",
+         "nvidia/llama-3.1-nemotron-70b-instruct:free",
+         "microsoft/phi-3-medium-128k-instruct:free",
+         "undi95/toppy-m-7b:free",
+         "huggingfaceh4/zephyr-7b-beta:free",
+         "openrouter/quasar-alpha"
+         # Add more model IDs as needed
+     ]
+     default_model = model_choices[0] if model_choices else None
+
+     # --- Shared Model Settings ---
+     # Use an Accordion for less clutter
+     with gr.Accordion("Model Settings (Optional)", open=False):
+         # Reasonable ranges and defaults; 0 for Top-P and Max Tokens means "use the API default"
+         temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.1, label="Temperature", info="Controls randomness. Higher values are more creative, lower are more deterministic. 0 means use the API default.")
+         top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Top-P (Nucleus Sampling)", info="Considers only tokens with cumulative probability mass >= top_p. 0 means use the API default.")
+         max_tokens_slider = gr.Number(value=0, minimum=0, maximum=8192, step=64, label="Max Tokens", info="Maximum number of tokens to generate in the completion. 0 means use the API default.")
+
+     with gr.Tabs():
+         with gr.TabItem("Text Generation"):
+             with gr.Row():
+                 prompt_input_text = gr.Textbox(label="Prompt", placeholder="Enter your prompt here (e.g., Generate a short product description for a sci-fi gadget)", lines=3)
+             with gr.Row():
+                 model_input_text = gr.Dropdown(
+                     label="OpenRouter Model ID",
+                     choices=model_choices,
+                     value=default_model
+                 )
+                 num_samples_input_text = gr.Number(label="Number of Samples", value=3, minimum=1, maximum=20, step=1)
+
+             generate_button_text = gr.Button("Generate Text Samples")
+             output_text = gr.Textbox(label="Generated Samples", lines=15, show_copy_button=True)
+             # File component for download
+             download_file_text = gr.File(label="Download Samples as JSON")
+
+             generate_button_text.click(
+                 fn=run_generation_and_prepare_json,
+                 inputs=[
+                     prompt_input_text, model_input_text, num_samples_input_text,
+                     temperature_slider, top_p_slider, max_tokens_slider  # Settings inputs
+                 ],
+                 outputs=[output_text, download_file_text]
+             )
+
+         with gr.TabItem("Conversation Generation"):
+             gr.Markdown("Enter one system prompt/topic per line below, or use the 'Generate Prompts' button.")
+             with gr.Row():
+                 # Multi-line textbox for multiple prompts
+                 prompt_input_conv = gr.Textbox(
+                     label="Prompts (one per line)",
+                     lines=5,
+                     placeholder="Enter prompts here, one per line...\ne.g., Act as a pirate discussing treasure maps.\nDiscuss the future of space travel."
+                 )
+             with gr.Row():
+                 # Number of prompts to generate with AI
+                 num_prompts_input_conv = gr.Number(label="Number of Prompts to Generate", value=5, minimum=1, maximum=20, step=1)  # Keep the max reasonable
+                 # Button to trigger AI prompt generation
+                 generate_prompts_button = gr.Button("Generate Prompts using AI")
+             with gr.Row():
+                 # Model selection for conversation generation AND prompt generation
+                 model_input_conv = gr.Dropdown(
+                     label="OpenRouter Model ID (for generation)",
+                     choices=model_choices,
+                     value=default_model
+                 )
+
+             with gr.Row():
+                 # Number of turns per conversation
+                 num_turns_input_conv = gr.Number(label="Number of Turns per Conversation (approx)", value=5, minimum=1, maximum=20, step=1)
+
+             # Generate the conversations from the prompts in the textbox
+             generate_conversations_button = gr.Button("Generate Conversations")
+             output_conv = gr.Textbox(label="Generated Conversations", lines=15, show_copy_button=True)
+             # File component for download
+             download_file_conv = gr.File(label="Download Conversations as JSON")
+
+             # Connect the "Generate Prompts" button to the UI wrapper
+             generate_prompts_button.click(
+                 fn=generate_prompts_ui,
+                 inputs=[
+                     num_prompts_input_conv, model_input_conv,
+                     temperature_slider, top_p_slider, max_tokens_slider  # Settings inputs
+                 ],
+                 outputs=prompt_input_conv
+             )
+
+             # Connect the "Generate Conversations" button to the JSON-preparing wrapper
+             generate_conversations_button.click(
+                 fn=run_conversation_generation_and_prepare_json,
+                 inputs=[
+                     prompt_input_conv, model_input_conv, num_turns_input_conv,
+                     temperature_slider, top_p_slider, max_tokens_slider  # Settings inputs
+                 ],
+                 outputs=[output_conv, download_file_conv]  # Textbox and File
+             )
+
 # Launch the Gradio app
 if __name__ == "__main__":
+     print("Launching Gradio App...")
+     print("Make sure the OPENROUTER_API_KEY environment variable is set.")
+     # share=True creates a temporary public link when running locally
+     demo.launch(share=True)
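
Note that synthgen.py itself is not part of this diff; app.py only shows its call sites. As a reading aid, here is a minimal sketch of the interface those calls assume. The signatures are inferred from the call sites above, and the bodies are placeholders, not the Space's actual implementation:

# Hypothetical stubs inferred from the call sites in app.py; the real
# synthgen.py wraps the OpenRouter API and is not shown in this commit.
from typing import Optional

def generate_synthetic_text(prompt: str, model: str,
                            temperature: Optional[float] = None,
                            top_p: Optional[float] = None,
                            max_tokens: Optional[int] = None) -> str:
    """Returns generated text, or a string starting with 'Error:' on failure."""
    raise NotImplementedError

def generate_synthetic_conversation(prompt: str, model: str, num_turns: int,
                                    temperature: Optional[float] = None,
                                    top_p: Optional[float] = None,
                                    max_tokens: Optional[int] = None) -> str:
    """Returns a title line, a blank line, then 'User:'/'Assistant:' turns,
    or a string starting with 'Error:'."""
    raise NotImplementedError

def generate_prompts(num_prompts: int, model: str,
                     temperature: Optional[float] = None,
                     top_p: Optional[float] = None,
                     max_tokens: Optional[int] = None) -> list[str]:
    """Returns a list of prompt strings; raises ValueError on API or parsing errors."""
    raise NotImplementedError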
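
For reference, a quick illustration of the "User:"/"Assistant:" transcript format that parse_conversation_string matches. The transcript below is a made-up example, not output from the commit:

# Illustrative input in the "Role: content" line format matched by the
# regex in parse_conversation_string (roles are lowercased in the output).
sample = (
    "User: What is synthetic data?\n"
    "Assistant: Text produced by a model rather than collected from people.\n"
    "User: Name one use case.\n"
    "Assistant: Augmenting a small training set."
)

messages = parse_conversation_string(sample)
# messages == [
#     {"role": "user", "content": "What is synthetic data?"},
#     {"role": "assistant", "content": "Text produced by a model rather than collected from people."},
#     {"role": "user", "content": "Name one use case."},
#     {"role": "assistant", "content": "Augmenting a small training set."},
# ]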