MINEOGO committed on
Commit
b865b71
·
verified ·
1 Parent(s): 155c47a

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -285
app.py DELETED
@@ -1,285 +0,0 @@
1
# Standard library
import os
import re  # For post-processing fallback
import secrets  # constant-time comparison for the API secret key
from typing import Literal, Optional

# Third-party
import gradio as gr
from huggingface_hub import InferenceClient

# --- FastAPI & Pydantic Imports ---
from fastapi import FastAPI, Request, HTTPException, Depends, Body
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
11
-
12
# --- Configuration ---
# Optional Hugging Face token; without it, anonymous rate limits apply.
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "HuggingFaceH4/zephyr-7b-beta"
# --- Define the Secret Key for the API ---
# SECURITY FIX: read the key from the environment instead of committing it to
# source control. The hard-coded fallback preserves the previous behavior for
# deployments that have not yet set API_SECRET_KEY.
API_SECRET_KEY = os.getenv("API_SECRET_KEY", "onlyfordearygt")
17
-
18
# --- Initialize Inference Client ---
# `client` stays None if initialization fails; downstream code checks for this.
client = None
try:
    print(f"Attempting to initialize Inference Client for model: {MODEL}")
    client_kwargs = {"model": MODEL}
    if API_TOKEN:
        print("Using HF Token found in environment.")
        client_kwargs["token"] = API_TOKEN
    else:
        print("HF Token not found. Running without token (may lead to rate limits).")
    client = InferenceClient(**client_kwargs)
    print("Inference Client initialized successfully.")
except Exception as e:
    print(f"Error initializing Inference Client: {e}")
    # Keep the app importable even when the model client is unavailable;
    # generation paths check `client is None` and report the outage.
    client = None
    print("WARNING: AI Client initialization failed. API/Generation will not work.")
34
-
35
-
36
# --- Pydantic Model for API Request Body ---
class GenerateRequest(BaseModel):
    """Request body for POST /api/generate; pydantic validates types and ranges."""

    # Free-text description of the website to generate.
    prompt: str
    # Hint only — the generator always emits pure frontend code.
    backend_choice: Literal["Static", "Flask", "Node.js"] = "Static"
    file_structure: Literal["Single File", "Multiple Files"] = "Multiple Files"
    max_tokens: Optional[int] = Field(default=3072, gt=128, le=4096)  # Add validation
    temperature: Optional[float] = Field(default=0.7, gt=0.0, le=2.0)
    top_p: Optional[float] = Field(default=0.9, gt=0.0, le=1.0)
    secret_key: str  # Required for authentication
45
-
46
-
47
# --- Core Code Generation Function ---
# Note: This function is used by both the Gradio UI (streaming) and the API
# (which consumes the generator and keeps the final yielded value).

def _clean_generated_code(raw: str) -> str:
    """Post-process raw model output into plain source code.

    Strips surrounding whitespace, markdown code fences, leaked chat-template
    tags (e.g. <|user|> / <|assistant|>), and common conversational lead-in
    phrases when they clearly sit at the start of the output.
    """
    cleaned = raw.strip()
    cleaned = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned)  # leading ``` fence
    cleaned = re.sub(r"\n?\s*```\s*$", "", cleaned)        # trailing ``` fence
    cleaned = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned, flags=re.IGNORECASE)
    # Remove common phrases only if they are clearly at the start and unlikely to be code.
    common_phrases_start = [
        "Here is the code:",
        "Okay, here is the code:",
        "Here's the code:",
        "Sure, here is the code you requested:",
    ]
    for phrase in common_phrases_start:
        if cleaned.lower().startswith(phrase.lower()):
            cleaned = cleaned[len(phrase):].lstrip()
    return cleaned.strip()


def generate_code(
    prompt: str,
    backend_choice: str,
    file_structure: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Generate website frontend code for the given prompt and options.

    Yields the cumulative response token by token for live UI updates; the
    *final* yielded value is the complete, cleaned code (this is what the API
    endpoint keeps). On failure, yields a markdown string starting with
    "## Error" and stops.

    Args:
        prompt: User's description of the desired website.
        backend_choice: "Static" / "Flask" / "Node.js" — hint only, frontend code is generated.
        file_structure: "Single File" or "Multiple Files".
        max_tokens: Generation length limit passed to the inference API.
        temperature: Sampling temperature.
        top_p: Nucleus sampling parameter.
    """
    # Bail out early if the module-level client failed to initialize.
    if client is None:
        final_error_message = "## Error\n\nAI Model Client not initialized. Generation is unavailable."
        print(final_error_message)
        yield final_error_message
        return  # Stop execution for this generator

    print("--- Generating Code ---")
    print(f"Prompt: {prompt[:100]}...")
    print(f"Backend Context: {backend_choice}")
    print(f"File Structure: {file_structure}")
    print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

    if file_structure == "Single File":
        file_structure_instruction = (
            "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
            "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
            "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
            "Do NOT use file separation markers."
        )
    else:  # Multiple Files
        file_structure_instruction = (
            "- **File Structure is 'Multiple Files':** Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
            "Use these EXACT markers: `<!-- index.html -->`, `/* style.css */`, `// script.js` (only if JS is needed).\n"
            "- Place the corresponding code directly after each marker.\n"
            "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
        )

    system_message = (
        "You are an expert frontend web developer AI. Your primary goal is to generate **complete, visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
        "Follow ALL these rules with EXTREME STRICTNESS:\n"
        "1. **STYLE & DETAIL:** Generate rich, detailed code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects. Aim for a high-quality visual result.\n"
        "2. **COMPLETENESS:** Generate the *entire* requested code structure. Ensure all files/sections are fully generated and properly closed. **DO NOT STOP GENERATING PREMATURELY.** Finish the whole task.\n"
        "3. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
        "4. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (`<!DOCTYPE html>` or `<!-- index.html -->`).\n"
        "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code. DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
        "6. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
        f"7. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
        f"   {file_structure_instruction}\n"
        # BUGFIX: this line was missing its f-prefix, so the literal text
        # "{backend_choice}" was sent to the model instead of the chosen value.
        f"8. **BACKEND CONTEXT ({backend_choice}):** Use as a hint for frontend structure only. Generate ONLY frontend code.\n"
        "9. **FRONTEND ONLY:** Do NOT generate server-side code.\n"
        "10. **ACCURACY:** Generate functional code addressing the user's prompt.\n\n"
        "REMEMBER: Create COMPLETE, visually appealing code. Output ONLY raw code. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
    )

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": f"Generate the complete website frontend code for: {prompt}"},
    ]

    # Single accumulator (the original kept two identical copies of the text).
    response_stream = ""
    token_count = 0

    try:
        print("Sending request to Hugging Face Inference API...")
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for message in stream:
            token = message.choices[0].delta.content
            # Some stream chunks carry no string content (e.g. role deltas); skip them.
            if isinstance(token, str):
                token_count += 1
                response_stream += token
                yield response_stream  # Yield cumulative response for live UI update

        print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(response_stream)}")
        if token_count >= max_tokens - 10:
            print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")

        # --- Post-Processing ---
        # Yield the final cleaned response; API callers keep this last value.
        yield _clean_generated_code(response_stream)

    except Exception as e:
        error_message = f"An error occurred during the API call: {e}"
        print(error_message)
        # Yield a markdown error for the Gradio UI; the API route detects the
        # "## Error" prefix and converts it into an HTTP error response.
        yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}"
163
-
164
-
165
# --- Build Gradio Interface using Blocks ---
# Must be defined before the FastAPI app that mounts it.
with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
    gr.Markdown("# ✨ Website Code Generator ✨")
    gr.Markdown(
        "Describe the website you want. The AI will generate **visually styled** frontend code (HTML, CSS, JS) using **plenty of CSS**. "
        "The code appears live below.\n"
        "**Important:**\n"
        "1. This generator creates code based *only* on your initial description. To refine, modify your description and generate again.\n"
        "2. **If the code output stops abruptly**, it likely hit the 'Max New Tokens' limit. **Increase the slider value below** and try again!\n"
        "3. An API endpoint is available at `/api/generate` (POST request, requires secret key)."  # Notify about API
    )

    with gr.Row():
        # Left column: user inputs.
        with gr.Column(scale=2):
            description_box = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A modern portfolio...",
                lines=6,
            )
            backend_hint = gr.Radio(
                ["Static", "Flask", "Node.js"],
                label="Backend Context Hint",
                value="Static",
                info="Hint for AI - generates ONLY frontend code.",
            )
            structure_choice = gr.Radio(
                ["Multiple Files", "Single File"],
                label="Output File Structure",
                value="Multiple Files",
                info="Choose 'Single File' or 'Multiple Files'.",
            )
            run_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary")

        # Right column: live code output.
        with gr.Column(scale=3):
            output_code = gr.Code(
                label="Generated Code (Raw Output - Aiming for Style!)",
                language="html",
                lines=30,
                interactive=False,
            )

    with gr.Accordion("Advanced Generation Settings", open=False):
        tokens_slider = gr.Slider(
            minimum=512, maximum=4096, value=3072, step=256,
            label="Max New Tokens", info="Max length. Increase if output is cut off!",
        )
        temp_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1,
            label="Temperature", info="Controls randomness.",
        )
        topp_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.9, step=0.05,
            label="Top-P", info="Alternative randomness control.",
        )

    # --- Connect Gradio Inputs/Outputs ---
    # generate_code is a generator, so the Code component updates live.
    run_button.click(
        fn=generate_code,
        inputs=[
            description_box,
            backend_hint,
            structure_choice,
            tokens_slider,
            temp_slider,
            topp_slider,
        ],
        outputs=output_code,
    )

    # --- Gradio Examples ---
    gr.Examples(
        examples=[
            ["A simple counter page...", "Static", "Single File"],
            ["A responsive product grid...", "Static", "Multiple Files"],
            ["A personal blog homepage...", "Flask", "Multiple Files"],
            ["A 'Coming Soon' page...", "Static", "Multiple Files"],
        ],
        inputs=[description_box, backend_hint, structure_choice],
        label="Example Prompts (Aiming for Style)",
    )
211
-
212
-
213
# --- Create FastAPI App and Mount Gradio ---
# FastAPI is the main server; the Gradio UI is mounted onto it later.
app = FastAPI()


# --- Define the API Endpoint ---
@app.post("/api/generate")
async def api_generate_code(payload: GenerateRequest):
    """Generate website code via the REST API.

    Authenticates the caller with payload.secret_key, then drives the
    generate_code generator to completion and returns the final (cleaned)
    code as JSON: {"generated_code": "..."}.

    Raises:
        HTTPException 403: invalid secret key.
        HTTPException 503: AI model client not initialized.
        HTTPException 500: generation failed or an unexpected error occurred.
    """
    print(f"API Request received for prompt: {payload.prompt[:50]}...")

    # --- Authentication ---
    # SECURITY FIX: compare_digest runs in constant time, so the comparison
    # does not leak key contents through response timing (unlike `!=`).
    if not secrets.compare_digest(payload.secret_key, API_SECRET_KEY):
        print("API Authentication failed: Invalid secret key.")
        raise HTTPException(status_code=403, detail="Invalid secret key")

    # Check if HF client is available
    if client is None:
        print("API Error: AI Client not initialized.")
        raise HTTPException(status_code=503, detail="AI Model Client not initialized. Service unavailable.")

    print("API Authentication successful.")

    # --- Call the generator function and consume it ---
    # NOTE(review): this consumes a synchronous generator inside an async
    # route, which blocks the event loop for the duration of the generation;
    # consider run_in_executor if concurrency becomes an issue.
    final_code = ""
    try:
        # The generator's *last* yielded value is the complete, cleaned code.
        code_generator = generate_code(
            prompt=payload.prompt,
            backend_choice=payload.backend_choice,
            file_structure=payload.file_structure,
            max_tokens=payload.max_tokens,
            temperature=payload.temperature,
            top_p=payload.top_p,
        )
        for code_chunk in code_generator:
            final_code = code_chunk  # Keep overwriting until the last one

        # generate_code signals internal failure by yielding a markdown error.
        if final_code.strip().startswith("## Error"):
            print(f"API Error during generation: {final_code}")
            # Extract reason if possible, otherwise return generic error
            reason = final_code.split("Reason:**")[-1].strip() if "Reason:**" in final_code else "Generation failed internally."
            raise HTTPException(status_code=500, detail=f"Code generation failed: {reason}")

        print(f"API generated code length: {len(final_code)}")
        # --- Return the final code ---
        return JSONResponse(content={"generated_code": final_code})

    except HTTPException as http_exc:
        # Re-raise HTTPException if it's already one (like auth failure or internal error)
        raise http_exc
    except Exception as e:
        # Catch any other unexpected errors during generation/consumption
        print(f"API - Unexpected Error during generation: {e}")
        raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}")
271
-
272
-
273
- # --- Mount the Gradio app onto the FastAPI app ---
274
- # The Gradio UI will be available at the root path "/"
275
- app = gr.mount_gradio_app(app, demo, path="/")
276
-
277
- # --- Launch ---
278
- # Use Uvicorn to run the FastAPI app (which now includes Gradio)
279
- if __name__ == "__main__":
280
- import uvicorn
281
- print("Starting FastAPI server with Gradio mounted...")
282
- # Recommended settings for Hugging Face Spaces: host="0.0.0.0", port=7860
283
- # You might need to adjust port if running locally and 7860 is taken.
284
- uvicorn.run(app, host="0.0.0.0", port=7860)
285
- # Note: demo.launch() is no longer used directly here, as uvicorn runs the combined app.