MINEOGO committed
Commit 244ea60 · verified · 1 Parent(s): 1c5dbd6

Create app.py

Files changed (1)
  1. app.py +215 -0
app.py ADDED
@@ -0,0 +1,215 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import os
+ import re # For post-processing fallback
+
+ # --- Configuration ---
+ API_TOKEN = os.getenv("HF_TOKEN", None)
+ MODEL = "HuggingFaceH4/zephyr-7b-beta" # Or choose another suitable model
+
+ # --- Initialize Inference Client ---
+ try:
+     print(f"Attempting to initialize Inference Client for model: {MODEL}")
+     if API_TOKEN:
+         print("Using HF Token found in environment.")
+         client = InferenceClient(model=MODEL, token=API_TOKEN)
+     else:
+         print("HF Token not found. Running without token (may lead to rate limits).")
+         client = InferenceClient(model=MODEL)
+     print("Inference Client initialized successfully.")
+ except Exception as e:
+     print(f"Error initializing Inference Client: {e}")
+     raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
+
+ # --- Core Code Generation Function ---
+ def generate_code(
+     prompt: str,
+     backend_choice: str,
+     file_structure: str,
+     max_tokens: int,
+     temperature: float,
+     top_p: float,
+ ):
+     """
+     Generates website code based on user prompt and choices.
+     Aims for richer CSS and strictly outputs ONLY raw code.
+     Yields the code token by token for live updates.
+     """
+     print(f"--- Generating Code ---")
+     print(f"Prompt: {prompt[:100]}...")
+     print(f"Backend Context: {backend_choice}")
+     print(f"File Structure: {file_structure}")
+     print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")
+
+     # --- Dynamically Build System Message Based on File Structure & Style Request ---
+
+     if file_structure == "Single File":
+         file_structure_instruction = (
+             "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
+             "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
+             "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
+             "Do NOT use file separation markers."
+         )
+     else: # Multiple Files
+         file_structure_instruction = (
+             "- **File Structure is 'Multiple Files':** Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
+             "Use these EXACT markers: `<!-- index.html -->`, `/* style.css */`, `// script.js` (only if JS is needed).\n"
+             "- Place the corresponding code directly after each marker.\n"
+             "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
+         )
+
+     # Assemble the full system message with enhanced style guidance and stricter output rules
+     system_message = (
+         "You are an expert frontend web developer AI. Your primary goal is to generate **visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
+         "Follow ALL these rules with EXTREME STRICTNESS:\n"
+         "1. **STYLE & DETAIL:** Generate rich, detailed code. Don't just make minimal examples. Use **plenty of CSS** for layout (Flexbox/Grid), spacing (padding/margin), typography (fonts), colors, and consider adding subtle transitions or effects for a polished look. Aim for a high-quality visual result.\n"
+         "2. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions ('Here is the code...'), NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>` before, during, or after the code.\n"
+         "3. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (e.g., `<!DOCTYPE html>` or `<!-- index.html -->`). NO leading spaces, newlines, or any other characters.\n"
+         "4. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (e.g., the final `</html>`, `}`, or `;`). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
+         "5. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
+         f"6. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure below:\n"
+         f" {file_structure_instruction}\n" # Insert the specific instruction here
+         f"7. **BACKEND CONTEXT ({backend_choice}):** Use this as a hint for frontend structure (e.g., placeholders like `{{{{ variable }}}}` if 'Flask' is chosen), but ONLY generate the static frontend code (HTML, CSS, client-side JS).\n"
+         "8. **FRONTEND ONLY:** Do NOT generate server-side code (Python, Node.js, etc.).\n"
+         "9. **ACCURACY:** Generate functional code that directly addresses the user's prompt.\n\n"
+         "REMEMBER: Create visually appealing code. Output ONLY the raw code. START immediately with code. END immediately with code. NO extra text or tags EVER." # Final reinforcement
+     )
+
+     # --- Construct the messages for the API ---
+     messages = [
+         {"role": "system", "content": system_message},
+         {"role": "user", "content": f"Generate the website frontend code for: {prompt}"} # Slightly rephrased user message
+     ]
+
+     # --- Stream the response from the API ---
+     response_stream = ""
+     full_response_for_cleaning = ""
+     try:
+         print("Sending request to Hugging Face Inference API...")
+         for message in client.chat_completion(
+             messages=messages,
+             max_tokens=max_tokens,
+             stream=True,
+             temperature=temperature, # User controlled - 0.7 is a reasonable default balance
+             top_p=top_p,
+         ):
+             token = message.choices[0].delta.content
+             if isinstance(token, str):
+                 response_stream += token
+                 full_response_for_cleaning += token
+                 yield response_stream # Yield cumulative response for live update
+
+         print(f"API stream finished. Raw length: {len(full_response_for_cleaning)}")
+
+         # --- Post-Processing (Fallback Safety Net) ---
+         # Primarily rely on the prompt, but clean common issues just in case.
+         cleaned_response = full_response_for_cleaning.strip()
+
+         # Remove potential leading/trailing markdown code fences
+         cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
+         cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
+
+         # Remove potential conversational tags if they slip through (less likely now)
+         cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
+
+         # Remove common introductory/closing phrases if they slip through
+         common_phrases = [
+             "Here is the code:", "Okay, here is the code:", "Here's the code:",
+             "Sure, here is the code you requested:", "Let me know if you need anything else."
+             # Add more if needed
+         ]
+         for phrase in common_phrases:
+             # Compare against the current (possibly already-trimmed) text on every pass
+             if cleaned_response.lower().startswith(phrase.lower()):
+                 cleaned_response = cleaned_response[len(phrase):].lstrip()
+             if cleaned_response.lower().endswith(phrase.lower()):
+                 cleaned_response = cleaned_response[:-len(phrase)].rstrip()
+
+         # Yield the final cleaned response *once* after streaming.
+         yield cleaned_response.strip()
+
+     except Exception as e:
+         error_message = f"An error occurred during the API call: {e}"
+         print(error_message)
+         yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
+     gr.Markdown("# ✨ Website Code Generator ✨") # Added some flair
+     gr.Markdown(
+         "Describe the website you want. The AI will generate **visually styled** frontend code (HTML, CSS, JS) using **plenty of CSS**. "
+         "The code appears live below. \n"
+         "**Important:** This generator creates code based *only* on your initial description. To refine the output, modify your description and generate again." # Added clarification
+     )
+
+     with gr.Row():
+         with gr.Column(scale=2):
+             prompt_input = gr.Textbox(
+                 label="Website Description",
+                 placeholder="e.g., A modern portfolio landing page with a smooth scroll navigation, a stylish hero section, project cards with hover effects, and a contact form.", # More ambitious placeholder
+                 lines=6, # Slightly more lines
+             )
+             backend_radio = gr.Radio(
+                 ["Static", "Flask", "Node.js"], label="Backend Context Hint", value="Static",
+                 info="Hint for AI (e.g., template placeholders) - generates ONLY frontend code."
+             )
+             file_structure_radio = gr.Radio(
+                 ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
+                 info="Choose 'Single File' (all in index.html) or 'Multiple Files' (separate css/js)."
+             )
+             generate_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary") # Updated button text
+
+         with gr.Column(scale=3):
+             code_output = gr.Code(
+                 label="Generated Code (Raw Output - Aiming for Style!)", # Updated label
+                 language="html",
+                 lines=30, # More lines for potentially longer code
+                 interactive=False,
+             )
+
+     with gr.Accordion("Advanced Generation Settings", open=False):
+         max_tokens_slider = gr.Slider(
+             minimum=512, maximum=4096, value=2560, step=128, label="Max New Tokens", # Increased default
+             info="Max length of generated code. Increase for complex pages."
+         )
+         temperature_slider = gr.Slider(
+             minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature", # Default 0.7 is often good
+             info="Controls randomness. Lower=more predictable, Higher=more creative."
+         )
+         top_p_slider = gr.Slider(
+             minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P",
+             info="Alternative randomness control."
+         )
+
+     # --- Connect Inputs/Outputs ---
+     generate_button.click(
+         fn=generate_code,
+         inputs=[
+             prompt_input,
+             backend_radio,
+             file_structure_radio,
+             max_tokens_slider,
+             temperature_slider,
+             top_p_slider,
+         ],
+         outputs=code_output,
+     )
+
+     # --- Examples ---
+     gr.Examples(
+         examples=[
+             ["A simple counter page with a number display, an increment button, and a decrement button. Style the buttons nicely and center everything.", "Static", "Single File"],
+             ["A responsive product grid for an e-commerce site. Each card needs an image, title, price, and 'Add to Cart' button with a hover effect. Use modern CSS.", "Static", "Multiple Files"],
+             ["A personal blog homepage featuring a clean header with navigation, a main content area for post summaries (placeholders ok), and a simple footer. Use a nice font.", "Flask", "Multiple Files"],
+             ["A 'Coming Soon' page with a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Static", "Multiple Files"]
+         ],
+         inputs=[prompt_input, backend_radio, file_structure_radio],
+         label="Example Prompts (Aiming for Style)" # Updated label
+     )
+
+ # --- Launch ---
+ if __name__ == "__main__":
+     print("Starting Gradio app...")
+     demo.queue(max_size=10).launch()
+     print("Gradio app launched.")