File size: 14,027 Bytes
aa6f369
 
 
 
 
 
 
e13fef7
aa6f369
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1fb711c
aa6f369
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6541c57
 
 
 
 
 
c873383
 
 
 
 
 
 
 
6541c57
 
 
 
c873383
aa6f369
 
 
 
 
9e905c0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re # For post-processing fallback

# --- Configuration ---
# HF_TOKEN is optional; without it the public endpoint is used (rate-limited).
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"  # Or choose another suitable model

# --- Initialize Inference Client ---
# Fail fast at import time: if the client cannot be created, surface a
# Gradio error immediately rather than on the first generation request.
try:
    print(f"Attempting to initialize Inference Client for model: {MODEL}")
    if API_TOKEN:
        print("Using HF Token found in environment.")
    else:
        print("HF Token not found. Running without token (may lead to rate limits).")
    # Pass the token only when one is actually present.
    _client_kwargs = {"token": API_TOKEN} if API_TOKEN else {}
    client = InferenceClient(model=MODEL, **_client_kwargs)
    print("Inference Client initialized successfully.")
except Exception as e:
    print(f"Error initializing Inference Client: {e}")
    raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")

# --- Core Code Generation Function ---
def generate_code(
    prompt: str,
    backend_choice: str,
    file_structure: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """
    Generate website frontend code from a user prompt via the HF Inference API.

    Streams the cumulative raw output chunk by chunk (so the UI updates live),
    then yields one final cleaned version with markdown fences, leaked
    chat-role tags, and common conversational filler stripped as a fallback
    safety net.

    Args:
        prompt: User's description of the desired website.
        backend_choice: Backend context hint ("Static", "Flask", "Node.js");
            used only to shape the frontend markup, never to emit server code.
        file_structure: "Single File" or "Multiple Files" output layout.
        max_tokens: Maximum new tokens for the API call (from the UI slider).
        temperature: Sampling temperature (higher = more random).
        top_p: Nucleus-sampling probability mass.

    Yields:
        str: Progressively longer raw output while streaming, then the final
        cleaned code; on API failure, a markdown-formatted error message.
    """
    print("--- Generating Code ---")
    print(f"Prompt: {prompt[:100]}...")
    print(f"Backend Context: {backend_choice}")
    print(f"File Structure: {file_structure}")
    # Log the max_tokens value being used for this request
    print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

    # --- Dynamically Build System Message ---
    if file_structure == "Single File":
        file_structure_instruction = (
            "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
            "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
            "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
            "Do NOT use file separation markers."
        )
    else:  # Multiple Files
        file_structure_instruction = (
            "- **File Structure is 'Multiple Files':** Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
            "Use these EXACT markers: `<!-- index.html -->`, `/* style.css */`, `// script.js` (only if JS is needed).\n"
            "- Place the corresponding code directly after each marker.\n"
            "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
        )

    # Assemble the full system message - emphasizing completeness and NO premature stopping
    system_message = (
        "You are an expert frontend web developer AI. Your primary goal is to generate **complete, visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
        "Follow ALL these rules with EXTREME STRICTNESS:\n"
        "1.  **STYLE & DETAIL:** Generate rich, detailed code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects. Aim for a high-quality visual result.\n"
        "2.  **COMPLETENESS:** Generate the *entire* requested code structure. Ensure all files/sections are fully generated and properly closed (e.g., closing HTML tags `</html>`, CSS braces `}`, script tags `</script>`). **DO NOT STOP GENERATING PREMATURELY.** Finish the whole task.\n"
        "3.  **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
        "4.  **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (`<!DOCTYPE html>` or `<!-- index.html -->`).\n"
        "5.  **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (`</html>`, `}`, `;`, etc.). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
        "6.  **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
        f"7.  **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
        f"    {file_structure_instruction}\n"
        # BUG FIX: this line was a plain string, so the literal text
        # "{backend_choice}" (not its value) was being sent to the model.
        f"8.  **BACKEND CONTEXT ({backend_choice}):** Use as a hint for frontend structure only. Generate ONLY frontend code.\n"
        "9.  **FRONTEND ONLY:** Do NOT generate server-side code.\n"
        "10. **ACCURACY:** Generate functional code addressing the user's prompt.\n\n"
        "REMEMBER: Create COMPLETE, visually appealing code. Output ONLY raw code. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
    )

    # --- Construct the messages for the API ---
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": f"Generate the complete website frontend code for: {prompt}"}
    ]

    # --- Stream the response from the API ---
    # Single accumulator (the previous duplicate buffer was always identical).
    full_response = ""
    token_count = 0  # Crude count of streamed chunks, for diagnostics only.
    try:
        print("Sending request to Hugging Face Inference API...")
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,  # Use the value from the slider
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for message in stream:
            token = message.choices[0].delta.content
            if isinstance(token, str):
                token_count += 1  # Crude approximation of tokens received
                full_response += token
                yield full_response  # Yield cumulative response for live update

        print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(full_response)}")
        # If we received roughly max_tokens chunks, the output was likely truncated.
        if token_count >= max_tokens - 10:
            print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")

        # --- Post-Processing (Fallback Safety Net) ---
        cleaned_response = full_response.strip()
        # Strip leading/trailing markdown code fences the model may emit anyway.
        cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
        cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
        # Strip leaked chat-role tags such as <|user|> / <|assistant|>.
        cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
        common_phrases = [
            "Here is the code:", "Okay, here is the code:", "Here's the code:",
            "Sure, here is the code you requested:", "Let me know if you need anything else.",
        ]
        # Only strip matches from the START of the output; trimming from the
        # end is risky because it could eat valid closing code or comments.
        for phrase in common_phrases:
            if cleaned_response.lower().startswith(phrase.lower()):
                cleaned_response = cleaned_response[len(phrase):].lstrip()

        yield cleaned_response.strip()  # Yield final cleaned response

    except Exception as e:
        error_message = f"An error occurred during the API call: {e}"
        print(error_message)
        yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."


# --- Build Gradio Interface using Blocks ---
# Declarative UI definition: component creation order determines layout.
with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
    gr.Markdown("# ✨ Website Code Generator ✨")
    gr.Markdown(
        "Describe the website you want. The AI will generate **visually styled** frontend code (HTML, CSS, JS) using **plenty of CSS**. "
        "The code appears live below.\n"
        "**Important:**\n"
        "1. This generator creates code based *only* on your initial description. To refine, modify your description and generate again.\n"
        "2. **If the code output stops abruptly**, it likely hit the 'Max New Tokens' limit. **Increase the slider value below** and try again!"
    )

    # Two-column layout: inputs/controls on the left, live code output on the right.
    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A modern portfolio landing page with smooth scroll nav, stylish hero, project cards with hover effects, contact form.",
                lines=6,
            )
            backend_radio = gr.Radio(
                ["Static", "Flask", "Node.js"], label="Backend Context Hint", value="Static",
                info="Hint for AI (e.g., {{var}}) - generates ONLY frontend code."
            )
            file_structure_radio = gr.Radio(
                ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
                info="Choose 'Single File' (all in index.html) or 'Multiple Files' (separate css/js)."
            )
            generate_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary")

        with gr.Column(scale=3):
            # Streaming target: generate_code yields cumulative text into this box.
            code_output = gr.Code(
                label="Generated Code (Raw Output - Aiming for Style!)",
                language="html",
                lines=30,
                interactive=False,
            )

    # Sampling controls, collapsed by default.
    with gr.Accordion("Advanced Generation Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512,
            maximum=4096, # Upper bound for generated tokens; confirm against the selected MODEL's output limit
            value=3072,   # High default to reduce truncated output
            step=256,     # Larger steps might be practical
            label="Max New Tokens",
            info="Max length of generated code. Increase if output is cut off!"
            )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature",
            info="Controls randomness. Lower=focused, Higher=creative."
            )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P",
            info="Alternative randomness control."
            )

    # --- Connect Inputs/Outputs ---
    # generate_code is a generator, so code_output updates live as chunks arrive.
    generate_button.click(
        fn=generate_code,
        inputs=[
            prompt_input,
            backend_radio,
            file_structure_radio,
            max_tokens_slider,
            temperature_slider,
            top_p_slider,
        ],
        outputs=code_output,
    )

    # --- Examples ---
    # Clicking an example fills the three input components (does not auto-run).
    gr.Examples(
        examples=[
            ["A simple counter page with a number display, an increment button, and a decrement button. Style the buttons nicely and center everything.", "Static", "Single File"],
            ["A responsive product grid for an e-commerce site. Each card needs an image, title, price, and 'Add to Cart' button with a hover effect. Use modern CSS.", "Static", "Multiple Files"],
            ["A personal blog homepage featuring a clean header with navigation, a main content area for post summaries (placeholders ok), and a simple footer. Use a nice font.", "Flask", "Multiple Files"],
            ["A 'Coming Soon' page with a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Static", "Multiple Files"],
            ["A to-do list app with add, edit, and delete features. Use JavaScript for interactivity. Save data in localStorage.", "Static", "Single File"],
            ["A weather dashboard that fetches current weather using an API and shows it with icons. Style with CSS grid.", "Static", "Multiple Files"],
            ["A personal portfolio site with a hero section, skills, project gallery, and contact form. Add animations using AOS or GSAP.", "Static", "Multiple Files"],
            ["A freelancer portfolio template with service cards, testimonials, and pricing sections. Use Tailwind CSS.", "Static", "Multiple Files"],
            ["A real-time chat web app using Flask-SocketIO. Basic login and support for multiple rooms.", "Flask", "Multiple Files"],
            ["A quiz app with multiple-choice questions, score tracking, and a final result page. Use JS to manage quiz logic.", "Static", "Single File"],
            ["A modern developer portfolio with sections for resume download, tech stack icons, GitHub activity, and a dark mode toggle.", "Static", "Multiple Files"]
        ],
        inputs=[prompt_input, backend_radio, file_structure_radio],
        label="Example Prompts (Aiming for Style)"
    )
    
# --- Launch ---
if __name__ == "__main__":
    print("Starting Gradio app...")
    # Enable the request queue (required for reliable streaming on HF Spaces).
    demo.queue(max_size=10).launch()
    # launch() blocks; this line only runs after the server shuts down.
    # BUG FIX: corrected typo "launchdd" -> "launched".
    print("Gradio app launched.")