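"""HF Space Auto-Builder.

A Gradio app that wires two Gemini agents into a loop: a Code-Gen agent writes
the app file and pushes it to a Hugging Face Space, while a read-only Debug
agent reviews the build and run logs; the loop retries until the Space comes
up clean or six attempts are exhausted.
"""
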
import re
import json
import time
import requests
import importlib.metadata
import gradio as gr
from huggingface_hub import (
    create_repo, upload_file, list_models, constants
)
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
from google import genai
from google.genai.types import Tool, GenerateContentConfig, GoogleSearch

# — USER INFO & MODEL LISTING —

def show_profile(profile: gr.OAuthProfile | None) -> str:
    return f"✅ Logged in as **{profile.username}**" if profile else "*Not logged in.*"

def list_private_models(
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
) -> str:
    if not profile or not oauth_token:
        return "Please log in to see your models."
    models = [
        f"{m.id} ({'private' if m.private else 'public'})"
        for m in list_models(author=profile.username, token=oauth_token.token)
    ]
    return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)

# — UTILITIES —

def get_sdk_version(sdk_choice: str) -> str:
    pkg = "gradio" if sdk_choice == "gradio" else "streamlit"
    try:
        return importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        return "UNKNOWN"

def extract_code(text: str) -> str:
    blocks = re.findall(r"```(?:\w*\n)?([\s\S]*?)```", text)
    return blocks[-1].strip() if blocks else text.strip()
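
# Example: extract_code("use this:\n```python\nprint('hi')\n```") returns
# "print('hi')". The last fenced block wins; replies with no fences fall
# back to the stripped raw text.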

def classify_errors(logs: str) -> str:
    errs = set()
    for line in logs.splitlines():
        if "SyntaxError" in line:
            errs.add("syntax")
        elif "ImportError" in line or "ModuleNotFoundError" in line:
            errs.add("import")
        elif "Traceback" in line or "Exception" in line:
            errs.add("runtime")
    return ", ".join(errs) or "unknown"

# — HF SPACE LOGGING —
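
# Assumption: the log-streaming endpoint on api.hf.space expects the
# short-lived, Space-scoped JWT returned by the /jwt route, not a plain
# user token.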

def _get_space_jwt(repo_id: str) -> str:
    url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
    r = get_session().get(url, headers=build_hf_headers())
    hf_raise_for_status(r)
    return r.json()["token"]

def fetch_logs(repo_id: str, level: str) -> str:
    jwt = _get_space_jwt(repo_id)
    url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
    lines = []
    with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
        hf_raise_for_status(resp)
        for raw in resp.iter_lines():
            # The endpoint streams server-sent events; each payload line is
            # prefixed with "data: " followed by a JSON object.
            if raw.startswith(b"data: "):
                try:
                    ev = json.loads(raw[len(b"data: "):].decode())
                    ts, txt = ev.get("timestamp", ""), ev.get("data", "")
                    lines.append(f"[{ts}] {txt}")
                except (json.JSONDecodeError, UnicodeDecodeError):
                    # Skip keep-alives and malformed event payloads.
                    continue
    return "\n".join(lines)

def check_iframe(url: str, timeout: int = 5) -> bool:
    try:
        return requests.get(url, timeout=timeout).status_code == 200
    except requests.RequestException:
        return False
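
# Note: a 200 response only shows the URL is reachable; it says nothing about
# whether the app inside the page has finished building or runs cleanly.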

# — CORE LOOP WITH TWO AGENTS —

def handle_user_message(
    history,
    user_msg: str,
    sdk_choice: str,
    gemini_api_key: str,
    grounding_enabled: bool,
    temperature: float,
    max_output_tokens: int,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
):
    if not profile or not oauth_token:
        return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"

    client     = genai.Client(api_key=gemini_api_key)
    code_fn    = "app.py" if sdk_choice=="gradio" else "streamlit_app.py"
    repo_id    = f"{profile.username}/{profile.username}-auto-space"
    iframe_url = f"https://huggingface.co/spaces/{repo_id}"
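    # Assumption: this is the Space *page* URL; the app itself is served from
    # a *.hf.space subdomain, and huggingface.co pages may refuse to render
    # inside an iframe, so the preview below can come up blank.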

    # — SYSTEM PROMPTS —  
    system_code = {
        "role":"system",
        "content":(
            "You are **Code‑Gen Agent**, a proactive AI developer. Your sole responsibility is to author "
            f"and correct the entire `{code_fn}` file in a single markdown code block—no extra commentary. "
            "You have permission to edit files, push updates to the HF Space, and optimize code. "
            "After each push, await build & run logs before making further changes."
        )
    }
    system_debug = {
        "role":"system",
        "content":(
            "You are **Debug Agent**, a meticulous code reviewer. You can read all files, logs, and the app "
            "preview, but you **cannot** modify or push code. Your task is to analyze the latest code + logs "
            "and return concise, actionable feedback or “All clear.”"
        )
    }

    # record the new user prompt, then initialize each agent's conversation
    history = history + [{"role": "user", "content": user_msg}]
    code_chat  = [system_code] + history[:]
    debug_chat = [system_debug] + history[:]

    build_logs = run_logs = ""
    backoff = 1
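    # Up to six generate → push → log-check → review rounds, sleeping with
    # exponential backoff (doubled per failed round, capped at 30 s) between
    # retries.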

    for attempt in range(1, 7):
        tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
        cfg = GenerateContentConfig(
            tools=tools,
            response_modalities=["TEXT"],
            temperature=temperature,
            max_output_tokens=max_output_tokens,
        )

        # --- 1) Code‑Gen generates or updates code ---
        resp_code = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in code_chat],
            config=cfg
        )
        code = extract_code(resp_code.text or "")
        code_chat.append({"role":"assistant","content":code})
        debug_chat.append({"role":"assistant","content":code})

        # quick syntax check
        try:
            compile(code, code_fn, "exec")
        except SyntaxError as e:
            code_chat.append({
                "role":"user",
                "content": f"SyntaxError caught: {e}. Please correct `{code_fn}` only."
            })
            time.sleep(backoff)
            backoff = min(backoff * 2, 30)
            continue

        # write & push to HF Space
        sdk_version = get_sdk_version(sdk_choice)
        files = {
            code_fn: code,
            "README.md": f"""---
title: Wuhp Auto Space
emoji: 🐢
sdk: {sdk_choice}
sdk_version: {sdk_version}
app_file: {code_fn}
pinned: false
---
""",
            "requirements.txt": "pandas\n" + ("streamlit\n" if sdk_choice=="streamlit" else "gradio\n")
        }
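
        # The YAML front matter in README.md is what actually configures the
        # Space (SDK, pinned sdk_version, entry file); all three files are
        # rewritten and re-uploaded on every attempt.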
        for fn, content in files.items():
            with open(fn, "w") as f:
                f.write(content)

        create_repo(repo_id=repo_id, token=oauth_token.token,
                    exist_ok=True, repo_type="space", space_sdk=sdk_choice)
        for fn in files:
            upload_file(
                path_or_fileobj=fn, path_in_repo=fn,
                repo_id=repo_id, token=oauth_token.token,
                repo_type="space"
            )

        # fetch logs
        build_logs = fetch_logs(repo_id, "build")
        run_logs   = fetch_logs(repo_id, "run")
        err_types  = classify_errors(build_logs + "\n" + run_logs)

        # --- 2) Debug‑Agent reviews code & logs ---
        debug_input = (
            f"🏷 **Attempt {attempt}**\n"
            f"Error types: {err_types}\n\n"
            f"**Build logs:**\n{build_logs}\n\n"
            f"**Run logs:**\n{run_logs}\n\n"
            "If there are no errors, reply “All clear.” Otherwise, list your recommended changes."
        )
        debug_chat.append({"role":"user","content":debug_input})
        resp_debug = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in debug_chat],
            config=cfg
        )
        feedback = (resp_debug.text or "").strip()
        debug_chat.append({"role":"assistant","content":feedback})

        # check for success
        if "ERROR" not in build_logs.upper() and \
           "ERROR" not in run_logs.upper() and \
           check_iframe(iframe_url):
            break

        # feed debug feedback back to Code‑Gen
        code_chat.append({
            "role":"user",
            "content": f"🔧 Debug feedback:\n{feedback}\nPlease output the full corrected `{code_fn}` code block only."
        })
        time.sleep(backoff)
        backoff = min(backoff * 2, 30)

    # prepare UI outputs
    messages = [
        {"role": m["role"], "content": m["content"]}
        for m in code_chat if m["role"] != "system"
    ]
    iframe_html = (
        f'<iframe src="{iframe_url}" width="100%" height="500px"></iframe>'
        + ("" if check_iframe(iframe_url)
           else "<p style='color:red;'>⚠️ iframe not responding.</p>")
    )
    return messages, build_logs, run_logs, iframe_html

# — SIMPLE UI WITH HIGHER MAX TOKENS —

with gr.Blocks(title="HF Space Auto‑Builder") as demo:
    gr.Markdown("## 🐢 HF Space Auto‑Builder\n1) Sign in  2) Enter prompt  3) Watch code, logs & preview")

    login_btn = gr.LoginButton(variant="huggingface", size="lg")
    status_md = gr.Markdown("*Not logged in.*")
    models_md = gr.Markdown()
    demo.load(show_profile,     inputs=None, outputs=status_md)
    demo.load(list_private_models, inputs=None, outputs=models_md)
    login_btn.click(show_profile,     inputs=None, outputs=status_md)
    login_btn.click(list_private_models, inputs=None, outputs=models_md)

    sdk_choice = gr.Radio(["gradio","streamlit"], value="gradio", label="SDK")
    api_key    = gr.Textbox(label="Gemini API Key", type="password")
    grounding  = gr.Checkbox(label="Enable grounding", value=False)
    temp       = gr.Slider(0,1,value=0.2, label="Temperature")
    max_tokens = gr.Number(value=2048, label="Max output tokens (up to the model limit)")

    chatbot   = gr.Chatbot(type="messages")
    user_in   = gr.Textbox(placeholder="Your prompt…", label="Prompt", lines=1)
    send_btn  = gr.Button("Send")
    build_box = gr.Textbox(label="Build logs", lines=5, interactive=False)
    run_box   = gr.Textbox(label="Run logs",   lines=5, interactive=False)
    preview   = gr.HTML("<p>No Space yet.</p>")

    send_btn.click(
        fn=handle_user_message,
        inputs=[chatbot, user_in, sdk_choice, api_key, grounding, temp, max_tokens],
        outputs=[chatbot, build_box, run_box, preview]
    )
    user_in.submit(
        fn=handle_user_message,
        inputs=[chatbot, user_in, sdk_choice, api_key, grounding, temp, max_tokens],
        outputs=[chatbot, build_box, run_box, preview]
    )

    refresh_btn = gr.Button("Refresh Logs")

    def refresh_logs(
        profile: gr.OAuthProfile | None,
        oauth_token: gr.OAuthToken | None
    ) -> tuple[str, str]:
        # Type hints are required here: Gradio injects the OAuth profile/token
        # based on function annotations, so a bare lambda would receive nothing.
        if not profile:
            return "Please log in first.", ""
        repo_id = f"{profile.username}/{profile.username}-auto-space"
        return fetch_logs(repo_id, "build"), fetch_logs(repo_id, "run")

    refresh_btn.click(fn=refresh_logs, inputs=None, outputs=[build_box, run_box])

demo.launch(server_name="0.0.0.0", server_port=7860)