Update app.py
app.py CHANGED
@@ -66,9 +66,8 @@ def handle_user_message(
     chat = [{
         "role":"system",
         "content":(
-            f"You are an AI assistant
-            f"{sdk_choice} SDK. After producing code, wait for logs.
-            "If errors appear, fix them and return the full updated code."
+            f"You are an AI assistant writing a HuggingFace Space using the "
+            f"{sdk_choice} SDK. After producing code, wait for logs; if errors appear, fix them."
         )
     }] + history

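For context on what the rewritten system prompt produces, here is a minimal, self-contained sketch that assembles `chat` the same way and then flattens it with the `[m["content"] for m in chat]` comprehension used by the model call later in this diff. The sample `history` entries and the hard-coded `sdk_choice` value are illustrative assumptions, not part of the commit.

```python
# Minimal sketch: system prompt + prior history, assuming `history` holds
# {"role": ..., "content": ...} dicts, matching the Gradio chatbot state.
sdk_choice = "gradio"  # hypothetical value for illustration

history = [
    {"role": "user", "content": "Build me a CSV explorer."},
    {"role": "assistant", "content": "import gradio as gr\n# ...previous draft..."},
]

chat = [{
    "role": "system",
    "content": (
        f"You are an AI assistant writing a HuggingFace Space using the "
        f"{sdk_choice} SDK. After producing code, wait for logs; if errors appear, fix them."
    ),
}] + history

# The later Gemini call only needs raw text, hence the flattening:
contents = [m["content"] for m in chat]
print(len(contents), contents[0][:40])
```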
@@ -84,16 +83,19 @@ def handle_user_message(
         tools.append(Tool(google_search=GoogleSearch()))
     cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])

-    resp
+    resp = client.models.generate_content(
         model="gemini-2.5-flash-preview-04-17",
         contents=[m["content"] for m in chat],
         config=cfg
     )
-    code
+    code = resp.text
     chat.append({"role":"assistant","content":code})

-    # write
-    with open(code_fn,
+    # write code file
+    with open(code_fn, "w") as f:
+        f.write(code)
+
+    # write README.md
     with open(readme_fn, "w") as f:
         f.write(f"""---
 title: Wuhp Auto Space
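As a standalone reference for the call this hunk completes, below is a hedged sketch of the same grounded request with the google-genai SDK. The client construction, the `GEMINI_API_KEY` environment variable, and the sample prompt are assumptions; the model name, the `GoogleSearch` tool, and the config mirror the diff.

```python
import os

from google import genai
from google.genai.types import GenerateContentConfig, GoogleSearch, Tool

# Assumed client setup; app.py may instead build the client from the api_key textbox.
client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

tools = []
grounding = True  # corresponds to the `grounding` checkbox in the UI
if grounding:
    # Ground the answer with Google Search results.
    tools.append(Tool(google_search=GoogleSearch()))

cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])

resp = client.models.generate_content(
    model="gemini-2.5-flash-preview-04-17",
    contents=["Write a one-file Gradio app that plots a pandas DataFrame."],
    config=cfg,
)
code = resp.text  # the generated app source, written to code_fn in app.py
print(code[:200])
```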
@@ -108,10 +110,14 @@ pinned: false

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 """)
-    reqs = "pandas\n" + ("streamlit\n" if sdk_choice=="streamlit" else "gradio\n")
-    with open(reqs_fn, "w") as f: f.write(reqs)

-    #
+    # write requirements.txt
+    base_reqs = "pandas\n"
+    extra = "streamlit\n" if sdk_choice=="streamlit" else "gradio\n"
+    with open(reqs_fn, "w") as f:
+        f.write(base_reqs + extra)
+
+    # deploy all three files
     create_repo(
         repo_id=repo_id,
         token=oauth_token.token,
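The "deploy all three files" step is cut off after the `token=` argument in this diff, so the continuation below is a guess rather than the commit's code: a hedged sketch of pushing the generated app, README, and requirements to a Space with huggingface_hub. The remote file names and every keyword beyond `repo_id` and `token` are assumptions.

```python
from huggingface_hub import create_repo, upload_file

def deploy_space(repo_id: str, token: str, sdk_choice: str,
                 code_fn: str, readme_fn: str, reqs_fn: str) -> None:
    # Create (or reuse) the Space repo for the chosen SDK.
    create_repo(
        repo_id=repo_id,
        token=token,
        repo_type="space",
        space_sdk=sdk_choice,   # "gradio" or "streamlit"
        exist_ok=True,
    )
    # Push the generated app, README front matter, and requirements.
    for local_path, remote_path in [
        (code_fn, "app.py"),
        (readme_fn, "README.md"),
        (reqs_fn, "requirements.txt"),
    ]:
        upload_file(
            path_or_fileobj=local_path,
            path_in_repo=remote_path,
            repo_id=repo_id,
            repo_type="space",
            token=token,
        )
```

Uploading file by file keeps the sketch simple; `upload_folder` would also work if the three files were staged in one directory.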
@@ -150,19 +156,20 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

 # — BUILD THE UI —

-with gr.Blocks(title="HF Space Auto‑Builder (Gradio & Streamlit)") as demo:
+with gr.Blocks(title="HF Space Auto‑Builder") as demo:
     gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
                 "1. Sign in\n2. Enter your prompt\n3. Watch code, README, requirements, logs, and preview\n\n---")

     # LOGIN & MODEL LISTING
-    login_btn
-    status_md
-    models_md
+    login_btn = gr.LoginButton(variant="huggingface", size="lg")
+    status_md = gr.Markdown("*Not logged in.*")
+    models_md = gr.Markdown()

-
-    demo.load(
-
-    login_btn.click(
+    # Implicitly injects profile (and token) when calling these
+    demo.load(show_profile, inputs=None, outputs=status_md)
+    demo.load(list_private_models, inputs=None, outputs=models_md)
+    login_btn.click(show_profile, inputs=None, outputs=status_md)
+    login_btn.click(list_private_models, inputs=None, outputs=models_md)

     # CONTROLS
     sdk_choice = gr.Radio(["gradio","streamlit"], value="gradio", label="SDK template")
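The new `demo.load` / `login_btn.click` wiring leans on Gradio's OAuth injection: parameters annotated with `gr.OAuthProfile` or `gr.OAuthToken` are filled in from the Hub login session rather than listed in `inputs`. The sketch below shows that pattern end to end; the bodies of `show_profile` and `list_private_models` are illustrative guesses, since the commit does not show them.

```python
import gradio as gr
from huggingface_hub import HfApi

def show_profile(profile: gr.OAuthProfile | None) -> str:
    if profile is None:
        return "*Not logged in.*"
    return f"Logged in as **{profile.username}**"

def list_private_models(profile: gr.OAuthProfile | None,
                        token: gr.OAuthToken | None) -> str:
    if profile is None or token is None:
        return "Sign in to see your models."
    # Listing with the user's token also surfaces their private models.
    models = HfApi(token=token.token).list_models(author=profile.username)
    return "\n".join(f"- {m.id}" for m in models) or "No models found."

with gr.Blocks() as demo:
    login_btn = gr.LoginButton()
    status_md = gr.Markdown("*Not logged in.*")
    models_md = gr.Markdown()

    # No profile/token in `inputs`; Gradio injects them from the session.
    demo.load(show_profile, inputs=None, outputs=status_md)
    login_btn.click(list_private_models, inputs=None, outputs=models_md)

if __name__ == "__main__":
    demo.launch()
```

Because the profile and token never appear in `inputs`, the same callbacks can be reused by both `demo.load` and `login_btn.click`, which is exactly the wiring in the hunk above.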
@@ -178,9 +185,10 @@ with gr.Blocks(title="HF Space Auto‑Builder (Gradio & Streamlit)") as demo:
     run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
     preview = gr.HTML("<p>No Space yet.</p>")

+    # Only user inputs here—profile/token are auto‑injected
     send_btn.click(
         fn=handle_user_message,
-        inputs=[chatbot, sdk_choice, api_key, grounding
+        inputs=[chatbot, sdk_choice, api_key, grounding],
         outputs=[chatbot, build_box, run_box, preview]
     )

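Finally, a hypothetical signature for `handle_user_message` showing how the four `inputs` map positionally onto its parameters while the OAuth profile and token arrive via annotations, and how one value is returned per output component. Parameter names beyond those visible in the diff, the Space naming, and the preview URL scheme are assumptions.

```python
import gradio as gr

def handle_user_message(
    history: list[dict],                       # chatbot state
    sdk_choice: str,                           # "gradio" or "streamlit"
    gemini_api_key: str,                       # api_key textbox (assumed name)
    grounding: bool,                           # grounding checkbox
    profile: gr.OAuthProfile | None = None,    # injected by Gradio on login
    oauth_token: gr.OAuthToken | None = None,  # injected by Gradio on login
):
    if profile is None or oauth_token is None:
        return history, "Please sign in first.", "", "<p>No Space yet.</p>"

    # ...generate code, write files, deploy, fetch logs (see the hunks above)...

    # Assumed Space naming and *.hf.space preview host; the real repo_id and
    # log fetching are not shown in this diff.
    space = f"{profile.username}-wuhp-auto-space"
    preview_html = f'<iframe src="https://{space}.hf.space" width="100%" height="500"></iframe>'
    return history, "build logs ...", "run logs ...", preview_html
```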