wuhp committed on
Commit
3536086
·
verified ·
1 Parent(s): 09544f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -71
app.py CHANGED
@@ -1,27 +1,22 @@
1
  import gradio as gr
2
  import json, time
3
- from pydantic import BaseModel
4
  from huggingface_hub import create_repo, upload_file, list_models, constants
5
  from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
6
  from google import genai
7
  from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
8
 
9
- # — JSON SPEC MODEL —
10
-
11
- class RepoSpec(BaseModel):
12
- repo_name: str
13
- files: dict[str, str]
14
-
15
  # — USER INFO & MODEL LISTING —
16
 
17
  def show_profile(profile: gr.OAuthProfile | None) -> str:
18
- return f"✅ Logged in as **{profile.username}**" if profile else "*Not logged in.*"
 
 
19
 
20
  def list_private_models(
21
  profile: gr.OAuthProfile | None,
22
  oauth_token: gr.OAuthToken | None
23
  ) -> str:
24
- if not (profile and oauth_token):
25
  return "Please log in to see your models."
26
  models = [
27
  f"{m.id} ({'private' if m.private else 'public'})"
@@ -29,7 +24,7 @@ def list_private_models(
29
  ]
30
  return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
31
 
32
- # — LOG FETCHING
33
 
34
  def _get_space_jwt(repo_id: str):
35
  url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
@@ -39,79 +34,90 @@ def _get_space_jwt(repo_id: str):
39
 
40
  def fetch_logs(repo_id: str, level: str):
41
  jwt = _get_space_jwt(repo_id)
42
- url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
43
  lines = []
44
- with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
45
  hf_raise_for_status(resp)
46
  for raw in resp.iter_lines():
47
  if raw.startswith(b"data: "):
48
  try:
49
  ev = json.loads(raw[len(b"data: "):].decode())
50
- lines.append(f"[{ev.get('timestamp','')}] {ev.get('data','')}")
 
 
51
  except:
52
  continue
53
  return "\n".join(lines)
54
 
55
- # — CORE LOOP: ASK LLM FOR JSON, WRITE & DEPLOY
56
 
57
  def handle_user_message(
58
  history, # list of {"role","content"} dicts
59
- user_prompt: str,
60
  sdk_choice: str,
61
  gemini_api_key: str,
62
  grounding_enabled: bool,
63
  profile: gr.OAuthProfile | None,
64
  oauth_token: gr.OAuthToken | None
65
  ):
66
- if not (profile and oauth_token):
67
  return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"
68
 
69
  client = genai.Client(api_key=gemini_api_key)
70
-
71
- system_msg = {
72
  "role":"system",
73
  "content":(
74
- "Return exactly one JSON object matching this schema:\n"
75
- " repo_name (string)\n"
76
- " • files (object mapping filename→file-content)\n\n"
77
- "Files must include:\n"
78
- " - A code file (default name: app.py unless you choose otherwise)\n"
79
- " - requirements.txt with dependencies\n"
80
- " - README.md with frontmatter (title, emoji, sdk, sdk_version, app_file)\n\n"
81
- "Do NOT output any extra text—only the JSON object."
82
  )
83
- }
84
 
85
- chat = [system_msg] + history + [{"role":"user", "content":user_prompt}]
86
- repo_id = None
87
- build_logs = run_logs = ""
 
88
 
 
89
  for _ in range(5):
90
- # detect SDK version
91
- if sdk_choice == "gradio":
92
- import gradio as _gr; sdk_version = _gr.__version__
93
- else:
94
- import streamlit as _st; sdk_version = _st.__version__
95
-
96
- tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
97
- cfg = GenerateContentConfig(
98
- tools=tools,
99
- response_modalities=["TEXT"],
100
- response_mime_type="application/json",
101
- response_schema=RepoSpec # <<< use the Pydantic model here
102
- )
103
 
104
  resp = client.models.generate_content(
105
  model="gemini-2.5-flash-preview-04-17",
106
  contents=[m["content"] for m in chat],
107
  config=cfg
108
  )
109
- # validate & parse
110
- spec = RepoSpec.model_validate_json(resp.text)
111
- repo_name = spec.repo_name
112
- files = spec.files
113
-
114
- repo_id = f"{profile.username}/{repo_name}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  create_repo(
116
  repo_id=repo_id,
117
  token=oauth_token.token,
@@ -119,13 +125,7 @@ def handle_user_message(
119
  repo_type="space",
120
  space_sdk=sdk_choice
121
  )
122
-
123
- # write & upload files
124
- for fn, content in files.items():
125
- if fn.lower() == "readme.md":
126
- content = content.replace("<SDK_VERSION>", sdk_version)
127
- with open(fn, "w") as f:
128
- f.write(content)
129
  upload_file(
130
  path_or_fileobj=fn,
131
  path_in_repo=fn,
@@ -136,6 +136,7 @@ def handle_user_message(
136
 
137
  build_logs = fetch_logs(repo_id, "build")
138
  run_logs = fetch_logs(repo_id, "run")
 
139
  if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
140
  break
141
 
@@ -144,12 +145,12 @@ def handle_user_message(
144
  "content":(
145
  f"Build logs:\n{build_logs}\n\n"
146
  f"Run logs:\n{run_logs}\n\n"
147
- "Please fix the JSON spec and return updated JSON only."
148
  )
149
  })
150
  time.sleep(2)
151
 
152
- messages = [{"role":m["role"], "content":m["content"]} for m in chat if m["role"]!="system"]
153
  iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
154
  return messages, build_logs, run_logs, iframe
155
 
@@ -157,33 +158,39 @@ def handle_user_message(
157
 
158
  with gr.Blocks(title="HF Space Auto‑Builder") as demo:
159
  gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
160
- "1. Sign in   2. Prompt   3. Deploy & Debug\n\n"
161
- "_LLM controls filenames, code, README, requirements, and loops until successful._\n\n---")
162
 
 
163
  login_btn = gr.LoginButton(variant="huggingface", size="lg")
164
  status_md = gr.Markdown("*Not logged in.*")
165
  models_md = gr.Markdown()
166
- demo.load(show_profile, None, status_md)
167
- demo.load(list_private_models, None, models_md)
168
- login_btn.click(show_profile, None, status_md)
169
- login_btn.click(list_private_models, None, models_md)
170
 
171
- sdk_choice = gr.Radio(["gradio","streamlit"], "gradio", label="SDK")
 
 
 
 
 
 
 
172
  api_key = gr.Textbox(label="Gemini API Key", type="password")
173
- grounding = gr.Checkbox(label="Enable grounding")
174
 
 
175
  chatbot = gr.Chatbot(type="messages")
176
- user_in = gr.Textbox(label="Prompt", placeholder="e.g. Build a CSV inspector…")
177
  send_btn = gr.Button("Send")
178
- build_box = gr.Textbox(label="Build logs", lines=5)
179
- run_box = gr.Textbox(label="Run logs", lines=5)
 
180
  preview = gr.HTML("<p>No Space yet.</p>")
181
 
 
182
  send_btn.click(
183
  fn=handle_user_message,
184
- inputs=[chatbot, user_in, sdk_choice, api_key, grounding],
185
  outputs=[chatbot, build_box, run_box, preview]
186
  )
187
 
188
- if __name__ == "__main__":
189
  demo.launch()
 
1
  import gradio as gr
2
  import json, time
 
3
  from huggingface_hub import create_repo, upload_file, list_models, constants
4
  from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
5
  from google import genai
6
  from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
7
 
 
 
 
 
 
 
8
  # — USER INFO & MODEL LISTING —
9
 
10
  def show_profile(profile: gr.OAuthProfile | None) -> str:
11
+ if profile is None:
12
+ return "*Not logged in.*"
13
+ return f"✅ Logged in as **{profile.username}**"
14
 
15
  def list_private_models(
16
  profile: gr.OAuthProfile | None,
17
  oauth_token: gr.OAuthToken | None
18
  ) -> str:
19
+ if profile is None or oauth_token is None:
20
  return "Please log in to see your models."
21
  models = [
22
  f"{m.id} ({'private' if m.private else 'public'})"
 
24
  ]
25
  return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
26
 
27
+ # — HELPERS FOR HF SPACE LOGS
28
 
29
  def _get_space_jwt(repo_id: str):
30
  url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
 
34
 
35
  def fetch_logs(repo_id: str, level: str):
36
  jwt = _get_space_jwt(repo_id)
37
+ logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
38
  lines = []
39
+ with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True) as resp:
40
  hf_raise_for_status(resp)
41
  for raw in resp.iter_lines():
42
  if raw.startswith(b"data: "):
43
  try:
44
  ev = json.loads(raw[len(b"data: "):].decode())
45
+ ts = ev.get("timestamp","")
46
+ txt = ev.get("data","")
47
+ lines.append(f"[{ts}] {txt}")
48
  except:
49
  continue
50
  return "\n".join(lines)
51
 
52
+ # — CORE LOOP: send prompt & (iteratively) deploy
53
 
54
  def handle_user_message(
55
  history, # list of {"role","content"} dicts
 
56
  sdk_choice: str,
57
  gemini_api_key: str,
58
  grounding_enabled: bool,
59
  profile: gr.OAuthProfile | None,
60
  oauth_token: gr.OAuthToken | None
61
  ):
62
+ if profile is None or oauth_token is None:
63
  return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"
64
 
65
  client = genai.Client(api_key=gemini_api_key)
66
+ chat = [{
 
67
  "role":"system",
68
  "content":(
69
+ f"You are an AI assistant writing a HuggingFace Space using the "
70
+ f"{sdk_choice} SDK. After producing code, wait for logs; if errors appear, fix them."
 
 
 
 
 
 
71
  )
72
+ }] + history
73
 
74
+ code_fn = "app.py" if sdk_choice=="gradio" else "streamlit_app.py"
75
+ readme_fn = "README.md"
76
+ reqs_fn = "requirements.txt"
77
+ repo_id = f"{profile.username}/{profile.username}-auto-space"
78
 
79
+ build_logs = run_logs = ""
80
  for _ in range(5):
81
+ tools = []
82
+ if grounding_enabled:
83
+ tools.append(Tool(google_search=GoogleSearch()))
84
+ cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
 
 
 
 
 
 
 
 
 
85
 
86
  resp = client.models.generate_content(
87
  model="gemini-2.5-flash-preview-04-17",
88
  contents=[m["content"] for m in chat],
89
  config=cfg
90
  )
91
+ code = resp.text
92
+ chat.append({"role":"assistant","content":code})
93
+
94
+ # write code file
95
+ with open(code_fn, "w") as f:
96
+ f.write(code)
97
+
98
+ # write README.md
99
+ with open(readme_fn, "w") as f:
100
+ f.write(f"""---
101
+ title: Wuhp Auto Space
102
+ emoji: 🐢
103
+ colorFrom: red
104
+ colorTo: pink
105
+ sdk: {sdk_choice}
106
+ sdk_version: 1.44.1
107
+ app_file: {code_fn}
108
+ pinned: false
109
+ ---
110
+
111
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
112
+ """)
113
+
114
+ # write requirements.txt
115
+ base_reqs = "pandas\n"
116
+ extra = "streamlit\n" if sdk_choice=="streamlit" else "gradio\n"
117
+ with open(reqs_fn, "w") as f:
118
+ f.write(base_reqs + extra)
119
+
120
+ # deploy all three files
121
  create_repo(
122
  repo_id=repo_id,
123
  token=oauth_token.token,
 
125
  repo_type="space",
126
  space_sdk=sdk_choice
127
  )
128
+ for fn in (code_fn, readme_fn, reqs_fn):
 
 
 
 
 
 
129
  upload_file(
130
  path_or_fileobj=fn,
131
  path_in_repo=fn,
 
136
 
137
  build_logs = fetch_logs(repo_id, "build")
138
  run_logs = fetch_logs(repo_id, "run")
139
+
140
  if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
141
  break
142
 
 
145
  "content":(
146
  f"Build logs:\n{build_logs}\n\n"
147
  f"Run logs:\n{run_logs}\n\n"
148
+ "Please fix the code."
149
  )
150
  })
151
  time.sleep(2)
152
 
153
+ messages = [{"role":m["role"],"content":m["content"]} for m in chat if m["role"]!="system"]
154
  iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
155
  return messages, build_logs, run_logs, iframe
156
 
 
158
 
159
  with gr.Blocks(title="HF Space Auto‑Builder") as demo:
160
  gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
161
+ "1. Sign in\n2. Enter your prompt\n3. Watch code, README, requirements, logs, and preview\n\n---")
 
162
 
163
+ # LOGIN & MODEL LISTING
164
  login_btn = gr.LoginButton(variant="huggingface", size="lg")
165
  status_md = gr.Markdown("*Not logged in.*")
166
  models_md = gr.Markdown()
 
 
 
 
167
 
168
+ # Implicitly injects profile (and token) when calling these
169
+ demo.load(show_profile, inputs=None, outputs=status_md)
170
+ demo.load(list_private_models, inputs=None, outputs=models_md)
171
+ login_btn.click(show_profile, inputs=None, outputs=status_md)
172
+ login_btn.click(list_private_models, inputs=None, outputs=models_md)
173
+
174
+ # CONTROLS
175
+ sdk_choice = gr.Radio(["gradio","streamlit"], value="gradio", label="SDK template")
176
  api_key = gr.Textbox(label="Gemini API Key", type="password")
177
+ grounding = gr.Checkbox(label="Enable grounding", value=False)
178
 
179
+ # CHAT + OUTPUTS
180
  chatbot = gr.Chatbot(type="messages")
181
+ user_in = gr.Textbox(placeholder="Your prompt…", label="Prompt")
182
  send_btn = gr.Button("Send")
183
+
184
+ build_box = gr.Textbox(label="Build logs", lines=5, interactive=False)
185
+ run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
186
  preview = gr.HTML("<p>No Space yet.</p>")
187
 
188
+ # Only user inputs here—profile/token are auto‑injected
189
  send_btn.click(
190
  fn=handle_user_message,
191
+ inputs=[chatbot, sdk_choice, api_key, grounding],
192
  outputs=[chatbot, build_box, run_box, preview]
193
  )
194
 
195
+ if __name__=="__main__":
196
  demo.launch()