wuhp committed · Commit d4f7838 · verified · 1 Parent(s): 3536086

Update app.py

Files changed (1):
  1. app.py +57 -66
app.py CHANGED
@@ -1,30 +1,29 @@
-import gradio as gr
-import json, time
+import re
+import time
+import importlib.metadata
 from huggingface_hub import create_repo, upload_file, list_models, constants
 from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
 from google import genai
 from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
+import gradio as gr

-# — USER INFO & MODEL LISTING —
+# — UTILITIES

-def show_profile(profile: gr.OAuthProfile | None) -> str:
-    if profile is None:
-        return "*Not logged in.*"
-    return f"✅ Logged in as **{profile.username}**"
+def get_sdk_version(sdk_choice: str) -> str:
+    pkg = "gradio" if sdk_choice == "gradio" else "streamlit"
+    try:
+        return importlib.metadata.version(pkg)
+    except importlib.metadata.PackageNotFoundError:
+        return "UNKNOWN"

-def list_private_models(
-    profile: gr.OAuthProfile | None,
-    oauth_token: gr.OAuthToken | None
-) -> str:
-    if profile is None or oauth_token is None:
-        return "Please log in to see your models."
-    models = [
-        f"{m.id} ({'private' if m.private else 'public'})"
-        for m in list_models(author=profile.username, token=oauth_token.token)
-    ]
-    return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
+def extract_code(text: str) -> str:
+    """
+    Pull out the last ```…``` block, or fall back to the whole text.
+    """
+    blocks = re.findall(r"```(?:\w*\n)?([\s\S]*?)```", text)
+    return blocks[-1].strip() if blocks else text.strip()

-# — HELPERS FOR HF SPACE LOGS
+# — HF SPACE LOGGING

 def _get_space_jwt(repo_id: str):
     url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
@@ -32,7 +31,7 @@ def _get_space_jwt(repo_id: str):
     hf_raise_for_status(r)
     return r.json()["token"]

-def fetch_logs(repo_id: str, level: str):
+def fetch_logs(repo_id: str, level: str) -> str:
     jwt = _get_space_jwt(repo_id)
     logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
     lines = []
@@ -49,27 +48,28 @@ def fetch_logs(repo_id: str, level: str):
                 continue
     return "\n".join(lines)

-# — CORE LOOP: send prompt & (iteratively) deploy —
+# — CORE LOOP

 def handle_user_message(
-    history,  # list of {"role","content"} dicts
+    history,
     sdk_choice: str,
     gemini_api_key: str,
     grounding_enabled: bool,
     profile: gr.OAuthProfile | None,
     oauth_token: gr.OAuthToken | None
 ):
-    if profile is None or oauth_token is None:
+    if not profile or not oauth_token:
         return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"

     client = genai.Client(api_key=gemini_api_key)
-    chat = [{
+    system_msg = {
         "role":"system",
         "content":(
             f"You are an AI assistant writing a HuggingFace Space using the "
             f"{sdk_choice} SDK. After producing code, wait for logs; if errors appear, fix them."
         )
-    }] + history
+    }
+    chat = [system_msg] + history

     code_fn = "app.py" if sdk_choice=="gradio" else "streamlit_app.py"
     readme_fn = "README.md"
@@ -78,65 +78,57 @@ def handle_user_message(

     build_logs = run_logs = ""
     for _ in range(5):
-        tools = []
-        if grounding_enabled:
-            tools.append(Tool(google_search=GoogleSearch()))
-        cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
+        tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
+        cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])

         resp = client.models.generate_content(
             model="gemini-2.5-flash-preview-04-17",
             contents=[m["content"] for m in chat],
             config=cfg
         )
-        code = resp.text
+
+        raw = resp.text
+        code = extract_code(raw)
         chat.append({"role":"assistant","content":code})

-        # write code file
+        # write code
         with open(code_fn, "w") as f:
             f.write(code)

-        # write README.md
-        with open(readme_fn, "w") as f:
-            f.write(f"""---
+        # write dynamic README
+        sdk_version = get_sdk_version(sdk_choice)
+        readme = f"""---
 title: Wuhp Auto Space
 emoji: 🐢
 colorFrom: red
 colorTo: pink
 sdk: {sdk_choice}
-sdk_version: 1.44.1
+sdk_version: {sdk_version}
 app_file: {code_fn}
 pinned: false
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-""")
+"""
+        with open(readme_fn, "w") as f:
+            f.write(readme)

-        # write requirements.txt
+        # write requirements
         base_reqs = "pandas\n"
         extra = "streamlit\n" if sdk_choice=="streamlit" else "gradio\n"
         with open(reqs_fn, "w") as f:
             f.write(base_reqs + extra)

-        # deploy all three files
-        create_repo(
-            repo_id=repo_id,
-            token=oauth_token.token,
-            exist_ok=True,
-            repo_type="space",
-            space_sdk=sdk_choice
-        )
+        # push to HF
+        create_repo(repo_id=repo_id, token=oauth_token.token,
+                    exist_ok=True, repo_type="space", space_sdk=sdk_choice)
         for fn in (code_fn, readme_fn, reqs_fn):
-            upload_file(
-                path_or_fileobj=fn,
-                path_in_repo=fn,
-                repo_id=repo_id,
-                token=oauth_token.token,
-                repo_type="space"
-            )
+            upload_file(path_or_fileobj=fn, path_in_repo=fn,
+                        repo_id=repo_id, token=oauth_token.token,
+                        repo_type="space")

         build_logs = fetch_logs(repo_id, "build")
         run_logs = fetch_logs(repo_id, "run")
-
         if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
             break

@@ -154,29 +146,23 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-
     iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
     return messages, build_logs, run_logs, iframe

-# — BUILD THE UI —
+# — BUILD THE UI —

 with gr.Blocks(title="HF Space Auto‑Builder") as demo:
-    gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
-                "1. Sign in\n2. Enter your prompt\n3. Watch code, README, requirements, logs, and preview\n\n---")
-
-    # LOGIN & MODEL LISTING
+    gr.Markdown("## Sign in + Auto‑Build Spaces\n\n..."
+    )
     login_btn = gr.LoginButton(variant="huggingface", size="lg")
     status_md = gr.Markdown("*Not logged in.*")
     models_md = gr.Markdown()
-
-    # Implicitly injects profile (and token) when calling these
-    demo.load(show_profile, inputs=None, outputs=status_md)
+    demo.load(show_profile, inputs=None, outputs=status_md)
     demo.load(list_private_models, inputs=None, outputs=models_md)
     login_btn.click(show_profile, inputs=None, outputs=status_md)
     login_btn.click(list_private_models, inputs=None, outputs=models_md)

-    # CONTROLS
     sdk_choice = gr.Radio(["gradio","streamlit"], value="gradio", label="SDK template")
     api_key = gr.Textbox(label="Gemini API Key", type="password")
     grounding = gr.Checkbox(label="Enable grounding", value=False)

-    # CHAT + OUTPUTS
     chatbot = gr.Chatbot(type="messages")
     user_in = gr.Textbox(placeholder="Your prompt…", label="Prompt")
     send_btn = gr.Button("Send")
@@ -185,12 +171,17 @@ with gr.Blocks(title="HF Space Auto‑Builder") as demo:
     run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
     preview = gr.HTML("<p>No Space yet.</p>")

-    # Only user inputs here—profile/token are auto‑injected
     send_btn.click(
         fn=handle_user_message,
         inputs=[chatbot, sdk_choice, api_key, grounding],
         outputs=[chatbot, build_box, run_box, preview]
     )

-if __name__=="__main__":
-    demo.launch()
+    # — New “Refresh Logs” control for manual edits —
+    refresh_btn = gr.Button("Refresh Logs")
+    def _refresh(profile, token):
+        repo = f"{profile.username}/{profile.username}-auto-space"
+        return fetch_logs(repo, "build"), fetch_logs(repo, "run")
+    refresh_btn.click(_refresh, inputs=[status_md, models_md], outputs=[build_box, run_box])
+
+demo.launch(server_name="0.0.0.0", server_port=7860)
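
The two helpers introduced by this commit are self-contained, so their effect is easy to check outside the Space. The snippet below copies `extract_code` and `get_sdk_version` from the diff and runs them on an illustrative Gemini-style reply; the sample string and the printed values are assumptions for demonstration, not output from the app:

```python
import re
import importlib.metadata

def extract_code(text: str) -> str:
    # Pull out the last ```...``` block, or fall back to the whole text (as in the diff).
    blocks = re.findall(r"```(?:\w*\n)?([\s\S]*?)```", text)
    return blocks[-1].strip() if blocks else text.strip()

def get_sdk_version(sdk_choice: str) -> str:
    # Read the installed package version so the README front matter stays in sync.
    pkg = "gradio" if sdk_choice == "gradio" else "streamlit"
    try:
        return importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        return "UNKNOWN"

# Hypothetical model reply: prose wrapped around one fenced code block.
reply = (
    "Here is your app:\n"
    "```python\nimport gradio as gr\ngr.Interface(lambda x: x, 'text', 'text').launch()\n```\n"
    "Let me know how it runs."
)
print(extract_code(reply))        # only the code between the fences
print(get_sdk_version("gradio"))  # whatever version is installed, or "UNKNOWN"
```

This is what replaces the old `code = resp.text` and the hard-coded `sdk_version: 1.44.1`: the generated `app.py` no longer carries the model's surrounding prose, and the README declares the SDK version that is actually installed in the build environment.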
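For context, the log-polling path that the retry loop relies on is only partially visible in the hunks above; the body of `fetch_logs` is unchanged apart from the `-> str` annotation and is mostly elided. Below is a rough standalone sketch of how that path works, built from the visible lines. The streaming loop, the bearer-token header, and the event field names are assumptions, not the file's exact code:

```python
import json
from huggingface_hub import constants
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status

def _get_space_jwt(repo_id: str) -> str:
    # Visible in the diff: exchange the logged-in user's credentials for a Space JWT.
    url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
    r = get_session().get(url, headers=build_hf_headers())
    hf_raise_for_status(r)
    return r.json()["token"]

def fetch_logs(repo_id: str, level: str) -> str:
    jwt = _get_space_jwt(repo_id)
    logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"  # level is "build" or "run"
    lines = []
    # Assumed: the endpoint streams server-sent events with a JSON payload per line.
    # The committed file presumably also stops reading after some condition or timeout.
    with get_session().get(logs_url, headers={"Authorization": f"Bearer {jwt}"}, stream=True) as resp:
        hf_raise_for_status(resp)
        for raw in resp.iter_lines():
            if not raw or not raw.startswith(b"data: "):
                continue
            try:
                event = json.loads(raw[len(b"data: "):])
                lines.append(f"[{event.get('timestamp', '')}] {event.get('data', '')}")
            except json.JSONDecodeError:
                continue
    return "\n".join(lines)
```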