wuhp committed on
Commit 253f2c6 · verified · 1 Parent(s): 30d6ccf

Update app.py

Files changed (1)
  1. app.py +102 -82

app.py CHANGED
@@ -8,15 +8,13 @@ from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
 # — USER INFO & MODEL LISTING —
 
 def show_profile(profile: gr.OAuthProfile | None) -> str:
-    if profile is None:
-        return "*Not logged in.*"
-    return f"✅ Logged in as **{profile.username}**"
+    return f"✅ Logged in as **{profile.username}**" if profile else "*Not logged in.*"
 
 def list_private_models(
     profile: gr.OAuthProfile | None,
     oauth_token: gr.OAuthToken | None
 ) -> str:
-    if profile is None or oauth_token is None:
+    if not (profile and oauth_token):
         return "Please log in to see your models."
     models = [
         f"{m.id} ({'private' if m.private else 'public'})"
@@ -24,7 +22,7 @@ def list_private_models(
     ]
     return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
 
-# — HELPERS FOR HF SPACE LOGS
+# — LOG FETCHING
 
 def _get_space_jwt(repo_id: str):
     url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
@@ -34,33 +32,31 @@ def _get_space_jwt(repo_id: str):
 
 def fetch_logs(repo_id: str, level: str):
     jwt = _get_space_jwt(repo_id)
-    logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
+    url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
     lines = []
-    with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True) as resp:
+    with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
         hf_raise_for_status(resp)
         for raw in resp.iter_lines():
             if raw.startswith(b"data: "):
                 try:
                     ev = json.loads(raw[len(b"data: "):].decode())
-                    ts = ev.get("timestamp","")
-                    txt = ev.get("data","")
-                    lines.append(f"[{ts}] {txt}")
+                    lines.append(f"[{ev.get('timestamp','')}] {ev.get('data','')}")
                 except:
                     continue
     return "\n".join(lines)
 
-# — CORE LOOP: send prompt & (iteratively) deploy
+# — HANDLERS
 
 def handle_user_message(
-    history,  # list of {"role","content"} dicts
+    history,  # list of dicts {"role","content"}
     sdk_choice: str,
     gemini_api_key: str,
     grounding_enabled: bool,
     profile: gr.OAuthProfile | None,
     oauth_token: gr.OAuthToken | None
 ):
-    if profile is None or oauth_token is None:
-        return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"
+    if not (profile and oauth_token):
+        return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>", ""
 
     client = genai.Client(api_key=gemini_api_key)
     chat = [{
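For context on the reworked fetch_logs: the log endpoint streams SSE-style lines, and each "data: " payload is JSON carrying "timestamp" and "data" fields, which the new code folds into a single f-string. A minimal, self-contained sketch of that parsing, using an illustrative payload rather than real Space output:

    import json

    # Hypothetical sample lines, shaped like the "data: " events fetch_logs consumes.
    raw_lines = [
        b'data: {"timestamp": "2024-01-01T00:00:00Z", "data": "Building image..."}',
        b": keep-alive",  # non-data lines are skipped
    ]

    lines = []
    for raw in raw_lines:
        if raw.startswith(b"data: "):
            try:
                ev = json.loads(raw[len(b"data: "):].decode())
                lines.append(f"[{ev.get('timestamp','')}] {ev.get('data','')}")
            except (ValueError, UnicodeDecodeError):
                continue

    print("\n".join(lines))  # [2024-01-01T00:00:00Z] Building image...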
@@ -71,27 +67,32 @@ def handle_user_message(
         )
     }] + history
 
-    code_fn = "app.py" if sdk_choice=="gradio" else "streamlit_app.py"
+    # choose filenames
+    code_fn = "app.py" if sdk_choice == "gradio" else "streamlit_app.py"
     readme_fn = "README.md"
     reqs_fn = "requirements.txt"
     repo_id = f"{profile.username}/{profile.username}-auto-space"
 
     build_logs = run_logs = ""
     for _ in range(5):
-        tools = []
-        if grounding_enabled:
-            tools.append(Tool(google_search=GoogleSearch()))
-        cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
-
-        resp = client.models.generate_content(
+        # dynamic sdk_version
+        if sdk_choice == "gradio":
+            import gradio as _gr; sdk_version = _gr.__version__
+        else:
+            import streamlit as _st; sdk_version = _st.__version__
+
+        # ask Gemini
+        tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
+        cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
+        resp = client.models.generate_content(
             model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in chat],
            config=cfg
        )
-        code = resp.text
+        code = resp.text
         chat.append({"role":"assistant","content":code})
 
-        # write code file
+        # write code
         with open(code_fn, "w") as f:
             f.write(code)
 
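The loop writes resp.text to the code file verbatim; if the model wraps its reply in a Markdown code fence, that fence lands in app.py as well. A small, hypothetical helper (not part of this commit) that strips one wrapping fence before the write could look like:

    def strip_code_fence(text: str) -> str:
        """Drop a single wrapping ``` fence, if present (illustrative helper)."""
        lines = text.strip().splitlines()
        if len(lines) >= 2 and lines[0].startswith("```") and lines[-1].startswith("```"):
            return "\n".join(lines[1:-1]) + "\n"
        return text

    # e.g. code = strip_code_fence(resp.text) just before writing code_fn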
@@ -103,40 +104,36 @@ emoji: 🐢
 colorFrom: red
 colorTo: pink
 sdk: {sdk_choice}
-sdk_version: 1.44.1
+sdk_version: {sdk_version}
 app_file: {code_fn}
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+See config reference → https://huggingface.co/docs/hub/spaces-config-reference
 """)
 
-        # write requirements.txt
-        base_reqs = "pandas\n"
-        extra = "streamlit\n" if sdk_choice=="streamlit" else "gradio\n"
+        # write requirements
+        base = "pandas\n"
+        extra = ("streamlit\n" if sdk_choice=="streamlit" else "gradio\n")
         with open(reqs_fn, "w") as f:
-            f.write(base_reqs + extra)
-
-        # deploy all three files
-        create_repo(
-            repo_id=repo_id,
-            token=oauth_token.token,
-            exist_ok=True,
-            repo_type="space",
-            space_sdk=sdk_choice
-        )
+            f.write(base + extra)
+
+        # deploy
+        create_repo(repo_id=repo_id,
+                    token=oauth_token.token,
+                    exist_ok=True,
+                    repo_type="space",
+                    space_sdk=sdk_choice)
         for fn in (code_fn, readme_fn, reqs_fn):
-            upload_file(
-                path_or_fileobj=fn,
-                path_in_repo=fn,
-                repo_id=repo_id,
-                token=oauth_token.token,
-                repo_type="space"
-            )
+            upload_file(path_or_fileobj=fn,
+                        path_in_repo=fn,
+                        repo_id=repo_id,
+                        token=oauth_token.token,
+                        repo_type="space")
 
+        # fetch logs
         build_logs = fetch_logs(repo_id, "build")
         run_logs = fetch_logs(repo_id, "run")
-
         if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
             break
 
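The dynamic sdk_version lookup above imports the whole SDK just to read __version__. An equivalent lookup that only reads installed package metadata, shown as a sketch and assuming the packages are installed under the distribution names "gradio" and "streamlit":

    from importlib.metadata import version  # stdlib, Python 3.8+

    sdk_choice = "gradio"  # same value the handler receives from the UI
    sdk_version = version("gradio" if sdk_choice == "gradio" else "streamlit")
    print(sdk_version)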
@@ -150,47 +147,70 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-
         })
         time.sleep(2)
 
+    # prepare outputs
     messages = [{"role":m["role"],"content":m["content"]} for m in chat if m["role"]!="system"]
     iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
-    return messages, build_logs, run_logs, iframe
-
-# BUILD THE UI —
-
-with gr.Blocks(title="HF Space Auto‑Builder") as demo:
-    gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
-                "1. Sign in\n2. Enter your prompt\n3. Watch code, README, requirements, logs, and preview\n\n---")
-
-    # LOGIN & MODEL LISTING
-    login_btn = gr.LoginButton(variant="huggingface", size="lg")
-    status_md = gr.Markdown("*Not logged in.*")
-    models_md = gr.Markdown()
-
-    # Implicitly injects profile (and token) when calling these
-    demo.load(show_profile, inputs=None, outputs=status_md)
-    demo.load(list_private_models, inputs=None, outputs=models_md)
-    login_btn.click(show_profile, inputs=None, outputs=status_md)
-    login_btn.click(list_private_models, inputs=None, outputs=models_md)
+    return messages, build_logs, run_logs, iframe, repo_id
+
+def refresh_build_logs(repo_id: str, profile, oauth_token):
+    if not (profile and oauth_token and repo_id):
+        return "⚠️ Please deploy first."
+    return fetch_logs(repo_id, "build")
+
+def refresh_run_logs(repo_id: str, profile, oauth_token):
+    if not (profile and oauth_token and repo_id):
+        return "⚠️ Please deploy first."
+    return fetch_logs(repo_id, "run")
+
+# — UI —
+
+with gr.Blocks() as demo:
+    gr.Markdown("## HF Space Auto‑Builder\n1. Sign in  2. Prompt  3. Deploy & Debug ►")
+
+    # Login & model list
+    login_btn = gr.LoginButton("huggingface", size="lg")
+    status_md = gr.Markdown("*Not logged in.*")
+    models_md = gr.Markdown()
+    demo.load(show_profile, None, status_md)
+    demo.load(list_private_models, None, models_md)
+    login_btn.click(show_profile, None, status_md)
+    login_btn.click(list_private_models, None, models_md)
+
+    # Controls
+    with gr.Row():
+        sdk_choice = gr.Radio(["gradio","streamlit"], "gradio", label="SDK")
+        api_key = gr.Textbox(label="Gemini API Key", type="password")
+        grounding = gr.Checkbox(label="Enable grounding")
+
+    # Chat + outputs
+    chatbot = gr.Chatbot(type="messages")
+    user_in = gr.Textbox(label="Prompt", placeholder="e.g. CSV inspector…")
+    send_btn = gr.Button("Send")
+    build_box = gr.Textbox(label="Build logs", lines=5)
+    run_box = gr.Textbox(label="Run logs", lines=5)
+    preview = gr.HTML("<p>No Space yet.</p>")
+    state_repo = gr.Textbox(visible=False)
 
-    # CONTROLS
-    sdk_choice = gr.Radio(["gradio","streamlit"], value="gradio", label="SDK template")
-    api_key = gr.Textbox(label="Gemini API Key", type="password")
-    grounding = gr.Checkbox(label="Enable grounding", value=False)
-
-    # CHAT + OUTPUTS
-    chatbot = gr.Chatbot(type="messages")
-    user_in = gr.Textbox(placeholder="Your prompt…", label="Prompt")
-    send_btn = gr.Button("Send")
-
-    build_box = gr.Textbox(label="Build logs", lines=5, interactive=False)
-    run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
-    preview = gr.HTML("<p>No Space yet.</p>")
-
-    # Only user inputs here—profile/token are auto‑injected
     send_btn.click(
         fn=handle_user_message,
         inputs=[chatbot, sdk_choice, api_key, grounding],
-        outputs=[chatbot, build_box, run_box, preview]
+        outputs=[chatbot, build_box, run_box, preview, state_repo]
+    )
+
+    # Manual refresh
+    with gr.Row():
+        refresh_build = gr.Button("Refresh Build Logs")
+        refresh_run = gr.Button("Refresh Run Logs")
+
+    refresh_build.click(
+        fn=refresh_build_logs,
+        inputs=[state_repo],
+        outputs=build_box
+    )
+    refresh_run.click(
+        fn=refresh_run_logs,
+        inputs=[state_repo],
+        outputs=run_box
     )
 
-if __name__=="__main__":
-    demo.launch()
+demo.launch()
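Note on the new refresh handlers: elsewhere in this file Gradio fills in the login state only for parameters annotated as gr.OAuthProfile / gr.OAuthToken (that is how handle_user_message receives them), while refresh_build_logs and refresh_run_logs declare profile and oauth_token without annotations and wire only state_repo as an input. A sketch of the annotated form that this implicit injection expects, assuming the rest of the file stays as committed:

    def refresh_build_logs(repo_id: str,
                           profile: gr.OAuthProfile | None,
                           oauth_token: gr.OAuthToken | None) -> str:
        # profile/oauth_token are auto-injected from the login session;
        # only repo_id needs to come through inputs=[state_repo].
        if not (profile and oauth_token and repo_id):
            return "⚠️ Please deploy first."
        return fetch_logs(repo_id, "build")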
 