wuhp committed on
Commit
835de23
·
verified ·
1 Parent(s): 248d55e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -37
app.py CHANGED
@@ -22,7 +22,7 @@ def list_private_models(
22
  ]
23
  return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
24
 
25
- # — HELPERS FOR HF SPACE LOGS
26
 
27
  def _get_space_jwt(repo_id: str):
28
  url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
@@ -45,7 +45,7 @@ def fetch_logs(repo_id: str, level: str):
45
  continue
46
  return "\n".join(lines)
47
 
48
- # — CORE LOOP: ask LLM for JSON, write & deploy —
49
 
50
  def handle_user_message(
51
  history, # list of {"role","content"} dicts
@@ -60,57 +60,75 @@ def handle_user_message(
60
 
61
  client = genai.Client(api_key=gemini_api_key)
62
 
63
- # System prompt: instruct JSON output
64
  system_msg = {
65
  "role":"system",
66
  "content":(
67
- "You are an AI assistant that generates all files needed for a HuggingFace Space.\n"
68
- "Based on the user's prompt, return a **single JSON object** with keys:\n"
69
- " • repo_name: string the name of the new Space repo (no username).\n"
70
- " files: object mapping filenames to file contents (strings). \n"
71
- " Required files: one code file (preferably app.py; if you choose a different name, update README.md),\n"
72
- " README.md frontmatter (with title, sdk, sdk_version, app_file), and requirements.txt.\n"
73
- "Do **NOT** return any explanatory text—only that JSON object.\n"
 
 
74
  )
75
  }
76
  chat = [system_msg] + history
77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  repo_id = None
79
  build_logs = run_logs = ""
80
 
81
  for _ in range(5):
82
- # dynamic version detection
83
- if sdk_choice == "gradio":
84
  import gradio as _gr; sdk_version = _gr.__version__
85
  else:
86
  import streamlit as _st; sdk_version = _st.__version__
87
 
88
- # ask the LLM
89
  tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
90
- cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
91
- resp = client.models.generate_content(
 
 
 
 
 
 
92
  model="gemini-2.5-flash-preview-04-17",
93
  contents=[m["content"] for m in chat],
94
  config=cfg
95
  )
96
- ai_text = resp.text
97
- chat.append({"role":"assistant","content":ai_text})
98
 
99
- # parse JSON
100
  try:
101
- spec = json.loads(ai_text)
102
  repo_name = spec["repo_name"]
103
  files = spec["files"]
104
- except Exception as e:
105
- # ask it to re-output valid JSON
106
- chat.append({"role":"user","content":
107
- "Your last response wasn't valid JSON. "
108
- "Please reply with exactly the JSON object as specified."})
109
  continue
110
 
111
  repo_id = f"{profile.username}/{repo_name}"
112
-
113
- # write & upload each file
114
  create_repo(
115
  repo_id=repo_id,
116
  token=oauth_token.token,
@@ -118,9 +136,11 @@ def handle_user_message(
118
  repo_type="space",
119
  space_sdk=sdk_choice
120
  )
 
 
121
  for fn, content in files.items():
122
- # fill in sdk_version into README if needed
123
- if fn.lower() == "readme.md":
124
  content = content.replace("<SDK_VERSION>", sdk_version)
125
  with open(fn, "w") as f:
126
  f.write(content)
@@ -136,21 +156,20 @@ def handle_user_message(
136
  build_logs = fetch_logs(repo_id, "build")
137
  run_logs = fetch_logs(repo_id, "run")
138
 
 
139
  if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
140
  break
141
 
142
- # feed errors back
143
  chat.append({
144
  "role":"user",
145
  "content":(
146
- f"Deployment produced build logs:\n{build_logs}\n\n"
147
- f"and run logs:\n{run_logs}\n\n"
148
- "Please fix the code and output a new JSON spec."
149
  )
150
  })
151
  time.sleep(2)
152
 
153
- # prepare outputs
154
  messages = [{"role":m["role"],"content":m["content"]} for m in chat if m["role"]!="system"]
155
  iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
156
  return messages, build_logs, run_logs, iframe
@@ -159,14 +178,14 @@ def handle_user_message(
159
 
160
  with gr.Blocks(title="HF Space Auto‑Builder") as demo:
161
  gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
162
- "1. Sign in  2. Prompt  3. Deploy & Debug \n\n"
163
- "*LLM will generate code, README, requirements, and iterate until successful.*\n\n---")
164
 
165
  login_btn = gr.LoginButton("huggingface", size="lg")
166
  status_md = gr.Markdown("*Not logged in.*")
167
  models_md = gr.Markdown()
168
- demo.load(show_profile, None, status_md)
169
- demo.load(list_private_models, None, models_md)
170
  login_btn.click(show_profile, None, status_md)
171
  login_btn.click(list_private_models, None, models_md)
172
 
 
22
  ]
23
  return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
24
 
25
+ # — LOG FETCHING
26
 
27
  def _get_space_jwt(repo_id: str):
28
  url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
 
45
  continue
46
  return "\n".join(lines)
47
 
48
+ # — CORE LOOP: ask LLM for structured JSON, write & deploy —
49
 
50
  def handle_user_message(
51
  history, # list of {"role","content"} dicts
 
60
 
61
  client = genai.Client(api_key=gemini_api_key)
62
 
63
+ # System prompt: ask for exact JSON per schema
64
  system_msg = {
65
  "role":"system",
66
  "content":(
67
+ "Generate **one JSON object** with keys:\n"
68
+ " repo_name: string (the Space name, no username)\n"
69
+ " • files: object mapping filenames to file-contents strings\n\n"
70
+ "Include at least:\n"
71
+ " - A code file (use app.py unless you explicitly choose another)\n"
72
+ " - requirements.txt with all dependencies\n"
73
+ " - README.md frontmatter (title, emoji, sdk, sdk_version, app_file, etc.)\n\n"
74
+ "Do NOT output any other text—only the JSON.\n"
75
+ f"The user wants a **{sdk_choice}** app with this prompt: {history[-1][1]}"
76
  )
77
  }
78
  chat = [system_msg] + history
79
 
80
+ # JSON schema for the response
81
+ schema = {
82
+ "type": "object",
83
+ "properties": {
84
+ "repo_name": { "type": "string" },
85
+ "files": {
86
+ "type": "object",
87
+ "additionalProperties": { "type": "string" }
88
+ }
89
+ },
90
+ "required": ["repo_name","files"]
91
+ }
92
+
93
  repo_id = None
94
  build_logs = run_logs = ""
95
 
96
  for _ in range(5):
97
+ # detect sdk_version at runtime
98
+ if sdk_choice=="gradio":
99
  import gradio as _gr; sdk_version = _gr.__version__
100
  else:
101
  import streamlit as _st; sdk_version = _st.__version__
102
 
 
103
  tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
104
+ cfg = GenerateContentConfig(
105
+ tools=tools,
106
+ response_modalities=["TEXT"],
107
+ response_mime_type="application/json",
108
+ response_schema=schema
109
+ )
110
+
111
+ resp = client.models.generate_content(
112
  model="gemini-2.5-flash-preview-04-17",
113
  contents=[m["content"] for m in chat],
114
  config=cfg
115
  )
116
+ ai_json = resp.text
 
117
 
118
+ # parse JSON only
119
  try:
120
+ spec = json.loads(ai_json)
121
  repo_name = spec["repo_name"]
122
  files = spec["files"]
123
+ except Exception:
124
+ chat.append({
125
+ "role":"user",
126
+ "content":"The JSON was invalid. Please reply with exactly the JSON object per schema."
127
+ })
128
  continue
129
 
130
  repo_id = f"{profile.username}/{repo_name}"
131
+ # ensure Space exists
 
132
  create_repo(
133
  repo_id=repo_id,
134
  token=oauth_token.token,
 
136
  repo_type="space",
137
  space_sdk=sdk_choice
138
  )
139
+
140
+ # write & upload all files
141
  for fn, content in files.items():
142
+ # inject the true sdk_version if placeholder present
143
+ if fn.lower()=="readme.md":
144
  content = content.replace("<SDK_VERSION>", sdk_version)
145
  with open(fn, "w") as f:
146
  f.write(content)
 
156
  build_logs = fetch_logs(repo_id, "build")
157
  run_logs = fetch_logs(repo_id, "run")
158
 
159
+ # break on clean build
160
  if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
161
  break
162
 
 
163
  chat.append({
164
  "role":"user",
165
  "content":(
166
+ f"Build logs:\n{build_logs}\n\n"
167
+ f"Run logs:\n{run_logs}\n\n"
168
+ "Please fix the JSON spec and return updated JSON."
169
  )
170
  })
171
  time.sleep(2)
172
 
 
173
  messages = [{"role":m["role"],"content":m["content"]} for m in chat if m["role"]!="system"]
174
  iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
175
  return messages, build_logs, run_logs, iframe
 
178
 
179
  with gr.Blocks(title="HF Space Auto‑Builder") as demo:
180
  gr.Markdown("## Sign in + Auto‑Build Spaces\n\n"
181
+ "1. Sign in  2. Prompt  3. Deploy & Debug\n"
182
+ "_The LLM controls filenames, title, README, requirements, and code._\n\n---")
183
 
184
  login_btn = gr.LoginButton("huggingface", size="lg")
185
  status_md = gr.Markdown("*Not logged in.*")
186
  models_md = gr.Markdown()
187
+ demo.load(show_profile, None, status_md)
188
+ demo.load(list_private_models, None, models_md)
189
  login_btn.click(show_profile, None, status_md)
190
  login_btn.click(list_private_models, None, models_md)
191