# app.py
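# AI-Powered HF Space App Builder: chat with Gemini to generate, deploy, and
# debug a Hugging Face Space, plus a manual tab for direct Space management.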
import os
import time
import json
import requests
import gradio as gr
import google.generativeai as genai
from huggingface_hub import create_repo, list_models, upload_file, constants
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
# --- Helper functions for Hugging Face integration ---
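# Handlers that declare gr.OAuthProfile / gr.OAuthToken parameters receive them
# automatically from Gradio when wired to gr.LoginButton events.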
def show_profile(profile: gr.OAuthProfile | None) -> str:
if profile is None:
return "*Not logged in.*"
return f"✅ Logged in as **{profile.username}**"
def list_private_models(
profile: gr.OAuthProfile | None,
oauth_token: gr.OAuthToken | None
) -> str:
if profile is None or oauth_token is None:
return "Please log in to see your models."
try:
models = [
f"{m.id} ({'private' if m.private else 'public'})"
for m in list_models(author=profile.username, token=oauth_token.token)
]
return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)
except Exception as e:
return f"Error listing models: {e}"
def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, token: gr.OAuthToken):
repo_id = f"{profile.username}/{repo_name}"
create_repo(
repo_id=repo_id,
token=token.token,
exist_ok=True,
repo_type="space",
space_sdk=sdk
)
url = f"https://huggingface.co/spaces/{repo_id}"
iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>'
return repo_id, iframe
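# upload_file's path_or_fileobj accepts a local path, raw bytes, or a file-like
# object, so this helper serves both uploaded files and in-memory code.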
def upload_file_to_space_action(
file_obj,
path_in_repo: str,
repo_id: str,
profile: gr.OAuthProfile,
token: gr.OAuthToken
) -> str:
if not (profile and token and repo_id):
return "⚠️ Please log in and create a Space first."
try:
upload_file(
path_or_fileobj=file_obj,
path_in_repo=path_in_repo,
repo_id=repo_id,
token=token.token,
repo_type="space"
)
return f"✅ Uploaded `{path_in_repo}`"
except Exception as e:
return f"Error uploading file: {e}"
def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
jwt_url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
r = get_session().get(jwt_url, headers=build_hf_headers(token=token))
hf_raise_for_status(r)
jwt = r.json()["token"]
logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
lines, count = [], 0
with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=20) as resp:
hf_raise_for_status(resp)
for raw in resp.iter_lines():
if count >= 200:
lines.append("... truncated ...")
break
if not raw.startswith(b"data: "):
continue
payload = raw[len(b"data: "):]
try:
event = json.loads(payload.decode())
ts = event.get("timestamp", "")
txt = event.get("data", "").strip()
if txt:
lines.append(f"[{ts}] {txt}")
count += 1
except json.JSONDecodeError:
continue
return "\n".join(lines) if lines else f"No {level} logs found."
def get_build_logs_action(repo_id, profile, token):
if not (repo_id and profile and token):
return "⚠️ Please log in and create a Space first."
return _fetch_space_logs_level(repo_id, "build", token.token)
def get_container_logs_action(repo_id, profile, token):
if not (repo_id and profile and token):
return "⚠️ Please log in and create a Space first."
return _fetch_space_logs_level(repo_id, "run", token.token)
# --- Google Gemini integration with model selection ---
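# configure_gemini validates the key/model pair up front with a throwaway
# "ping" generation, so auth or model-name problems surface here rather than
# mid-workflow.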
def configure_gemini(api_key: str | None, model_name: str | None) -> str:
if not api_key:
return "Gemini API key is not set."
if not model_name:
return "Please select a Gemini model."
try:
genai.configure(api_key=api_key)
genai.GenerativeModel(model_name).generate_content("ping")
return f"Gemini configured successfully with **{model_name}**."
except Exception as e:
return f"Error configuring Gemini: {e}"
def call_gemini(prompt: str, api_key: str, model_name: str) -> str:
if not api_key or not model_name:
return "Error: Gemini API key or model not provided."
try:
genai.configure(api_key=api_key)
model = genai.GenerativeModel(model_name)
response = model.generate_content(prompt)
return response.text or "Gemini returned an empty response."
except Exception as e:
return f"Error calling Gemini API with {model_name}: {e}"
# --- AI workflow logic ---
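# ai_workflow_chat is a state machine driven by workflow_state:
#   idle -> awaiting_repo_name -> awaiting_app_description -> generating_code
#        -> uploading_code -> awaiting_log_check -> reviewing_logs -> debugging
# "reset" returns to idle from any state; login/API-key checks gate every turn.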
def ai_workflow_chat(
message: str,
history: list[list[str | None]],
hf_profile: gr.OAuthProfile | None,
hf_token: gr.OAuthToken | None,
gemini_api_key: str | None,
gemini_model: str | None,
repo_id_state: str | None,
workflow_state: str,
space_sdk: str,
preview_html: str,
container_logs: str,
build_logs: str
) -> tuple[
list[list[str | None]],
str | None,
str,
str,
str,
str
]:
history.append([message, None])
bot_message = ""
new_repo_id = repo_id_state
new_workflow = workflow_state
updated_preview = preview_html
updated_container = container_logs
updated_build = build_logs
try:
if not hf_profile or not hf_token:
bot_message = "Please log in to Hugging Face first."
new_workflow = "awaiting_login"
elif not gemini_api_key or not gemini_model:
bot_message = "Please enter your API key and select a Gemini model."
new_workflow = "awaiting_api_key"
elif (new_workflow == "idle" or "create" in message.lower()) and not new_repo_id:
bot_message = "What should the Space be called? (e.g., `my-awesome-app`)"
new_workflow = "awaiting_repo_name"
elif new_workflow == "awaiting_repo_name":
repo_name = message.strip()
if not repo_name:
bot_message = "Please provide a valid Space name."
else:
bot_message = f"Creating Space `{hf_profile.username}/{repo_name}`..."
new_repo_id, iframe_html = create_space_action(repo_name, space_sdk, hf_profile, hf_token)
updated_preview = iframe_html
bot_message += "\n✅ Space created."
new_workflow = "awaiting_app_description"
elif new_workflow in ("awaiting_app_description", "debugging"):
if new_workflow == "awaiting_app_description":
app_desc = message
bot_message = f"Generating code for a `{space_sdk}` app based on: '{app_desc}'..."
prompt = f"""
You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK.
Generate a full, single-file Python app based on:
'{app_desc}'
Return **only** the code block (```python ...```).
"""
else:
debug_instr = message
logs = get_container_logs_action(new_repo_id, hf_profile, hf_token)
bot_message = f"Analyzing logs and applying fixes: '{debug_instr}'..."
prompt = f"""
You are debugging a {space_sdk} Space.
Logs:
{logs}
User instructions:
'{debug_instr}'
Generate a fixed, single-file Python app. Return only the ```python``` code block.
"""
new_workflow = "generating_code"
resp = call_gemini(prompt, gemini_api_key, gemini_model)
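# Pull the code out of the ```python fence; rfind locates the closing fence so
# any prose Gemini appends after the block is discarded.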
start = resp.find("```python")
end = resp.rfind("```")
if start != -1 and end != -1 and end > start:
code = resp[start + len("```python"):end].strip()
bot_message += "\n✅ Code generated. Uploading..."
new_workflow = "uploading_code"
# Encode to bytes so upload_file treats the generated code as file content,
# not as a local filesystem path.
upload_log = upload_file_to_space_action(code.encode("utf-8"), "app.py", new_repo_id, hf_profile, hf_token)
bot_message += "\n" + upload_log
if "✅ Uploaded" in upload_log:
bot_message += "\nThe Space is now rebuilding. Say 'check logs' to fetch them."
new_workflow = "awaiting_log_check"
updated_preview = f'<iframe src="https://huggingface.co/spaces/{new_repo_id}" width="100%" height="500px"></iframe>'
else:
new_workflow = "idle"
else:
bot_message += f"\n⚠️ Could not parse code from Gemini.\nResponse:\n{resp}"
new_workflow = "awaiting_app_description"
elif new_workflow == "awaiting_log_check" and "check logs" in message.lower():
bot_message = "Fetching logs..."
updated_container = get_container_logs_action(new_repo_id, hf_profile, hf_token)
updated_build = get_build_logs_action(new_repo_id, hf_profile, hf_token)
bot_message += "\n✅ Logs updated. Describe any errors or say 'generate fix'."
new_workflow = "reviewing_logs"
elif new_workflow == "reviewing_logs" and "generate fix" in message.lower():
latest = get_container_logs_action(new_repo_id, hf_profile, hf_token)
if "Error" not in latest and "Exception" not in latest:
bot_message = "No clear error found. What should I fix?"
new_workflow = "reviewing_logs"
else:
bot_message = "Generating a fix based on logs..."
new_workflow = "debugging"
elif "reset" in message.lower():
bot_message = "Workflow reset."
new_repo_id = None
updated_preview = "<p>No Space created yet.</p>"
updated_container = ""
updated_build = ""
new_workflow = "idle"
else:
bot_message = "Command not recognized. Try 'create', 'check logs', 'generate fix', or 'reset'."
except Exception as e:
bot_message = f"Unexpected error: {e}"
new_workflow = "idle"
if history and history[-1][1] is None:
history[-1][1] = bot_message
else:
history.append([None, bot_message])
return history, new_repo_id, new_workflow, updated_preview, updated_container, updated_build
# --- Build the Gradio UI ---
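# gr.State values below are per-session; they thread the login credentials,
# Gemini config, and workflow progress through every event handler.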
with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
hf_profile = gr.State(None)
hf_token = gr.State(None)
gemini_key = gr.State(None)
gemini_model = gr.State("gemini-2.5-pro-preview-03-25")
repo_id = gr.State(None)
workflow = gr.State("idle")
sdk_state = gr.State("gradio")
with gr.Row():
# Sidebar
with gr.Column(scale=1, min_width=300):
gr.Markdown("## Hugging Face Login")
login_status = gr.Markdown("*Not logged in.*")
login_btn = gr.LoginButton(variant="huggingface")
ai_builder_tab.load(show_profile, outputs=login_status)
login_btn.click(show_profile, outputs=login_status)
# A bare lambda receives no OAuth injection; annotated parameters are required.
def _store_login(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None):
    return profile, oauth_token
login_btn.click(_store_login, outputs=[hf_profile, hf_token])
gr.Markdown("## Google AI Studio API Key")
gemini_input = gr.Textbox(label="API Key", type="password")
gemini_status = gr.Markdown("")
gemini_input.change(lambda k: k, inputs=gemini_input, outputs=gemini_key)
gr.Markdown("## Gemini Model")
model_selector = gr.Radio(
choices=[
("Gemini 2.5 Flash Preview 04-17", "gemini-2.5-flash-preview-04-17"),
("Gemini 2.5 Pro Preview 03-25", "gemini-2.5-pro-preview-03-25")
],
value="gemini-2.5-pro-preview-03-25",
label="Select model"
)
model_selector.change(lambda m: m, inputs=model_selector, outputs=gemini_model)
# re-validate Gemini on page load and whenever the key or model changes
for evt in (ai_builder_tab.load, gemini_input.change, model_selector.change):
    evt(
        configure_gemini,
        inputs=[gemini_key, gemini_model],
        outputs=[gemini_status]
    )
gr.Markdown("## Space SDK")
sdk_selector = gr.Radio(choices=["gradio", "streamlit"], value="gradio", label="Template SDK")
sdk_selector.change(lambda s: s, inputs=sdk_selector, outputs=sdk_state)
# Main content
with gr.Column(scale=3):
chatbot = gr.Chatbot()
user_input = gr.Textbox(placeholder="Type your message…")
send_btn = gr.Button("Send", interactive=False)
# gate the Send button on login + API key + model selection
def _can_send(p, k, m):
    return gr.update(interactive=bool(p and k and m))
for evt in (ai_builder_tab.load, login_btn.click, gemini_input.change, model_selector.change):
    evt(_can_send, inputs=[hf_profile, gemini_key, gemini_model], outputs=[send_btn])
iframe = gr.HTML("<p>No Space created yet.</p>")
build_txt = gr.Textbox(label="Build Logs", lines=10, interactive=False)
run_txt = gr.Textbox(label="Container Logs", lines=10, interactive=False)
def wrap_chat(msg, history, prof, tok, key, model, rid, wf, sdk, prev, run_l, build_l):
new_hist, new_rid, new_wf, new_prev, new_run, new_build = ai_workflow_chat(
msg, history, prof, tok, key, model, rid, wf, sdk, prev, run_l, build_l
)
return [(u or "", v or "") for u, v in new_hist], new_rid, new_wf, new_prev, new_run, new_build
send_btn.click(
wrap_chat,
inputs=[
user_input, chatbot,
hf_profile, hf_token,
gemini_key, gemini_model,
repo_id, workflow, sdk_state,
iframe, run_txt, build_txt
],
outputs=[
chatbot,
repo_id, workflow,
iframe, run_txt, build_txt
]
)
with gr.Blocks(title="Manual Hugging Face Space Manager") as manual_control_tab:
manual_profile = gr.State(None)
manual_token = gr.State(None)
manual_repo = gr.State(None)
gr.Markdown("## Manual Sign-In & Space Management")
manual_login_btn = gr.LoginButton(variant="huggingface", size="lg")
manual_status = gr.Markdown("*Not logged in.*")
manual_models = gr.Markdown()
manual_control_tab.load(show_profile, outputs=manual_status)
manual_login_btn.click(show_profile, outputs=manual_status)
manual_control_tab.load(list_private_models, outputs=manual_models)
manual_login_btn.click(list_private_models, outputs=manual_models)
manual_login_btn.click(_store_login, outputs=[manual_profile, manual_token])
manual_repo_name = gr.Textbox(label="New Space name", placeholder="my-space")
manual_sdk_sel = gr.Radio(choices=["gradio", "streamlit"], value="gradio", label="Template SDK")
manual_create_btn = gr.Button("Create Space", interactive=False)
manual_create_logs = gr.Textbox(label="Create Logs", lines=3, interactive=False)
manual_preview = gr.HTML("<p>No Space created yet.</p>")
for evt in (manual_control_tab.load, manual_login_btn.click):
    evt(
        lambda p, t: gr.update(interactive=bool(p and t)),
        inputs=[manual_profile, manual_token],
        outputs=[manual_create_btn]
    )
manual_create_btn.click(
create_space_action,
inputs=[manual_repo_name, manual_sdk_sel, manual_profile, manual_token],
outputs=[manual_repo, manual_preview]
).then(lambda _: "", outputs=[manual_create_logs])
manual_path = gr.Textbox(label="Path in Space", value="app.py")
manual_file = gr.File(label="Select file")
manual_up_btn = gr.Button("Upload File", interactive=False)
manual_up_log = gr.Textbox(label="Upload Logs", lines=2, interactive=False)
for evt in (manual_control_tab.load, manual_login_btn.click):
    evt(
        lambda rid, p, t: gr.update(interactive=bool(rid and p and t)),
        inputs=[manual_repo, manual_profile, manual_token],
        outputs=[manual_up_btn]
    )
manual_up_btn.click(
upload_file_to_space_action,
inputs=[manual_file, manual_path, manual_repo, manual_profile, manual_token],
outputs=[manual_up_log]
)
manual_build_btn = gr.Button("Fetch Build Logs", interactive=False)
manual_container_btn = gr.Button("Fetch Container Logs", interactive=False)
manual_build_txt = gr.Textbox(label="Build Logs", lines=10, interactive=False)
manual_container_txt = gr.Textbox(label="Container Logs", lines=10, interactive=False)
for btn in (manual_build_btn, manual_container_btn):
manual_control_tab.load(
lambda rid, p, t: gr.update(interactive=bool(rid and p and t)),
inputs=[manual_repo, manual_profile, manual_token],
outputs=[btn]
)
manual_login_btn.click(
lambda rid, p, t: gr.update(interactive=bool(rid and p and t)),
inputs=[manual_repo, manual_profile, manual_token],
outputs=[btn]
)
manual_build_btn.click(
get_build_logs_action,
inputs=[manual_repo, manual_profile, manual_token],
outputs=[manual_build_txt]
)
manual_container_btn.click(
get_container_logs_action,
inputs=[manual_repo, manual_profile, manual_token],
outputs=[manual_container_txt]
)
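# gr.TabbedInterface accepts pre-built Blocks and renders each as a tab.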
demo = gr.TabbedInterface(
[ai_builder_tab, manual_control_tab],
["AI App Builder", "Manual Control"]
)
if __name__ == "__main__":
demo.launch()