Spaces:
Running
on
Zero
Running
on
Zero
File size: 2,309 Bytes
d1ed09d fa9a2fa d1ed09d fa9a2fa d1ed09d fa9a2fa 755230a d1ed09d 24e24f3 755230a d1ed09d 755230a d1ed09d 755230a d1ed09d fa9a2fa 755230a fa9a2fa d1ed09d 755230a d1ed09d fa9a2fa 755230a d1ed09d 755230a fa9a2fa d1ed09d 755230a fa9a2fa 755230a d1ed09d fa9a2fa 1ef8927 755230a d1ed09d fa9a2fa d1ed09d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
import os
from pathlib import Path
import json
import base64
from datetime import datetime
# Standard imports
import gradio as gr
from fastapi import FastAPI, Request
from fastapi.staticfiles import StaticFiles
import uvicorn
# Hugging Face Spaces imports
import spaces
from spaces.zero.client import _get_token
# Create the FastAPI application that hosts both the static-file mount
# and (later, via gr.mount_gradio_app) the Gradio UI.
app = FastAPI()
# Directory where process_and_save writes its output files; created up
# front so the StaticFiles mount below does not fail on a missing path.
static_dir = Path("./static")
static_dir.mkdir(parents=True, exist_ok=True)
# Serve the generated output files over HTTP at /static.
app.mount("/static", StaticFiles(directory="static"), name="static")
# Gradio only serves files from an allow-list; whitelist the static
# directory so gr.File can return paths inside it.
os.environ["GRADIO_ALLOWED_PATHS"] = str(static_dir.resolve())
@spaces.GPU(duration=60)  # reserve the ZeroGPU worker for up to 60 s per call
def process_text(text):
    """Toy GPU workload: return *text* uppercased.

    Stands in for real model inference in this demo Space.
    """
    transformed = text.upper()
    return transformed
def process_and_save(request: gr.Request, text):
    """CPU-side entry point: decode the ZeroGPU token, run the GPU
    function, and persist the result as a downloadable file.

    Parameters
    ----------
    request : gr.Request
        Incoming Gradio request; carries the ZeroGPU auth token.
    text : str
        User input to process.

    Returns
    -------
    gr.File
        Component value pointing at the freshly written output file.
    """
    # The ZeroGPU token is JWT-shaped (header.payload.signature);
    # grab the payload segment.
    token = _get_token(request)
    payload = token.split('.')[1]
    # urlsafe_b64decode requires the input length to be a multiple of 4.
    payload += '=' * (-len(payload) % 4)
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())
    # NOTE(review): this prints the full decoded token payload — fine for
    # a demo, but remove before handling anything sensitive.
    print(f"Token payload: {payload}")  # For debugging
    # Process the text using the @spaces.GPU-decorated function.
    processed_text = process_text(text)
    # Persist under static/ so the file is reachable via the /static mount.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    file_path = static_dir / f"output_{timestamp}.txt"
    # Explicit encoding: the platform default is not guaranteed to be UTF-8.
    file_path.write_text(processed_text, encoding="utf-8")
    return gr.File(value=file_path)
# ZeroGPU marker: tells Spaces that this entry point itself does not
# need a GPU — only the @spaces.GPU-decorated function it calls does.
process_and_save.zerogpu = True
# Build the Gradio UI: one text input, a submit button, a file output.
with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Enter some text")
    submit_btn = gr.Button("Process and Download")
    output = gr.File(label="Download Processed File")
    # Wire the button to the handler. gr.Request is injected by Gradio
    # automatically and is therefore not listed in `inputs`.
    submit_btn.click(
        fn=process_and_save,
        inputs=[text_input],
        outputs=output
    )
# Serve the Gradio app at the FastAPI root; ssr_mode=True enables
# server-side rendering for Spaces.
app = gr.mount_gradio_app(app, demo, path="/", ssr_mode=True)
# Run server
if __name__ == "__main__":
    # NOTE(review): mount_gradio_app above already received ssr_mode=True
    # and has run by this point; if this env var must influence mounting,
    # it needs to be set before the mount call, not here.
    os.environ["GRADIO_SSR_MODE"] = "True"
    # Bind on all interfaces at 7860, the port Hugging Face Spaces expects.
    # (Removed a stray " |" scrape artifact that made this line a syntax error.)
    uvicorn.run(app, host="0.0.0.0", port=7860)