m7n committed on
Commit
1a55407
·
verified ·
1 Parent(s): 89bc1ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -34
app.py CHANGED
@@ -4,16 +4,20 @@ import json
4
  import base64
5
  from datetime import datetime
6
 
7
- # Standard imports
8
  import gradio as gr
9
  from fastapi import FastAPI, Request
10
  from fastapi.staticfiles import StaticFiles
11
  import uvicorn
12
 
13
- # Hugging Face Spaces imports
14
  import spaces
15
  from spaces.zero.client import _get_token
16
 
 
 
 
 
 
 
17
  # Create FastAPI app
18
  app = FastAPI()
19
 
@@ -27,40 +31,30 @@ app.mount("/static", StaticFiles(directory="static"), name="static")
27
  # Tell Gradio which paths are allowed to be served
28
  os.environ["GRADIO_ALLOWED_PATHS"] = str(static_dir.resolve())
29
 
30
- @spaces.GPU(duration=4*60) # Specify GPU duration in seconds
31
  def process_text(text):
32
  """Example GPU function - in reality, this might be model inference"""
33
  return text.upper()
34
 
35
  def process_and_save(request: gr.Request, text):
36
  """Main processing function that handles tokens and calls GPU function"""
37
- # Get and decode the authentication token
38
  token = _get_token(request)
39
  payload = token.split('.')[1]
40
  payload = f"{payload}{'=' * ((4 - len(payload) % 4) % 4)}"
41
  payload = json.loads(base64.urlsafe_b64decode(payload).decode())
42
- print(f"Token payload: {payload}") # For debugging
43
-
44
- # Process the text using GPU function
45
- processed_text = process_text(text)
46
-
47
- # Save to file
48
- # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
49
- # file_path = static_dir / f"output_{timestamp}.txt"
50
- # with open(file_path, "w") as f:
51
- # f.write(processed_text)
52
 
53
- # return gr.File(value=file_path)
 
54
 
55
  # Mark main function as not requiring GPU
56
  process_and_save.zerogpu = True
57
 
58
-
59
  # Create Gradio interface
60
  with gr.Blocks() as demo:
61
  text_input = gr.Textbox(label="Enter some text")
62
  submit_btn = gr.Button("Process and Download")
63
- output = gr.File(label="Download Processed File")
64
 
65
  submit_btn.click(
66
  fn=process_and_save,
@@ -68,24 +62,9 @@ with gr.Blocks() as demo:
68
  outputs=output
69
  )
70
 
71
-
72
- # Set up environment variables (you can also set these in your Spaces config)
73
- os.environ["GRADIO_SSR_MODE"] = "True"
74
- os.environ["GRADIO_SERVER_PORT"] = "7860"
75
- os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
76
- os.environ["GRADIO_NODE_SERVER_NAME"] = "0.0.0.0"
77
-
78
-
79
-
80
-
81
- # Mount Gradio app to FastAPI with SSR mode for Spaces
82
- #app = gr.mount_gradio_app(app, demo, path="/", ssr_mode=True)
83
  app = gr.mount_gradio_app(app, demo, path="/", ssr_mode=True, node_port=7861)
84
 
85
- # Run server
86
  if __name__ == "__main__":
87
- # Set SSR mode for Spaces
88
- os.environ["GRADIO_SSR_MODE"] = "True"
89
- #uvicorn.run(app)
90
-
91
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
4
  import base64
5
  from datetime import datetime
6
 
 
7
  import gradio as gr
8
  from fastapi import FastAPI, Request
9
  from fastapi.staticfiles import StaticFiles
10
  import uvicorn
11
 
 
12
  import spaces
13
  from spaces.zero.client import _get_token
14
 
15
+ # Set up environment variables (you can also set these in your Spaces config)
16
+ os.environ["GRADIO_SSR_MODE"] = "True"
17
+ os.environ["GRADIO_SERVER_PORT"] = "7860"
18
+ os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
19
+ os.environ["GRADIO_NODE_SERVER_NAME"] = "0.0.0.0"
20
+
21
  # Create FastAPI app
22
  app = FastAPI()
23
 
 
31
  # Tell Gradio which paths are allowed to be served
32
  os.environ["GRADIO_ALLOWED_PATHS"] = str(static_dir.resolve())
33
 
34
@spaces.GPU(duration=4 * 60)  # reserve ZeroGPU hardware for up to 4 minutes per call
def process_text(text):
    """Placeholder GPU workload: return *text* uppercased.

    In a real Space this is where model inference would run on the GPU.
    """
    result = text.upper()
    return result
38
 
39
def process_and_save(request: gr.Request, text):
    """Decode the caller's ZeroGPU auth token, run the GPU helper, and return its result.

    Args:
        request: Gradio request object, used to extract the ZeroGPU token.
        text: Raw user input from the textbox.

    Returns:
        The value produced by ``process_text(text)`` (the uppercased text).

    Raises:
        IndexError: if the token is not a dot-separated JWT-style string.
    """
    # _get_token returns a JWT-style token; the payload is the middle segment.
    token = _get_token(request)
    payload = token.split('.')[1]
    # JWTs strip base64 '=' padding; re-add it so urlsafe_b64decode accepts it.
    payload = f"{payload}{'=' * ((4 - len(payload) % 4) % 4)}"
    # NOTE(review): the payload is decoded WITHOUT signature verification and is
    # only inspected locally — do not trust its contents for authorization.
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())
    # Security fix: do not print the decoded token payload — it can contain
    # identifying claims. Emit only a non-sensitive confirmation.
    print("Token payload decoded successfully")

    processed_text = process_text(text)
    return processed_text
49
 
50
  # Mark main function as not requiring GPU
51
  process_and_save.zerogpu = True
52
 
 
53
  # Create Gradio interface
54
  with gr.Blocks() as demo:
55
  text_input = gr.Textbox(label="Enter some text")
56
  submit_btn = gr.Button("Process and Download")
57
+ output = gr.Textbox(label="Output")
58
 
59
  submit_btn.click(
60
  fn=process_and_save,
 
62
  outputs=output
63
  )
64
 
65
+ # Mount Gradio app to FastAPI with SSR mode and node_port set to 7861
 
 
 
 
 
 
 
 
 
 
 
66
  app = gr.mount_gradio_app(app, demo, path="/", ssr_mode=True, node_port=7861)
67
 
68
+ # Run the server on port 7860 (only one public port on Spaces)
69
  if __name__ == "__main__":
 
 
 
 
70
  uvicorn.run(app, host="0.0.0.0", port=7860)