Update app.py
app.py
CHANGED
@@ -1,41 +1,12 @@
 import gradio as gr
 import os
-import time
-from collections import defaultdict
 
-#
+# Fetch token and model information
 token = os.environ["TOKEN"]
 model = os.environ["MODEL"]
 
-#
-request_log = defaultdict(list)
-
-def rate_limiter(fn):
-    def wrapper(*args, **kwargs):
-        user_ip = "user_ip_placeholder"  # Replace with actual IP retrieval logic
-        current_time = time.time()
-
-        # Keep only recent requests within the last minute
-        request_log[user_ip] = [t for t in request_log[user_ip] if current_time - t < 60]
-
-        if len(request_log[user_ip]) >= 5:  # Limit to 5 requests per minute
-            return "Rate limit exceeded. Please try again later."
-
-        request_log[user_ip].append(current_time)
-        return fn(*args, **kwargs)
-    return wrapper
-
-# Load your model from Gradio Spaces and apply the rate limiter
-@rate_limiter
-def limited_model_function(*args, **kwargs):
-    return demo(*args, **kwargs)
-
+# Load the model from Hugging Face Spaces
 demo = gr.load(model, src="spaces", token=token)
 
-
-
-    show_error=False,
-    quiet=True,
-    debug=False,
-    fn=limited_model_function  # Wrap the function with rate limiting
-)
+# Launch without 'fn', as it's not a valid argument for the launch method
+demo.launch(show_api=False, show_error=False, quiet=True, debug=False)
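Note on the removed rate limiter: the original wrapper never resolved a real client address (it used a placeholder IP), and launch() does not accept an fn argument, which is why the commit drops it. If per-IP rate limiting is still wanted, one possible approach, not part of this commit, is to attach the check to a prediction function that takes a gr.Request parameter, since Gradio injects the request object into parameters annotated with that type and exposes the caller's host via request.client.host. The sketch below is illustrative only; predict is a hypothetical stand-in, not the Space's actual model call.

import time
from collections import defaultdict

import gradio as gr

# Per-IP timestamps of recent requests
request_log = defaultdict(list)

def predict(message, request: gr.Request):
    # Gradio injects the request; client.host is the caller's address.
    user_ip = request.client.host
    now = time.time()
    # Keep only timestamps from the last 60 seconds, then enforce 5 requests per minute,
    # mirroring the limits in the removed code.
    request_log[user_ip] = [t for t in request_log[user_ip] if now - t < 60]
    if len(request_log[user_ip]) >= 5:
        return "Rate limit exceeded. Please try again later."
    request_log[user_ip].append(now)
    return message  # stand-in for the real model call

demo = gr.Interface(fn=predict, inputs="text", outputs="text")
demo.launch(show_api=False, show_error=False, quiet=True, debug=False)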