wolfofbackstreet committed
Commit f17f776 · verified · 1 Parent(s): 174a54f

Update app.py

Files changed (1)
  1. app.py +118 -118
app.py CHANGED
@@ -1,119 +1,119 @@
- import inspect
- from typing import get_type_hints, Callable, Any
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- model_id = "unsloth/SmolLM2-135M-Instruct-GGUF"
- filename = "SmolLM2-135M-Instruct-Q8_0.gguf"
-
- tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
- model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
-
-
- def parse_docstring(func):
-     doc = inspect.getdoc(func)
-     if not doc:
-         return {"title": "Untitled", "description": ""}
-
-     lines = doc.splitlines()
-     title = next((line.replace("Title:", "").strip() for line in lines if line.startswith("Title:")), "Untitled")
-     description = "\n".join(line.strip() for line in lines if line.startswith("Description:"))
-     description = description.replace("Description:", "").strip()
-
-     return {"title": title, "description": description}
-
- def gradio_app_with_docs(func: Callable) -> Callable:
-     sig = inspect.signature(func)
-     type_hints = get_type_hints(func)
-     metadata = parse_docstring(func)
-
-     """
-     A decorator that automatically builds and launches a Gradio interface
-     based on function type hints.
-
-     Args:
-         func: A callable with type-hinted parameters and return type.
-
-     Returns:
-         The wrapped function with a `.launch()` method to start the app.
-     """
-     # Infer Gradio components from type hints
-     def _map_type(t: type) -> gr.Component:
-         if t == str:
-             return gr.Textbox(label="Input")
-         elif t == int:
-             return gr.Number(precision=0)
-         elif t == float:
-             return gr.Number()
-         elif t == bool:
-             return gr.Checkbox()
-         elif hasattr(t, "__origin__") and t.__origin__ == list: # Handle List[type]
-             elem_type = t.__args__[0]
-             if elem_type == str:
-                 return gr.Dropdown(choices=["Option1", "Option2"])
-             else:
-                 raise ValueError(f"Unsupported list element type: {elem_type}")
-         else:
-             raise ValueError(f"Unsupported type: {t}")
-
-     # Extract function signature and type hints
-     sig = inspect.signature(func)
-     type_hints = get_type_hints(func)
-
-     # Map parameters to Gradio inputs
-     inputs = []
-     for name, param in sig.parameters.items():
-         if name == "self":
-             continue # Skip self in class methods
-         param_type = type_hints.get(name, Any)
-         component = _map_type(param_type)
-         component.label = name.replace("_", " ").title()
-         inputs.append(component)
-
-     # Map return type to Gradio output
-     return_type = type_hints.get("return", Any)
-     outputs = _map_type(return_type)
-
-     # Wrap function with Gradio interface
-     interface = gr.Interface(fn=func, inputs=inputs, outputs=outputs)
-
-     with gr.Blocks() as demo:
-         gr.Markdown(f"## {metadata['title']}\n{metadata['description']}")
-         interface = gr.Interface(fn=func, inputs=inputs, outputs=outputs)
-
-     def wrapper(*args, **kwargs):
-         return func(*args, **kwargs)
-
-     wrapper.launch = lambda: demo.launch()
-     return wrapper
-
-
- @gradio_app_with_docs
- def generate_response(prompt: str) -> str:
-     """
-     Title: Super Tiny GPTQ V2 Model on CPU
-     Description: A Simple app to test out the potentials of small GPTQ LLM model.
-
-     Args:
-         prompt (str): A simple prompt.
-
-     Returns:
-         str: Simplified response.
-     """
-     inputs = tokenizer(prompt, return_tensors="pt").to("cpu") # Move inputs to CPU
-     outputs = model.generate(
-         **inputs,
-         max_new_tokens=50,
-         temperature=0.7,
-         top_p=0.9
-     )
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- # # Example usage
- # prompt = "Explain quantum computing in simple terms."
- # response = generate_response(prompt)
- # print(response)
-
-
- if __name__ == "__main__":
      generate_response.launch()
 
+ import inspect
+ from typing import get_type_hints, Callable, Any
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ model_id = "unsloth/SmolLM2-135M-Instruct-GGUF"
+ filename = "SmolLM2-135M-Instruct-Q8_0.gguf"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
+ model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
+
+
+ def parse_docstring(func):
+     doc = inspect.getdoc(func)
+     if not doc:
+         return {"title": "Untitled", "description": ""}
+
+     lines = doc.splitlines()
+     title = next((line.replace("Title:", "").strip() for line in lines if line.startswith("Title:")), "Untitled")
+     description = "\n".join(line.strip() for line in lines if line.startswith("Description:"))
+     description = description.replace("Description:", "").strip()
+
+     return {"title": title, "description": description}
+
+ def gradio_app_with_docs(func: Callable) -> Callable:
+     sig = inspect.signature(func)
+     type_hints = get_type_hints(func)
+     metadata = parse_docstring(func)
+
+     """
+     A decorator that automatically builds and launches a Gradio interface
+     based on function type hints.
+
+     Args:
+         func: A callable with type-hinted parameters and return type.
+
+     Returns:
+         The wrapped function with a `.launch()` method to start the app.
+     """
+     # Infer Gradio components from type hints
+     def _map_type(t: type) -> gr.Component:
+         if t == str:
+             return gr.Textbox(label="Input")
+         elif t == int:
+             return gr.Number(precision=0)
+         elif t == float:
+             return gr.Number()
+         elif t == bool:
+             return gr.Checkbox()
+         elif hasattr(t, "__origin__") and t.__origin__ == list: # Handle List[type]
+             elem_type = t.__args__[0]
+             if elem_type == str:
+                 return gr.Dropdown(choices=["Option1", "Option2"])
+             else:
+                 raise ValueError(f"Unsupported list element type: {elem_type}")
+         else:
+             raise ValueError(f"Unsupported type: {t}")
+
+     # Extract function signature and type hints
+     sig = inspect.signature(func)
+     type_hints = get_type_hints(func)
+
+     # Map parameters to Gradio inputs
+     inputs = []
+     for name, param in sig.parameters.items():
+         if name == "self":
+             continue # Skip self in class methods
+         param_type = type_hints.get(name, Any)
+         component = _map_type(param_type)
+         component.label = name.replace("_", " ").title()
+         inputs.append(component)
+
+     # Map return type to Gradio output
+     return_type = type_hints.get("return", Any)
+     outputs = _map_type(return_type)
+
+     # Wrap function with Gradio interface
+     interface = gr.Interface(fn=func, inputs=inputs, outputs=outputs)
+
+     with gr.Blocks() as demo:
+         gr.Markdown(f"## {metadata['title']}\n{metadata['description']}")
+         interface = gr.Interface(fn=func, inputs=inputs, outputs=outputs)
+
+     def wrapper(*args, **kwargs):
+         return func(*args, **kwargs)
+
+     wrapper.launch = lambda: demo.launch()
+     return wrapper
+
+
+ @gradio_app_with_docs
+ def generate_response(prompt: str) -> str:
+     """
+     Title: Super Tiny GGUF Model on CPU
+     Description: A Simple app to test out the potentials of small GGUF LLM model.
+
+     Args:
+         prompt (str): A simple prompt.
+
+     Returns:
+         str: Simplified response.
+     """
+     inputs = tokenizer(prompt, return_tensors="pt").to("cpu") # Move inputs to CPU
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=50,
+         temperature=0.7,
+         top_p=0.9
+     )
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # # Example usage
+ # prompt = "Explain quantum computing in simple terms."
+ # response = generate_response(prompt)
+ # print(response)
+
+
+ if __name__ == "__main__":
      generate_response.launch()
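
For reference, this is how parse_docstring resolves a docstring written in the Title:/Description: format the decorator expects. The _demo function below is a made-up illustration, not part of the commit, and assumes the definitions from app.py above are in scope:

def _demo():
    """
    Title: Example
    Description: One-line summary pulled from the docstring.
    """

print(parse_docstring(_demo))
# {'title': 'Example', 'description': 'One-line summary pulled from the docstring.'}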
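The _map_type helper also covers int, float, and bool hints. A hypothetical second app (not part of the commit) sketching how those hints would surface as Gradio components, assuming the same Gradio version and the tokenizer/model loaded above:

@gradio_app_with_docs
def generate_with_settings(prompt: str, max_new_tokens: int, temperature: float, sample: bool) -> str:
    """
    Title: Generation settings demo
    Description: Typed parameters map to a Textbox, two Number fields, and a Checkbox.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=sample,        # temperature is only honored when sampling is enabled
        temperature=temperature,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# generate_with_settings.launch()  # would render four labeled inputs and one text output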
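Since SmolLM2-135M-Instruct is a chat-tuned model, raw prompts tend to work less well than prompts wrapped in the model's chat template. A minimal sketch of the generation step, assuming the GGUF-backed tokenizer carries the model's chat template:

messages = [{"role": "user", "content": "Explain quantum computing in simple terms."}]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # append the assistant header so the model answers as the assistant
    return_tensors="pt",
)
outputs = model.generate(
    input_ids,
    max_new_tokens=50,
    do_sample=True,   # temperature/top_p only take effect when sampling
    temperature=0.7,
    top_p=0.9,
)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))  # strip the prompt tokens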