test
Browse files- README.md +1 -1
- app.py +0 -6
- peft_app.py +123 -0
- requirements.txt +2 -1
README.md
CHANGED
@@ -5,7 +5,7 @@ colorFrom: gray
|
|
5 |
colorTo: green
|
6 |
sdk: gradio
|
7 |
sdk_version: 5.6.0
|
8 |
-
app_file:
|
9 |
pinned: false
|
10 |
python_version: 3.10.0
|
11 |
---
|
|
|
5 |
colorTo: green
|
6 |
sdk: gradio
|
7 |
sdk_version: 5.6.0
|
8 |
+
app_file: peft_app.py
|
9 |
pinned: false
|
10 |
python_version: 3.10.0
|
11 |
---
|
app.py
CHANGED
@@ -1,11 +1,5 @@
|
|
1 |
import gradio as gr
|
2 |
|
3 |
-
# Load model directly
|
4 |
-
# from transformers import AutoModel, AutoTokenizer
|
5 |
-
|
6 |
-
# model = AutoModel.from_pretrained("ID2223JR/gguf_model")
|
7 |
-
# tokenizer = AutoTokenizer.from_pretrained("ID2223JR/gguf_model")
|
8 |
-
|
9 |
from llama_cpp import Llama
|
10 |
|
11 |
llm = Llama.from_pretrained(
|
|
|
1 |
import gradio as gr
|
2 |
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
from llama_cpp import Llama
|
4 |
|
5 |
llm = Llama.from_pretrained(
|
peft_app.py
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
from transformers import TextStreamer
from unsloth import FastLanguageModel

# ---- Model-loading configuration for the LoRA checkpoint ----
max_seq_length = 2048  # context window requested at load time
dtype = None           # None lets unsloth auto-detect the compute dtype
load_in_4bit = True    # 4-bit quantization to fit modest GPUs

# NOTE(review): app.py in this repo references "ID2223JR/..." while this uses
# "ID2223JE" — confirm the organization id is intentional.
peft_model_id = "ID2223JE/lora_model"

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=peft_model_id,
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
# Switch the model into unsloth's optimized inference mode.
FastLanguageModel.for_inference(model)


# Shared, module-level accumulator of "name, N grams" strings.
# One list for the whole process: every connected user sees the same list.
ingredients_list = []
|
24 |
+
|
25 |
+
|
26 |
+
# Function to add ingredient
def add_ingredient(ingredient, quantity):
    """Append ``ingredient`` with its gram quantity to the shared list.

    Args:
        ingredient: Name typed by the user; falsy values are ignored.
        quantity: Amount in grams from the number widget; may be ``None``
            when the widget is empty.

    Returns:
        A 3-tuple for Gradio outputs: the newline-joined ingredients text,
        plus two ``gr.update`` objects that clear and re-enable the
        ingredient and quantity inputs.
    """
    # Guard None explicitly: gr.Number yields None when cleared, and the
    # original `int(quantity)` would raise TypeError on it.
    if ingredient and quantity is not None and int(quantity) > 0:
        ingredients_list.append(f"{ingredient}, {quantity} grams")
    return (
        "\n".join(ingredients_list),
        gr.update(value="", interactive=True),
        gr.update(value=None, interactive=True),
    )
|
35 |
+
|
36 |
+
|
37 |
+
# Function to enable/disable add button
def validate_inputs(ingredient, quantity):
    """Return a ``gr.update`` enabling the Add button only for valid inputs.

    Wired to ``.change`` of both inputs. ``quantity`` is ``None`` while the
    number box is empty (its initial state), and ``None > 0`` raises
    TypeError — so the None check must come before the comparison.
    """
    if ingredient and quantity is not None and quantity > 0:
        return gr.update(interactive=True)
    return gr.update(interactive=False)
|
42 |
+
|
43 |
+
|
44 |
+
# Function to handle model submission
def submit_to_model():
    """Generate a recipe suggestion from the accumulated ingredients.

    Reads the module-level ``ingredients_list`` (no Gradio inputs).

    Returns:
        The decoded completion text, or an error string when no
        ingredients have been added yet.
    """
    if not ingredients_list:
        return "Ingredients list is empty! Please add ingredients first."

    # Join ingredients into a single prompt (plain string: no placeholders,
    # so the original f-prefix was unnecessary).
    prompt = "Using the following ingredients, suggest a recipe:\n\n" + "\n".join(
        ingredients_list
    )

    messages = [
        {
            "role": "system",
            "content": "You are a world-renowned chef, celebrated for your expertise in creating delectable dishes from diverse cuisines. You have a vast knowledge of ingredients, cooking techniques, and dietary preferences. Your role is to suggest personalized recipes based on the ingredients available, dietary restrictions, or specific meal requests. Please provide clear, step-by-step instructions and any useful tips to enhance the dish's flavor or presentation. Begin by introducing the recipe and why it’s a great choice.",
        },
        {"role": "user", "content": prompt},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,  # Must add for generation
        return_tensors="pt",
    ).to(model.device)  # generation fails if the prompt tensor stays on CPU while the model is on GPU
    # Streams tokens to stdout as they are produced (server-side log only).
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)

    output_ids = model.generate(
        input_ids=inputs,
        streamer=text_streamer,
        max_new_tokens=128,
        use_cache=True,
        temperature=1.5,
        min_p=0.1,
    )
    # The original returned the raw token-id tensor, which Gradio would
    # render as a tensor repr. Decode only the newly generated tokens
    # (everything after the prompt) into display text.
    return tokenizer.decode(
        output_ids[0][inputs.shape[-1]:], skip_special_tokens=True
    )
|
77 |
+
|
78 |
+
|
79 |
+
# App
def app():
    """Assemble the Gradio Blocks UI for the recipe assistant and return it
    (the caller is responsible for launching)."""
    with gr.Blocks() as demo:
        with gr.Row():
            name_box = gr.Textbox(
                label="Ingredient", placeholder="Enter ingredient name"
            )
            grams_box = gr.Number(label="Quantity (grams)", value=None)

        add_btn = gr.Button("Add Ingredient", interactive=False)
        list_box = gr.Textbox(label="Ingredients List", lines=10, interactive=False)

        with gr.Row():
            submit_btn = gr.Button("Submit")
            recipe_box = gr.Textbox(
                label="Recipe Suggestion", lines=10, interactive=False
            )

        # Re-check validity (and thus the Add button state) on every edit
        # of either input.
        pair = [name_box, grams_box]
        name_box.change(validate_inputs, pair, add_btn)
        grams_box.change(validate_inputs, pair, add_btn)

        # Append the ingredient, then clear and re-enable both inputs.
        add_btn.click(add_ingredient, pair, [list_box, name_box, grams_box])

        # Ask the model for a recipe; it reads the module-level
        # ingredients_list itself, so no Gradio inputs are passed.
        submit_btn.click(submit_to_model, inputs=None, outputs=recipe_box)

    return demo
|
120 |
+
|
121 |
+
|
122 |
+
# Build the interface at import time so hosting platforms (e.g. HF Spaces)
# can discover the module-level `demo`, but only start the server when this
# file is executed directly.
demo = app()

if __name__ == "__main__":
    demo.launch()
|
requirements.txt
CHANGED
@@ -2,4 +2,5 @@ gradio==5.1.0
|
|
2 |
llama-cpp-python==0.3.2
|
3 |
transformers==4.46.3
|
4 |
torch==2.5.1
|
5 |
-
huggingface_hub==0.25.2
|
|
|
|
2 |
llama-cpp-python==0.3.2
|
3 |
transformers==4.46.3
|
4 |
torch==2.5.1
|
5 |
+
huggingface_hub==0.25.2
|
6 |
+
unsloth[cu121-torch251] @ git+https://github.com/unslothai/unsloth.git
|