IST199655 committed on
Commit b6e525e · 1 Parent(s): 929827c
Files changed (2)
  1. app.py +72 -14
  2. requirements.txt +3 -1
app.py CHANGED
@@ -2,10 +2,31 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 
 """
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+Copied from inference in colab notebook
 """
-client = InferenceClient("llama_lora_model_1")
-
+from unsloth.chat_templates import get_chat_template
+from unsloth import FastLanguageModel
+
+# IMPORTING MODEL AND TOKENIZER --------
+
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
+
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "llama_lora_model_1",
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit,
+)
+
+tokenizer = get_chat_template(
+    tokenizer,
+    chat_template = "llama-3.1",
+)
+FastLanguageModel.for_inference(model) # Enable native 2x faster inference
+
+# RUNNING INFERENCE --------------------
 
 def respond(
     message,
@@ -25,19 +46,56 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
+    inputs = tokenizer.apply_chat_template(
         messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+        tokenize = True,
+        add_generation_prompt = True, # Must add for generation
+        return_tensors = "pt",
+    ).to("cuda")
+
+    outputs = model.generate(input_ids = inputs, max_new_tokens = max_tokens, use_cache = True,
+                             temperature = 1.5, min_p = 0.1)
+    response = tokenizer.batch_decode(outputs)
+
+    yield response
+
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+# client = InferenceClient("llama_lora_model_1")
+
+
+# def respond(
+#     message,
+#     history: list[tuple[str, str]],
+#     system_message,
+#     max_tokens,
+#     temperature,
+#     top_p,
+# ):
+#     messages = [{"role": "system", "content": system_message}]
+
+#     for val in history:
+#         if val[0]:
+#             messages.append({"role": "user", "content": val[0]})
+#         if val[1]:
+#             messages.append({"role": "assistant", "content": val[1]})
+
+#     messages.append({"role": "user", "content": message})
+
+#     response = ""
+
+#     for message in client.chat_completion(
+#         messages,
+#         max_tokens=max_tokens,
+#         stream=True,
+#         temperature=temperature,
+#         top_p=top_p,
+#     ):
+#         token = message.choices[0].delta.content
+
+#         response += token
+#         yield response
 
 
 """
requirements.txt CHANGED
@@ -1 +1,3 @@
-huggingface_hub==0.25.2
+huggingface_hub==0.25.2
+
+unsloth