danishmuhammad committed on
Commit bc1ec3f · verified · 1 Parent(s): b38d421

Create app.py

Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
from unsloth import FastLanguageModel
import torch
import gradio as gr

max_seq_length = 2048  # Choose any; RoPE scaling is handled automatically by Unsloth.
dtype = None           # None for auto detection. Float16 for Tesla T4/V100, Bfloat16 for Ampere+.
load_in_4bit = True    # Use 4-bit quantization to reduce memory usage. Can be False.

# 4-bit pre-quantized models supported by Unsloth for 4x faster downloads and no OOMs.
fourbit_models = [
    "unsloth/mistral-7b-v0.3-bnb-4bit",        # New Mistral v3, 2x faster!
    "unsloth/mistral-7b-instruct-v0.3-bnb-4bit",
    "unsloth/llama-3-8b-bnb-4bit",             # Llama-3 15 trillion tokens model, 2x faster!
    "unsloth/llama-3-8b-Instruct-bnb-4bit",
    "unsloth/llama-3-70b-bnb-4bit",
    "unsloth/Phi-3-mini-4k-instruct",          # Phi-3, 2x faster!
    "unsloth/Phi-3-medium-4k-instruct",
    "unsloth/mistral-7b-bnb-4bit",
    "unsloth/gemma-7b-bnb-4bit",               # Gemma, 2.2x faster!
]  # More models at https://huggingface.co/unsloth (kept for reference; not used below).

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="danishmuhammad/ccat_2025_llama3.1_8B",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    # token = "hf_...",  # needed only for gated models such as meta-llama/Llama-2-7b-hf
)
FastLanguageModel.for_inference(model)  # Switch the model into Unsloth's inference mode.

# Alpaca-style input/response prompt template.
alpaca_prompt = """Below is an input that describes a question, answer the following question as clearly as possible. If additional context is needed, provide it briefly.


### Input:
{}

### Response:
{}"""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(layout="bubble")
    user_input = gr.Textbox()
    clear = gr.ClearButton([user_input, chatbot])

    def answers_chat(user_input, history):
        history = history or []
        formatted_input = alpaca_prompt.format(user_input, "")
        inputs = tokenizer([formatted_input], return_tensors="pt").to("cuda")

        # Generate a response with adjusted decoding parameters.
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,        # Allow longer responses.
            do_sample=True,            # Sampling must be enabled for temperature to take effect.
            temperature=0.4,           # Introduce some variation.
            repetition_penalty=1.2,    # Penalize repeated tokens.
            no_repeat_ngram_size=3,    # Avoid repeating 3-token sequences.
            use_cache=True,
            eos_token_id=tokenizer.eos_token_id,
        )
        response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

        # The decoded text echoes the prompt, so strip it to keep only the model's answer.
        formatted_response = response[len(formatted_input):].strip()

        history.append((user_input, formatted_response))
        return "", history

    user_input.submit(answers_chat, [user_input, chatbot], [user_input, chatbot])

demo.launch()
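
This commit adds only app.py; to run as a Gradio app it also needs its Python dependencies installed. Below is a minimal requirements sketch inferred from the imports above — the exact package set, the lack of version pins, and the assumption of a CUDA-capable GPU runtime (the code moves inputs to "cuda") are not part of this commit:

    # requirements.txt (sketch; unpinned, assumes a CUDA GPU runtime)
    unsloth
    torch
    gradio
    bitsandbytes  # required for the 4-bit (bnb) quantized weights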