syedmoinms committed on
Commit
ea47c25
·
verified ·
1 Parent(s): fa3ae76

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -3,16 +3,22 @@ from telegram import Update
3
  from telegram.ext import Application, CommandHandler, MessageHandler, filters, CallbackContext
4
 
5
  # ✅ Hugging Face Model Path
6
- model_path = "TheBloke/Pygmalion-13B-SuperHOT-8K-GPTQ"
 
7
 
8
  # ✅ Load Model
9
- model = Llama.from_pretrained(model_path, model_file="model.gguf", n_ctx=4096)
10
 
11
  # ✅ Telegram Bot Token
12
  TELEGRAM_BOT_TOKEN = "7881901341:AAEaE5gndeORmCuyzSwOyf2ELFLXHneCpiw"
13
 
14
  def chat(prompt):
15
- output = model(prompt, max_tokens=200, temperature=0.7, top_p=0.9)
 
 
 
 
 
16
  return output["choices"][0]["text"]
17
 
18
  # ✅ Telegram Commands
 
3
  from telegram.ext import Application, CommandHandler, MessageHandler, filters, CallbackContext
4
 
5
  # ✅ Hugging Face Model Path
6
+ model_path = "/app/models" # Tumhara actual model path
7
+ model_file = "model.gguf" # Model file name
8
 
9
  # ✅ Load Model
10
+ model = Llama(model_path=f"{model_path}/{model_file}", n_ctx=4096)
11
 
12
  # ✅ Telegram Bot Token
13
  TELEGRAM_BOT_TOKEN = "7881901341:AAEaE5gndeORmCuyzSwOyf2ELFLXHneCpiw"
14
 
15
  def chat(prompt):
16
+ output = model(
17
+ prompt=prompt, # Yeh correct syntax hai
18
+ max_tokens=200,
19
+ temperature=0.7,
20
+ top_p=0.9
21
+ )
22
  return output["choices"][0]["text"]
23
 
24
  # ✅ Telegram Commands