Kaan committed
Commit a5148e9 · verified · 1 Parent(s): e3d6e0d

Update app.py

Files changed (1):
  1. app.py +2 -7
app.py CHANGED
@@ -9,16 +9,11 @@ app = FastAPI()
 async def install_model():
     #hf_hub_download(repo_id="TheBloke/Mistral-7B-v0.1-GGUF", filename="mistral-7b-v0.1.Q4_K_M.gguf")
     prompt = "Once upon a time, there was a"
-    # Tokenize the prompt
+    tokenizer = AutoTokenizer.from_pretrained("./mistral-7b-v0.1.Q4_K_M.gguf")
+    model = AutoModelForCausalLM.from_pretrained("./mistral-7b-v0.1.Q4_K_M.gguf")
     inputs = tokenizer(prompt, return_tensors="pt")
-
-    # Generate text
     output = model.generate(input_ids=inputs["input_ids"], max_length=50, num_return_sequences=3, temperature=0.7)
-
-    # Decode the generated sequences
     generated_texts = tokenizer.batch_decode(output, skip_special_tokens=True)
-
-    # Print the generated sequences
     for i, text in enumerate(generated_texts):
         print(f"Generated Text {i+1}: {text}")
     return generated_texts
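Note on the new loading code: `from_pretrained` expects a Hub repo id or a model directory, so pointing it straight at a local `.gguf` file is unlikely to load as written, and sampling three sequences at `temperature=0.7` requires `do_sample=True`. Below is a minimal sketch of one way to express the same intent, assuming a transformers version with GGUF support (the `gguf_file` argument) and reusing the `TheBloke/Mistral-7B-v0.1-GGUF` repo already referenced in the commented-out `hf_hub_download` call; this is an illustration, not the code from the commit.

# Sketch only: assumes transformers >= 4.41 with GGUF support and the
# `gguf` package installed; transformers dequantizes the GGUF weights on load.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "TheBloke/Mistral-7B-v0.1-GGUF"  # repo from the commented hf_hub_download call
gguf_file = "mistral-7b-v0.1.Q4_K_M.gguf"  # quantized weights file

# Pass the repo id plus gguf_file; a bare local .gguf path is not a valid
# from_pretrained argument.
tokenizer = AutoTokenizer.from_pretrained(repo_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(repo_id, gguf_file=gguf_file)

prompt = "Once upon a time, there was a"
inputs = tokenizer(prompt, return_tensors="pt")

# do_sample=True is required here: greedy decoding raises an error when
# num_return_sequences > 1, and temperature only applies when sampling.
output = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=50,
    num_return_sequences=3,
    temperature=0.7,
    do_sample=True,
)
generated_texts = tokenizer.batch_decode(output, skip_special_tokens=True)
for i, text in enumerate(generated_texts):
    print(f"Generated Text {i+1}: {text}")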