Starchik committed
Commit 41277a7 · verified · 1 Parent(s): aef616f

Update main.py

Files changed (1)
  1. main.py +5 -16
main.py CHANGED
@@ -1,20 +1,9 @@
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- # Load model and tokenizer
- model_name = "mistralai/Codestral-22B-v0.1"
- model = AutoModelForCausalLM.from_pretrained(model_name)
- tokenizer = AutoTokenizer.from_pretrained(model_name)
-
- # No need to move model to GPU, default is CPU
- # model.to("cpu") # This line can be omitted since it's already on CPU by default
-
- # Encode input tokens
- input_text = "Your input text here"
- tokens = tokenizer(input_text, return_tensors="pt").input_ids
-
- # Generate output
- generated_ids = model.generate(tokens, max_new_tokens=1000, do_sample=True)
-
- # Decode generated tokens
- result = tokenizer.decode(generated_ids[0].tolist(), skip_special_tokens=True)
+ from mistral_inference.transformer import Transformer
+ from mistral_inference.generate import generate
+
+ model = Transformer.from_folder(mistral_models_path)
+ out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
+
+ result = tokenizer.decode(out_tokens[0])
+
  print(result)
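
Note that, as committed, the new main.py is not self-contained: `mistral_models_path`, `tokens`, and `tokenizer` are used but never defined. The sketch below fills those gaps following the documented mistral_inference / mistral_common usage pattern for Codestral-22B-v0.1; the weights directory and the prompt text are placeholder assumptions, not part of the commit.

from pathlib import Path

from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

# Assumption: the Codestral weights and tokenizer were downloaded to this folder.
mistral_models_path = Path.home() / "mistral_models" / "Codestral-22B-v0.1"

# Load the v3 tokenizer shipped with the model, then the model weights.
tokenizer = MistralTokenizer.from_file(str(mistral_models_path / "tokenizer.model.v3"))
model = Transformer.from_folder(str(mistral_models_path))

# Encode a chat-style prompt into token ids (placeholder prompt).
completion_request = ChatCompletionRequest(
    messages=[UserMessage(content="Write a function that reverses a string.")]
)
tokens = tokenizer.encode_chat_completion(completion_request).tokens

# Greedy decoding (temperature=0.0), stopping at EOS or after 64 new tokens.
out_tokens, _ = generate(
    [tokens],
    model,
    max_tokens=64,
    temperature=0.0,
    eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
)

result = tokenizer.decode(out_tokens[0])
print(result)

One behavioral difference worth noting: the removed transformers version sampled (do_sample=True, up to 1000 new tokens), while the new generate call with temperature=0.0 decodes greedily and caps output at 64 tokens, so results are deterministic but shorter.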