nazmul5836 committed on
Commit
4e1a807
·
verified ·
1 Parent(s): 52a3f34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -20
app.py CHANGED
@@ -1,24 +1,18 @@
1
- from mistral_inference.transformer import Transformer
2
- from mistral_inference.generate import generate
3
 
4
- from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
5
- from mistral_common.protocol.instruct.messages import UserMessage, TextChunk, ImageURLChunk
6
- from mistral_common.protocol.instruct.request import ChatCompletionRequest
7
 
8
- tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
9
- model = Transformer.from_folder(mistral_models_path)
 
 
 
 
10
 
11
- url = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
12
- prompt = "Describe the image."
13
 
14
- completion_request = ChatCompletionRequest(messages=[UserMessage(content=[ImageURLChunk(image_url=url), TextChunk(text=prompt)])])
15
-
16
- encoded = tokenizer.encode_chat_completion(completion_request)
17
-
18
- images = encoded.images
19
- tokens = encoded.tokens
20
-
21
- out_tokens, _ = generate([tokens], model, images=[images], max_tokens=256, temperature=0.35, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
22
- result = tokenizer.decode(out_tokens[0])
23
-
24
- print(result)
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
2
 
3
+ # Hugging Face Model লোড করা
4
+ model_id = "deepseek-ai/DeepSeek-R1"
 
5
 
6
+ model = AutoModelForCausalLM.from_pretrained(
7
+ model_id,
8
+ trust_remote_code=True,
9
+ torch_dtype="auto", # Automatic dtype (no FP8)
10
+ low_cpu_mem_usage=True # কম মেমোরি ব্যবহার করবে
11
+ )
12
 
13
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
14
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
15
 
16
+ # টেস্ট রান
17
+ output = pipe("Hello, who are you?", max_length=100)
18
+ print(output)