soiz1 committed (verified)
Commit: f3cbed6
Parent(s): 598d83c

Update app.py

Files changed (1): app.py (+2, -18)
app.py CHANGED
@@ -1,19 +1,3 @@
- import os
- os.system("""
- # 1. Convert safetensors -> PyTorch format
- python -c "
- from transformers import AutoModelForCausalLM
- import torch
- model = AutoModelForCausalLM.from_pretrained('deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct')
- torch.save(model.state_dict(), 'pytorch_model.bin')
- "
-
- # 2. Convert to GGUF format (requires llama.cpp)
- git clone https://github.com/ggerganov/llama.cpp
- cd llama.cpp
- python convert.py --outtype f16 /app/pytorch_model.bin
-
- """)
  from flask import Flask, request, Response, render_template
  from huggingface_hub import hf_hub_download
  from llama_cpp import Llama
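Note on the removed block: it tried to build a GGUF file at application start-up by re-saving the safetensors checkpoint as pytorch_model.bin and then calling llama.cpp from os.system. If that conversion is ever needed again, the minimal sketch below shows the usual offline approach; the convert_hf_to_gguf.py script name and its --outtype/--outfile flags are assumptions about the llama.cpp revision you check out, not code from this repository.

# Offline conversion sketch (run outside the Space, not in app.py).
import subprocess
from huggingface_hub import snapshot_download

# Download the original safetensors checkpoint to a local directory.
model_dir = snapshot_download("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct")

# Convert the whole model directory (not a single pytorch_model.bin) to GGUF.
subprocess.run(
    [
        "python", "llama.cpp/convert_hf_to_gguf.py", model_dir,
        "--outtype", "f16",
        "--outfile", "deepseek-coder-v2-lite-instruct-f16.gguf",
    ],
    check=True,
)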
@@ -211,8 +195,8 @@ HTML_CONTENT = '''
  </html>
  '''
  def download_model():
-     model_name = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
-     model_file = "deepseek-coder-v2-lite-instruct.Q6_K.gguf" # or another quantized version
+     model_name = "bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF"
+     model_file = "DeepSeek-Coder-V2-Lite-Instruct-Q6_K.gguf" # or another quantized version
      return hf_hub_download(repo_id=model_name, filename=model_file)

  def initialize_model():
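For context, the updated download_model() pulls a pre-quantized GGUF straight from the bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF repository, so no conversion happens inside the Space. The sketch below shows how it would typically be wired into initialize_model(); since that function's body is not part of this diff, the Llama(...) arguments (n_ctx, n_gpu_layers) are illustrative assumptions rather than the Space's actual code.

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

def download_model():
    # Fetch the already-quantized GGUF file via the Hugging Face Hub cache.
    model_name = "bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF"
    model_file = "DeepSeek-Coder-V2-Lite-Instruct-Q6_K.gguf"
    return hf_hub_download(repo_id=model_name, filename=model_file)

def initialize_model():
    # llama-cpp-python loads the GGUF directly; no PyTorch or conversion step needed.
    model_path = download_model()
    return Llama(model_path=model_path, n_ctx=4096, n_gpu_layers=-1)  # assumed settings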