Update app.py
app.py
CHANGED
@@ -1,4 +1,19 @@
-
+import os
+os.system("""
+# 1. Convert safetensors -> PyTorch format
+python -c "
+from transformers import AutoModelForCausalLM
+import torch
+model = AutoModelForCausalLM.from_pretrained('deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct')
+torch.save(model.state_dict(), 'pytorch_model.bin')
+"
+
+# 2. Convert to GGUF format (requires llama.cpp)
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp
+python convert.py --outtype f16 /path/to/pytorch_model.bin
+
+""")
 from flask import Flask, request, Response, render_template
 from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
@@ -197,8 +212,8 @@ HTML_CONTENT = '''
 '''
 
 def download_model():
-    model_name = "
-    model_file = "
+    model_name = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
+    model_file = "deepseek-coder-v2-lite-instruct.Q6_K.gguf"  # change to the appropriate GGUF file name
     return hf_hub_download(model_name, filename=model_file)
 
 def initialize_model(model_path):
@@ -213,7 +228,8 @@ model_path = download_model()
 llm = initialize_model(model_path)
 
 system_prompt = (
-    "You are a
+    "You are a helpful AI coding assistant. Your mission is to help people with programming "
+    "and technical questions, providing clear and concise answers."
 )
 
 chat_history = [{"role": "system", "content": system_prompt}]
@@ -246,4 +262,4 @@ def chat():
     return Response(generate(), content_type='text/event-stream')
 
 if __name__ == '__main__':
-    app.run(debug=True, port=7860, host="0.0.0.0")
+    app.run(debug=True, port=7860, host="0.0.0.0")
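The shell block added at the top of app.py re-runs the whole safetensors to PyTorch to GGUF conversion, including a fresh git clone, on every start, because os.system executes at import time. Below is a minimal sketch of guarding that work behind a file-existence check; the output filename and the convert.py invocation are assumptions carried over from the shell block above, not code from the commit.

import os
import subprocess

def ensure_gguf(gguf_path="deepseek-coder-v2-lite-instruct.f16.gguf"):
    # Hypothetical artifact name; use whatever path convert.py actually writes.
    if os.path.exists(gguf_path):
        return gguf_path  # conversion already ran on a previous start
    if not os.path.isdir("llama.cpp"):
        subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp"], check=True)
    # Mirrors the convert.py call in the diff; newer llama.cpp checkouts ship a
    # differently named converter script, so treat this as illustrative only.
    subprocess.run(
        ["python", "llama.cpp/convert.py", "--outtype", "f16", "pytorch_model.bin"],
        check=True,
    )
    return gguf_path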
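In download_model, hf_hub_download caches the file in the local Hugging Face cache and returns its path, so repeated calls do not re-download. One caveat, which the in-line comment in the commit itself raises: the hard-coded Q6_K filename must actually exist in the repo being queried, and the official deepseek-ai repo appears to ship safetensors weights rather than GGUF files (hence the conversion block above). A sketch of the same call with explicit keyword arguments:

from huggingface_hub import hf_hub_download

def download_model():
    # repo_id and filename are the values introduced in this commit; point
    # repo_id at a repo that actually hosts the GGUF build if this one does not.
    return hf_hub_download(
        repo_id="deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
        filename="deepseek-coder-v2-lite-instruct.Q6_K.gguf",
    )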
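The body of initialize_model sits outside the hunks shown here, so the following is only a plausible sketch: with llama-cpp-python, loading a GGUF file is a single Llama(...) call, and the keyword arguments below are common defaults rather than values taken from this commit.

from llama_cpp import Llama

def initialize_model(model_path):
    # n_ctx and n_threads are illustrative; tune them for the Space's hardware.
    return Llama(model_path=model_path, n_ctx=4096, n_threads=4)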
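chat_history seeds the conversation with the new system prompt and presumably grows with each request. A minimal sketch, assuming llm is a llama_cpp.Llama instance, of how that list feeds the library's OpenAI-style streaming chat API; stream_reply is a hypothetical helper, not a function from app.py.

def stream_reply(llm, chat_history, user_message):
    chat_history.append({"role": "user", "content": user_message})
    parts = []
    for chunk in llm.create_chat_completion(messages=chat_history, stream=True):
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            parts.append(delta["content"])
            yield delta["content"]
    # Record the assistant turn so the next request sees the full history.
    chat_history.append({"role": "assistant", "content": "".join(parts)})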
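The text/event-stream content type in the final hunk indicates that /chat streams tokens back as server-sent events. The route body is not part of this diff, so this is a hedged reconstruction of that pattern using the hypothetical stream_reply helper above; only the return line is taken verbatim from the file.

@app.route('/chat', methods=['POST'])
def chat():
    user_message = request.json.get('message', '')

    def generate():
        for token in stream_reply(llm, chat_history, user_message):
            # Each SSE frame is a "data:" line followed by a blank line.
            yield f"data: {token}\n\n"

    return Response(generate(), content_type='text/event-stream')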