ky32's picture
Update app.py
bcd9770 verified
raw
history blame
773 Bytes
from fastapi import FastAPI, HTTPException
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import copy
import time
import llama_cpp
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
app = FastAPI()

# Download the quantized GGUF model once at import time and keep the local
# path in a module-level name so it can be logged below (the original code
# printed `model_path`, which only existed as a keyword argument inside the
# Llama(...) call and raised NameError).
model_path = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-v0.1-GGUF",
    filename="mistral-7b-v0.1.Q4_K_M.gguf",
)

# Load the model with llama.cpp; n_ctx is the context window in tokens.
llm = Llama(
    model_path=model_path,
    n_ctx=2048,
)

print("kaki")
print(model_path)
print("kaki")
@app.get("/")
async def generate_text():
    """Answer a fixed demo prompt with the loaded llama.cpp model.

    Returns the raw completion dict produced by the model. Any failure
    during generation is translated into an HTTP 500 response whose
    detail is the exception message.
    """
    prompt = "Q: Name the planets in the solar system? A: "
    try:
        return llm(
            prompt,
            max_tokens=32,
            stop=["Q:", "\n"],
            echo=True,
        )
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))