Spaces: Runtime error
Kaan committed
app
app.py CHANGED
@@ -1,5 +1,6 @@
 from fastapi import FastAPI
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from llama_cpp import Llama


 # Create an instance of the FastAPI class
@@ -8,18 +9,16 @@ app = FastAPI()
 # Define a route for the root endpoint
 @app.get("/llm")
 async def read_root():
-
-
-
-
-
-
-
-
-
-
-    print(decoded[0])
-    return {"message": decoded[0]}
+    llm = Llama.from_pretrained(
+        repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
+        filename="*q8_0.gguf",
+        verbose=False)
+    output = llm(
+        "Q: Name the planets in the solar system? A: ",  # Prompt
+        max_tokens=32,  # Generate up to 32 tokens; set to None to generate up to the end of the context window
+        stop=["Q:", "\n"],  # Stop generating just before the model would generate a new question
+        echo=True)  # Echo the prompt back in the output
+    return {"message": output}



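Taken together, the new app.py builds the Llama object inside read_root, so every request to /llm re-resolves the GGUF file from the Hub cache and reloads the weights into memory. A minimal sketch of the same app with the load hoisted to module scope, so it happens once at startup; repo_id, filename, and the sampling parameters are the same values used in the commit above, the now-unused transformers import is dropped, and the handler is switched to a plain def (a design choice, not something in the commit):

    from fastapi import FastAPI
    from llama_cpp import Llama

    # Create an instance of the FastAPI class
    app = FastAPI()

    # Load the model once at startup instead of once per request.
    # repo_id/filename are the same values used in the commit above.
    llm = Llama.from_pretrained(
        repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
        filename="*q8_0.gguf",
        verbose=False,
    )

    # Define a route for the root endpoint
    @app.get("/llm")
    def read_root():  # plain def: FastAPI runs sync handlers in a threadpool
        output = llm(
            "Q: Name the planets in the solar system? A: ",  # Prompt
            max_tokens=32,      # Up to 32 tokens; None = up to end of context window
            stop=["Q:", "\n"],  # Stop before the model starts a new question
            echo=True,          # Echo the prompt back in the output
        )
        return {"message": output}

Because the llama_cpp completion call is blocking, declaring the handler with def rather than async def lets FastAPI run it in its threadpool instead of stalling the event loop while tokens are generated.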
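llama-cpp-python's completion call returns an OpenAI-style dict, so the JSON body this endpoint sends back nests the generated text under message -> choices[0] -> text rather than returning a bare string. A hypothetical client sketch; the localhost:7860 address is an assumption (7860 is the usual Spaces port) and is not stated anywhere in the commit:

    import requests  # third-party; pip install requests

    # Assumed address for a locally running copy of this Space.
    resp = requests.get("http://localhost:7860/llm", timeout=120)
    completion = resp.json()["message"]

    # The generated text (including the echoed prompt) sits in the first choice.
    print(completion["choices"][0]["text"])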