|
import os |
|
import re |
|
from langchain_chroma import Chroma |
|
from langchain_huggingface import HuggingFaceEmbeddings |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain.chains import RetrievalQA |
|
from langchain_community.document_loaders import TextLoader, Docx2txtLoader |
|
from langchain.prompts import PromptTemplate |
|
from langchain_ollama import OllamaLLM |
|
import subprocess |
|
|
|
# Name of the local Ollama model this service pulls and queries.
model_name = "qwen2.5:3b"

# Make sure the model is available locally before the app starts serving.
# Pass an argument list with the default shell=False to avoid shell-injection
# and quoting issues, and check=True so a failed pull fails loudly at startup
# instead of leaving the app pointing at a missing model.
subprocess.run(["ollama", "pull", model_name], check=True)

# Reuse model_name so the model that was pulled and the model that is
# queried can never drift apart.
model = OllamaLLM(model=model_name)
|
|
|
|
|
from fastapi import FastAPI


# FastAPI application instance; route handlers below register themselves
# on it via decorators.
app = FastAPI()
|
|
|
@app.get("/")
def greet_json():
    """Health-check endpoint: exercise the model and report its metadata.

    Returns a JSON object with two distinct keys:
      - "Hello1": repr of the `ollama show` CompletedProcess (model metadata).
      - "Hello2": the model's reply to the prompt 'hi'.

    Bug fixed: the original dict literal used the key "Hello1" twice, so the
    `ollama show` result was silently overwritten and never returned.
    """
    # Trivial completion to prove the model is loaded and responsive.
    response_text2 = model.invoke("hi")

    # Query model metadata. Argument list + default shell=False avoids
    # shell-injection/quoting issues; text=True captures str, not bytes.
    x1 = subprocess.run(
        ["ollama", "show", "qwen2.5:3b"],
        capture_output=True,
        text=True,
    )

    return {
        "Hello1": f"{x1}",
        "Hello2": f"{response_text2}",
    }