nada-mah committed
Commit fb50379 · verified · 1 parent: 9094e4e

Upload 3 files

Files changed (3)
  1. Dockerfile +48 -0
  2. app1.py +44 -0
  3. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,48 @@
+ # FROM ollama/ollama
+
+ # COPY ./run-ollama.sh /tmp/run-ollama.sh
+ # COPY ./Modelfile /tmp/Modelfile
+
+ # WORKDIR /tmp
+
+ # RUN chmod +x run-ollama.sh
+
+ # RUN ./run-ollama.sh
+
+ # FROM python:3.10.12 AS build
+
+ # RUN useradd -m -u 1000 user
+ # WORKDIR /app
+
+ # COPY --chown=user ./requirements.txt requirements.txt
+ # RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ # COPY --chown=user . /app
+
+ # USER user
+
+ # ENV HOME=/home/user \
+ #     PATH=/home/user/.local/bin:$PATH
+
+ # CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+
+ FROM python:3.10.12
+
+ # Install the Ollama server binary (the install script needs root, so it runs before USER).
+ RUN curl -fsSL https://ollama.com/install.sh | sh
+
+ RUN useradd -m -u 1000 user
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+ # Assumes a Modelfile is present in the build context; none is included in this commit.
+ COPY ./Modelfile /Modelfile
+
+ COPY --chown=user . /app
+
+ USER user
+
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # The app module uploaded in this commit is app1.py, so the ASGI target is app1:app.
+ CMD ["uvicorn", "app1:app", "--host", "0.0.0.0", "--port", "7860"]
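The CMD hands uvicorn the module:attribute path of the ASGI app. As a minimal local-testing sketch (not part of the commit, and assuming app1.py is importable from the current directory), the same entry point can be started from Python:

import uvicorn

# Equivalent of the container's CMD: serve the `app` object in app1.py on port 7860.
if __name__ == "__main__":
    uvicorn.run("app1:app", host="0.0.0.0", port=7860)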
app1.py ADDED
@@ -0,0 +1,44 @@
+ import os
+ import re
+ import subprocess
+ import threading
+ import time
+
+ from fastapi import FastAPI
+ from langchain_chroma import Chroma
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains import RetrievalQA
+ from langchain_community.document_loaders import TextLoader, Docx2txtLoader
+ from langchain.prompts import PromptTemplate
+ from langchain_ollama import OllamaLLM
+ # The LangChain/RAG imports above are kept from the upload; they are not used below yet.
+
+ model_name = "qwen2.5:3b"
+
+ # Run the Ollama server in a background thread so FastAPI can start alongside it.
+ def ollama_service_thread():
+     subprocess.run("ollama serve", shell=True)
+
+ OLLAMA_SERVICE_THREAD = threading.Thread(target=ollama_service_thread, daemon=True)
+ OLLAMA_SERVICE_THREAD.start()
+
+ print("Giving ollama serve a moment")
+ time.sleep(10)
+
+ # Pull the model weights before the first request needs them.
+ subprocess.run(f"ollama pull {model_name}", shell=True)
+
+ model = OllamaLLM(model=model_name)
+
+ app = FastAPI()
+
+ @app.get("/")
+ def greet_json():
+     response_text2 = model.invoke("hi")
+     # `ollama show` prints the model's metadata; capture stdout as text.
+     x1 = subprocess.run(f"ollama show {model_name}", capture_output=True, text=True, shell=True)
+     return {
+         "Hello1": f"{x1.stdout}",
+         "Hello2": f"{response_text2}",
+     }
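The fixed time.sleep(10) is a race against ollama serve's startup. A sturdier pattern, sketched here as a hypothetical helper that is not part of the commit, is to poll Ollama's default local endpoint (http://localhost:11434) until it answers:

import time
import urllib.request

def wait_for_ollama(url="http://localhost:11434", timeout=60):
    # Poll until the Ollama server responds or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            urllib.request.urlopen(url)  # the root route answers once the server is up
            return True
        except OSError:
            time.sleep(1)
    return False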
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ langchain-community
+ langchain
+ transformers
+ langchain_huggingface
+ langchain-chroma
+ langchain_ollama
+ docx2txt
+ fastapi
+ uvicorn[standard]
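Once the image is running with port 7860 published, the single route in app1.py can be exercised with a short client sketch (assuming a localhost port mapping and that the startup model pull has finished):

import json
import urllib.request

# GET the root route served by app1.py and print the JSON payload.
with urllib.request.urlopen("http://localhost:7860/") as resp:
    print(json.load(resp))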