Commit · 19f2b1d
Parent(s): v1
- Dockerfile +21 -0
- InstallFromReadme.sh +5 -0
- README.md +13 -0
- app.py +40 -0
- docker-compose.yml +19 -0
- requirements.txt +1 -0
Dockerfile
ADDED
@@ -0,0 +1,21 @@
+FROM pytorch/pytorch:2.1.0-cuda12.1-cudnn8-devel
+ARG DEBIAN_FRONTEND=noninteractive
+
+#RUN apt-get update && \
+#    apt-get install --no-install-recommends -y sox libsox-fmt-all curl wget gcc git git-lfs build-essential libaio-dev libsndfile1 ssh ffmpeg && \
+#    apt-get clean && apt-get -y autoremove
+
+WORKDIR /app
+COPY requirements.txt .
+RUN python -m pip install --verbose -r requirements.txt
+RUN python -m pip cache purge
+
+
+RUN python -m pip install spaces
+
+COPY . .
+RUN chmod +x InstallFromReadme.sh
+RUN ./InstallFromReadme.sh
+
+
+CMD ["python","app.py"]
InstallFromReadme.sh
ADDED
@@ -0,0 +1,5 @@
+version=$(cat README.md | grep -E "sdk_version\: (.+)" | cut -d " " -f 2)
+
+echo "Installing gradio version $version";
+
+pip install gradio==$version
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: SqlServerLib
+emoji: 🖥️
+colorFrom: blue
+colorTo: gold
+sdk: gradio
+sdk_version: 5.23.1
+app_file: app.py
+pinned: false
+startup_duration_timeout: 4h
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,40 @@
+import gradio as gr
+import spaces
+from sentence_transformers import SentenceTransformer
+from sentence_transformers.util import cos_sim
+from sentence_transformers.quantization import quantize_embeddings
+
+print("Loading embedding model");
+dimensions = 768
+model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1", truncate_dim=dimensions)
+
+@spaces.GPU
+def embed(text):
+
+    query_embedding = model.encode(text, prompt_name="query")
+    return query_embedding.tolist();
+
+
+
+with gr.Blocks() as demo:
+    txtEmbed = gr.Text(label="Text to embed")
+    btnEmbed = gr.Button("embed");
+
+    search = gr.Text(label="Script to search")
+
+    results = gr.Text(label="results");
+
+    btnEmbed.click(embed, [txtEmbed], [results])
+
+
+
+
+
+if __name__ == "__main__":
+    demo.launch(
+        share=False,
+        debug=False,
+        server_port=7860,
+        server_name="0.0.0.0",
+        allowed_paths=[]
+    )
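Note: cos_sim and quantize_embeddings are imported in app.py but not used by embed() itself. As a rough illustration only (not part of this commit; the example queries are made up), embeddings produced the same way as in embed() could be compared with cos_sim like this:

    # Compare two embeddings produced the same way as in app.py
    from sentence_transformers import SentenceTransformer
    from sentence_transformers.util import cos_sim

    # Same model and truncation as app.py
    model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1", truncate_dim=768)

    # Made-up example queries
    a = model.encode("How do I rebuild a fragmented index?", prompt_name="query")
    b = model.encode("ALTER INDEX ... REBUILD on SQL Server", prompt_name="query")

    # cos_sim accepts single vectors and returns a 1x1 tensor with the cosine similarity
    print(cos_sim(a, b))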
docker-compose.yml
ADDED
@@ -0,0 +1,19 @@
+services:
+  app:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    ports:
+      - 80:7860
+    stdin_open: true # docker run -i
+    tty: true # docker run -t
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: all
+              capabilities: [gpu]
+
+
+
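After "docker compose up --build", the container runs app.py on port 7860 and the compose file publishes it on host port 80. A rough client-side sketch (not part of this commit; requires the gradio_client package, the URL is a placeholder, and "/embed" assumes Gradio's default api_name derived from the embed() function):

    import ast
    from gradio_client import Client

    client = Client("http://localhost")  # placeholder host; port 80 per docker-compose.yml

    # The output component is a gr.Text, so the embedding arrives as the
    # string form of a list and has to be parsed back
    raw = client.predict("How do I restore a database backup?", api_name="/embed")
    embedding = ast.literal_eval(raw)
    print(len(embedding))  # 768, matching truncate_dim in app.py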
requirements.txt
ADDED
@@ -0,0 +1 @@
+sentence-transformers