DanielIglesias97 committed on
Commit
ce1805e
·
1 Parent(s): 22305f0

First upload to the repository of NLP_Chatbot.

Browse files
Files changed (4) hide show
  1. Dockerfile +18 -0
  2. app.py +19 -0
  3. chatbot.py +34 -0
  4. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.12 AS base
2
+
3
+ RUN useradd -m -u 1000 user
4
+ USER user
5
+ ENV HOME=/home/user \
6
+ PATH=/home/user/.local/bin:$PATH
7
+
8
+ WORKDIR $HOME/app
9
+
10
+ COPY --chown=user ./requirements.txt ./requirements.txt
11
+
12
+ RUN pip3 install -r requirements.txt
13
+
14
+ COPY --chown=user . $HOME/app
15
+
16
+ FROM base AS run
17
+
18
+ CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from chatbot import ChatBot
2
+ import gradio as gr
3
+ import time
4
+
5
+ def main():
6
+ chatbot = ChatBot()
7
+
8
+ with gr.Blocks() as demo:
9
+ chatbot_gr = gr.Chatbot(height = 350)
10
+ textbox = gr.Textbox(label="Introduce your prompt here")
11
+ start_btn = gr.Button("Submit")
12
+ stop_btn = gr.Button("Stop")
13
+
14
+ click_event = start_btn.click(chatbot.answer, [textbox, chatbot_gr], [chatbot_gr], api_name=False)
15
+ stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[click_event])
16
+
17
+ demo.launch(server_name="0.0.0.0", server_port=7860)
18
+
19
+ main()
chatbot.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
2
+ from threading import Thread
3
+
4
+ class ChatBot():
5
+
6
+ def __init__(self, device='cpu'):
7
+ self.tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B")
8
+ self.model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-1.7B")
9
+ self.device = device
10
+ self.model.to(device)
11
+
12
+ def answer(self, message, history):
13
+ if (len(message.strip())>0):
14
+ input_text = message
15
+ input_ids = self.tokenizer(input_text, return_tensors="pt")
16
+ input_ids = input_ids.to(self.device)
17
+ streamer = TextIteratorStreamer(self.tokenizer)
18
+ generation_kwargs = dict(input_ids, streamer=streamer, max_new_tokens=1500)
19
+
20
+ thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
21
+ thread.start()
22
+
23
+ if (len(history)==0):
24
+ history = []
25
+
26
+ history.append([message, ""])
27
+ for new_text_aux in streamer:
28
+ history[-1][1]+=new_text_aux
29
+
30
+ yield history
31
+
32
+ print('Waiting for the thread to finish')
33
+ thread.join()
34
+ print('The thread has finished')
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio==5.23.3
2
+ numpy==1.26.4
3
+ transformers==4.50.3
4
+ torch==2.2.1