Sanchit2207 committed
Commit c4a4b0b · verified · 1 Parent(s): f2be31a

Update app.py

Files changed (1)
  1. app.py +7 -9
app.py CHANGED
@@ -3,25 +3,23 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 import concurrent.futures
 
-# Set device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = torch.device("cpu")
+
 
-# Load models and tokenizers
 def load_model(name):
     tokenizer = AutoTokenizer.from_pretrained(name)
     model = AutoModelForCausalLM.from_pretrained(name)
 
-    # Define pad token explicitly
+
     tokenizer.pad_token = tokenizer.eos_token
     model.config.pad_token_id = tokenizer.pad_token_id
 
     return tokenizer, model.to(device)
 
 tokenizer1, model1 = load_model("Gensyn/Qwen2.5-0.5B-Instruct")
-tokenizer2, model2 = load_model("deepset/roberta-base-squad2")
+tokenizer2, model2 = load_model("google/flan-t5-small")
 tokenizer3, model3 = load_model("microsoft/phi-1_5")
 
-# Generation function
 def generate_response(model, tokenizer, prompt):
     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(device)
     outputs = model.generate(
@@ -35,7 +33,7 @@ def generate_response(model, tokenizer, prompt):
     )
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Multi-agent handler
+
 def multi_agent_chat(user_input):
     with concurrent.futures.ThreadPoolExecutor() as executor:
         futures = [
@@ -46,13 +44,13 @@ def multi_agent_chat(user_input):
         results = [f.result() for f in futures]
     return results
 
-# Gradio Interface
+
 interface = gr.Interface(
     fn=multi_agent_chat,
     inputs=gr.Textbox(lines=2, placeholder="Ask something..."),
     outputs=[
         gr.Textbox(label="Agent 1 (Gensyn/Qwen2.5-0.5B-Instruct)"),
-        gr.Textbox(label="Agent 2 (deepset/roberta-base-squad2)"),
+        gr.Textbox(label="Agent 2 (google/flan-t5-small)"),
         gr.Textbox(label="Agent 3 (microsoft/phi-1_5)")
     ],
     title="3-Agent AI Chatbot",
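
One caveat with the replacement checkpoint: google/flan-t5-small is an encoder-decoder (seq2seq) model, and AutoModelForCausalLM has no mapping for T5-family configs, so load_model will raise a ValueError for it; the removed deepset/roberta-base-squad2 was likewise an extractive-QA model rather than a text generator. A minimal sketch of a loader that dispatches on the checkpoint's architecture instead; the helper name load_model_any is illustrative, not part of this commit:

import torch
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
)

device = torch.device("cpu")

def load_model_any(name):
    # Read the config first so we can pick the right head class.
    config = AutoConfig.from_pretrained(name)
    tokenizer = AutoTokenizer.from_pretrained(name)
    if config.is_encoder_decoder:
        # T5/FLAN-T5, BART, etc. need the seq2seq head;
        # AutoModelForCausalLM rejects their configs.
        model = AutoModelForSeq2SeqLM.from_pretrained(name)
    else:
        model = AutoModelForCausalLM.from_pretrained(name)
    # Only backfill the pad token when it is missing: seq2seq
    # tokenizers such as T5's already define their own.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = tokenizer.pad_token_id
    return tokenizer, model.to(device)

With that dispatch in place, generate_response would also need a small allowance for seq2seq models: their generate output contains only the newly generated tokens, while causal LMs echo the prompt before the completion.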
 
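Pinning the device to CPU matches GPU-less Spaces hardware and is simpler than the removed CUDA probe, but it also forecloses GPU runs. If that flexibility matters, one option is an environment override; the APP_DEVICE variable name below is an assumption, not something the app currently reads:

import os
import torch

# Default to CPU; a deployment can opt into a GPU explicitly,
# e.g. APP_DEVICE=cuda. The variable name is illustrative.
device = torch.device(os.environ.get("APP_DEVICE", "cpu"))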