Shreyas94 committed on
Commit 9773bc0 · verified · 1 Parent(s): e1c6e0f

Update app.py

Files changed (1)
  1. app.py +14 -10
app.py CHANGED
@@ -75,16 +75,20 @@ def format_prompt(user_prompt, chat_history):
     return prompt
 
 # Model inference function
-def model_inference(user_prompt, web_search):
+def model_inference(prompt, web_search):
+    for response in fetch_response(prompt, web_search):
+        yield response
+
+def fetch_response(prompt, web_search):
     if web_search:
         # Perform web search and generate text based on the retrieved results
-        web_results = search(user_prompt)
+        web_results = search(prompt)
         web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
         client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
         generate_kwargs = dict(max_new_tokens=4000, do_sample=True)
         formatted_prompt = format_prompt(
-            f"""You are OpenGPT 4o... [USER] {user_prompt} [WEB] {web2} [OpenGPT 4o]""",
-            [(user_prompt, web2)])
+            f"""You are OpenGPT 4o... [USER] {prompt} [WEB] {web2} [OpenGPT 4o]""",
+            [(prompt, web2)])
         stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream:
@@ -95,7 +99,7 @@ def model_inference(user_prompt, web_search):
         # Use the microsoft/Phi-3-mini-4k-instruct model for generating text based on user prompts
         client = InferenceClient("microsoft/Phi-3-mini-4k-instruct")
         generate_kwargs = dict(max_new_tokens=5000, do_sample=True)
-        formatted_prompt = format_prompt(f"""You are OpenGPT 4o... [USER] {user_prompt} [OpenGPT 4o]""", [(user_prompt, )])
+        formatted_prompt = format_prompt(f"""You are OpenGPT 4o... [USER] {prompt} [OpenGPT 4o]""", [(prompt, )])
         stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream:
@@ -104,20 +108,20 @@ def model_inference(user_prompt, web_search):
             yield output
 
 # Create a chatbot interface with a Fetch button
-def fetch_response(prompt, web_search):
+def start_inference(prompt, web_search):
     for response in model_inference(prompt, web_search):
         return response
 
 chatbot = gr.Interface(
-    fn=fetch_response,
+    fn=start_inference,
     inputs=[
-        gr.Textbox(label="User Prompt"),
+        gr.Textbox(label="User Prompt", placeholder="Enter your prompt here..."),
         gr.Checkbox(label="Enable Web Search", value=False),
         "button"
     ],
-    outputs=gr.Textbox(label="Response"),
+    outputs=gr.Textbox(label="Response", placeholder="Responses will appear here..."),
     live=True
 )
 
 # Launch the Gradio interface
-chatbot.launch()
+chatbot.launch(share=True)
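
Note on the web-search branch: the comprehension that builds web2 implies each item returned by search() is a dict with 'link' and 'text' keys. A minimal sketch of that assumption follows; the example result is made up, and the real search helper is defined elsewhere in app.py.

# Hypothetical shape of one search() result, inferred from the diff's
# f"Link: {res['link']}\nText: {res['text']}" comprehension.
web_results = [
    {"link": "https://example.com/article", "text": "Snippet of the page text..."},
]

# Same joining logic as in fetch_response: one Link/Text block per result.
web2 = ' '.join(f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results)
print(web2)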
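For a quick local check of the refactored flow, the generator can also be consumed outside Gradio. This is a sketch under the assumption that app.py's functions are importable and that a Hugging Face token is configured for the InferenceClient calls; the prompt is illustrative only.

# Each yield from model_inference carries the accumulated output so far,
# so keep only the last value (web_search=False exercises the Phi-3-mini branch).
final = ""
for partial in model_inference("Explain what a unified diff is.", web_search=False):
    final = partial
print(final)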