ctn8176 committed on
Commit
6b537f0
·
verified ·
1 Parent(s): d49480c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -6
app.py CHANGED
@@ -2,6 +2,7 @@ import torch
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
  import requests
 
5
 
6
  model_name = "Writer/palmyra-small"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -84,17 +85,18 @@ def generate_response(prompt):
84
 
85
  generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
86
 
87
- # Display image directly in the chat
88
- image_component = gr.Image(image_url, alt="Movie Poster")
89
 
90
- return f"Movie Info:\n{movie_info}\n\nGenerated Response:\n{generated_text}", image_component
91
 
92
  # Define chat function for gr.ChatInterface
93
  def chat_function(message, history):
94
- response, image_component = generate_response(message)
95
  history.append([message, response])
96
- return response, image_component
97
 
98
  # Create Gradio Chat Interface
99
  chat_interface = gr.ChatInterface(chat_function)
100
- chat_interface.launch(share=True)
 
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
  import requests
5
+ from IPython.display import display, Image
6
 
7
  model_name = "Writer/palmyra-small"
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
85
 
86
  generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
87
 
88
+ # Embed image directly in the response
89
+ display(Image(url=image_url, alt="Movie Poster"))
90
 
91
+ return f"Movie Info:\n{movie_info}\n\nGenerated Response:\n{generated_text}"
92
 
93
  # Define chat function for gr.ChatInterface
94
  def chat_function(message, history):
95
+ response = generate_response(message)
96
  history.append([message, response])
97
+ return response
98
 
99
  # Create Gradio Chat Interface
100
  chat_interface = gr.ChatInterface(chat_function)
101
+ chat_interface.launch(share=True) # Added share=True to create a public link
102
+