Update app.py
app.py CHANGED
@@ -48,13 +48,23 @@ def get_movie_info(movie_title):
             # Convert poster_path to a complete image URL
             # image_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else ""
 
-            return
+            return {
+                "title": title,
+                "year": year,
+                "genre": genre,
+                "tmdb_link": tmdb_link,
+            }
 
         else:
-            return "Movie not found"
+            return {"error": "Movie not found"}
 
     except Exception as e:
-        return f"Error: {e}"
+        return {"error": f"Error: {e}"}
+
+def process_image(movie_info):
+    # Process the image, return image_url
+    # For now, let's just return a placeholder
+    return "https://via.placeholder.com/150"
 
 def generate_response(prompt):
     input_text_template = (
@@ -67,8 +77,17 @@ def generate_response(prompt):
     # Call the get_movie_info function to enrich the response
     movie_info = get_movie_info(prompt)
 
+    if "error" in movie_info:
+        return f"Error: {movie_info['error']}", None
+
+    # Process the image separately
+    image_url = process_image(movie_info)
+
     # Concatenate the movie info with the input template
-    input_text_template +=
+    input_text_template += (
+        f" Movie Info: Title: {movie_info['title']}, Year: {movie_info['year']}, "
+        f"Genre: {movie_info['genre']}\nFind more info here: {movie_info['tmdb_link']}"
+    )
 
     model_inputs = tokenizer(input_text_template, return_tensors="pt").to(device)
 
@@ -84,19 +103,19 @@ def generate_response(prompt):
 
     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
-
-
-
-    return f"Movie Info:\n{movie_info}\n\nGenerated Response:\n{generated_text}\n"
+    return f"Movie Info:\n{movie_info['title']}, {movie_info['year']}, {movie_info['genre']}\n\n" \
+           f"Generated Response:\n{generated_text}", image_url
 
 # Define chat function for gr.ChatInterface
 def chat_function(message, history):
-
-    history.append([message,
-
+    response_text, response_image = generate_response(message)
+    history.append([message, response_text])
+    history.append([message, response_image]) # Separate history for image
+    return response_text, response_image
 
 # Create Gradio Chat Interface
 chat_interface = gr.ChatInterface(chat_function)
 chat_interface.launch(share=True) # Added share=True to create a public link
 
 
+
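The hunk above only shows the tail of get_movie_info, so the values it now returns (title, year, genre, tmdb_link, and the commented-out poster_path handling) are produced earlier in the function, outside the diff. For orientation, here is a minimal sketch of what the full, updated function might look like, assuming it queries TMDB's /search/movie endpoint with requests and reads the key from a TMDB_API_KEY environment variable; none of those details are visible in this commit, so every name below is an assumption rather than the repository's actual code.

import os
import requests

TMDB_API_KEY = os.environ.get("TMDB_API_KEY", "")  # assumed; not shown in this diff

def get_movie_info(movie_title):
    """Look up a movie on TMDB and return a small dict, or an 'error' dict."""
    try:
        resp = requests.get(
            "https://api.themoviedb.org/3/search/movie",
            params={"api_key": TMDB_API_KEY, "query": movie_title},
            timeout=10,
        )
        resp.raise_for_status()
        results = resp.json().get("results", [])

        if results:
            movie = results[0]
            title = movie.get("title", "Unknown")
            year = (movie.get("release_date") or "")[:4] or "Unknown"
            # The search endpoint only returns genre ids; mapping them to names
            # would need an extra call to /genre/movie/list.
            genre = ", ".join(str(g) for g in movie.get("genre_ids", []))
            tmdb_link = f"https://www.themoviedb.org/movie/{movie['id']}"
            poster_path = movie.get("poster_path")

            # Convert poster_path to a complete image URL
            # image_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else ""

            return {
                "title": title,
                "year": year,
                "genre": genre,
                "tmdb_link": tmdb_link,
            }
        else:
            return {"error": "Movie not found"}

    except Exception as e:
        return {"error": f"Error: {e}"}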
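The changed lines also lean on tokenizer, device, and output, which are defined in parts of app.py that this diff does not touch. Below is a hedged sketch of the kind of transformers setup those names usually imply; the real checkpoint name and generation arguments are not shown in the commit, so the ones here are placeholders only.

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "gpt2"  # placeholder; the actual checkpoint is not visible in this diff

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)

# Inside generate_response, `output` would come from something like:
# output = model.generate(**model_inputs, max_new_tokens=128)

With scaffolding of that shape in place, generate_response can be exercised directly, e.g. print(generate_response("Inception")), independently of the Gradio interface.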