chaaim123 commited on
Commit
a7e9ffc
·
verified ·
1 Parent(s): 9192d2f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +208 -0
app.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import gradio as gr
from utils.document_utils import initialize_logging
from globals import app_config

# Configure root logging once at import time; every module-level logging call
# below inherits this format/level.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Project-specific logging hooks (defined in utils.document_utils).
initialize_logging()
10
def load_sample_question(question):
    """Return the selected sample question unchanged.

    Used as a pass-through Gradio callback so a dropdown pick can
    populate the chat input box.
    """
    return question
12
+
13
def clear_selection():
    """Reset the UI after 'Clear Selection' is pressed.

    Returns, in output order: an update emptying the document selector,
    an empty upload-status string, an empty filename state, and an update
    emptying the sample-questions dropdown.
    """
    cleared_docs = gr.update(value=[])
    cleared_questions = gr.update(value=[])
    return cleared_docs, "", "", cleared_questions
15
+
16
def process_uploaded_file(file, current_selection):
    """Process an uploaded file via the DocumentManager and update the UI.

    Args:
        file: The Gradio file object from the upload widget, or None when the
            file input was cleared.
        current_selection: The list of currently selected document names
            (may be None). This list is never mutated.

    Returns:
        Tuple of (status message, doc_selector update, trigger_summary flag,
        filename) matching the upload event's declared outputs.
    """
    try:
        if file is None:
            # File input was cleared: keep the current selection and the
            # already-uploaded choices untouched, and do not trigger a summary.
            uploaded_docs = app_config.doc_manager.get_uploaded_documents()
            return (
                "",
                gr.update(choices=uploaded_docs, value=current_selection or []),
                False,
                ""
            )

        # file is guaranteed non-None here (guard above), so pass its path directly.
        status, filename, doc_id = app_config.doc_manager.process_document(file.name)

        # Copy the incoming selection instead of aliasing it: the original code
        # appended to the caller-owned list, mutating Gradio's component state
        # in place.
        updated_selection = list(current_selection) if current_selection else []
        if filename and filename not in updated_selection:
            updated_selection.append(filename)
        # Only trigger the auto-summary pipeline when a filename was produced.
        trigger_summary = bool(filename)
        # Fixed log message: the filename placeholder was previously a literal
        # "(unknown)" instead of being interpolated.
        logging.info(f"Processed file: {filename}, Trigger summary: {trigger_summary}")

        return (
            status,
            gr.update(choices=app_config.doc_manager.get_uploaded_documents(), value=updated_selection),
            trigger_summary,
            filename
        )
    except Exception as e:
        logging.error(f"Error in process_uploaded_file: {e}")
        return "Error processing file", gr.update(choices=[]), False, ''
46
+
47
def update_doc_selector(selected_docs):
    """Identity callback: echo the selected documents back to keep UI state in sync."""
    return selected_docs
50
+
51
# UI Configuration
# Model identifiers offered in the model-selector dropdown (passed to the
# LLM reinitialization callback on change).
models = ["gemma2-9b-it", "llama3-70b-8192"]

# Static example questions.
# NOTE(review): these appear unused by the visible UI — the sample-questions
# dropdown below starts empty and is populated dynamically from
# generate_sample_questions(); confirm whether this list is still needed.
example_questions = [
    "What is the architecture of the Communication Server?",
    "Show me an example of a configuration file.",
    "How to create Protected File Directories ?",
    "What functionalities are available in the Communication Server setups?",
    "What is Mediator help?",
    "Why AzureBlobStorage port is used?"
]
62
+
63
# Top-level Gradio UI: three-column layout (upload/select | chat | sample
# questions) plus an event pipeline that auto-summarizes a document on upload.
with gr.Blocks(css="""
.chatbot .user {
    position: relative;
    background-color: #cfdcfd;
    padding: 12px 16px;
    border-radius: 20px;
    border-bottom-right-radius: 6px;
    display: inline-block;
    max-width: 80%;
    margin: 8px 0;
}

/* Tail effect */
.chatbot .user::after {
    content: "";
    position: absolute;
    right: -10px;
    bottom: 10px;
    width: 0;
    height: 0;
    border: 10px solid transparent;
    border-left-color: #cfdcfd;
    border-right: 0;
    border-top: 0;
    margin-top: -5px;
}
.chatbot .bot { background-color: #f1f8e9; padding: 8px; border-radius: 10px; } /* Light green for bot responses */
""") as interface:
    interface.title = "🤖 IntelliDoc: AI Document Explorer"
    gr.Markdown("""
# 🤖 IntelliDoc: AI Document Explorer
**AI Document Explorer** allows you to upload PDF documents and interact with them using AI-powered analysis and summarization. Ask questions, extract key insights, and gain a deeper understanding of your documents effortlessly.
""")
    # Hidden state carried between the chained upload events below.
    summary_query_state = gr.State()       # auto-summary query text ("" / None when no summary)
    trigger_summary_state = gr.State()     # bool flag: generate a summary for this upload
    filename_state = gr.State()            # name of the most recently uploaded file
    chunks_state = gr.State()              # document chunks fetched for summary / questions
    summary_text_state = gr.State()        # generated summary text
    sample_questions_state = gr.State()    # generated sample questions list

    with gr.Row():
        # Left sidebar: upload, document selection, model selection.
        with gr.Column(scale=2):
            gr.Markdown("## Upload and Select Document")
            upload_btn = gr.File(label="Upload PDF Document", file_types=[".pdf"])
            doc_selector = gr.Dropdown(
                choices=app_config.doc_manager.get_uploaded_documents(),
                label="Documents",
                multiselect=True,
                value=[]  # initial value: nothing selected
            )
            model_selector = gr.Dropdown(choices=models, label="Models", interactive=True)
            clear_btn = gr.Button("Clear Selection")
            upload_status = gr.Textbox(label="Upload Status", interactive=False)

            # Process an uploaded file and update the selector/status.
            # The returned event handle is used below to chain the
            # auto-summary pipeline.
            upload_event = upload_btn.change(
                process_uploaded_file,
                inputs=[upload_btn, doc_selector],
                outputs=[
                    upload_status,
                    doc_selector,
                    trigger_summary_state,  # store trigger_summary flag
                    filename_state
                ]
            )

        # Middle section: chat history and input.
        with gr.Column(scale=6):
            gr.Markdown("## Chat with document(s)")
            chat_history = gr.Chatbot(label="Chat History", height=650, bubble_full_width=False, type="messages")
            with gr.Row():
                chat_input = gr.Textbox(label="Ask additional questions about the document...", show_label=False, placeholder="Ask additional questions about the document...", elem_id="chat-input", lines=3)
                chat_btn = gr.Button("🚀 Send", variant="primary", elem_id="send-button", scale=0)
            # Send the question, then clear the input box once a response is produced.
            chat_btn.click(app_config.chat_manager.generate_chat_response, inputs=[chat_input, doc_selector, chat_history], outputs=chat_history).then(
                lambda: "",  # return an empty string to clear the chat_input
                outputs=chat_input
            )

        # Right sidebar: dynamically generated sample questions.
        with gr.Column(scale=2):
            gr.Markdown("## Sample questions for this document:")
            with gr.Column():
                sample_questions = gr.Dropdown(
                    label="Select a sample question",
                    choices=[],
                    interactive=True,
                    allow_custom_value=True  # allows users to type custom questions if needed
                )

    # Reset selector, status, filename state, and sample questions.
    clear_btn.click(
        clear_selection,
        outputs=[doc_selector, upload_status, filename_state, sample_questions]
    )
    # Reinitialize the LLM when the model dropdown changes.
    model_selector.change(
        app_config.gen_llm.reinitialize_llm,
        inputs=[model_selector],
        outputs=[upload_status]
    )

    # Auto-summary pipeline, chained off the upload event. Each .then() step
    # is a no-op (passes state through unchanged) unless trigger/query/chunks
    # from the previous step is truthy. Order matters: query -> placeholder
    # message -> chunks -> summary -> append summary -> questions -> dropdown.
    upload_event.then(
        fn=lambda trigger, filename: "Can you provide summary of the document" if trigger and filename else None,
        inputs=[trigger_summary_state, filename_state],
        outputs=[summary_query_state]
    ).then(
        # Show a "please wait" placeholder in the chat while the summary is generated.
        fn=lambda query, history: history + [{"role": "user", "content": ""}, {"role": "assistant", "content": "Generating summary of the document, please wait..."}] if query else history,
        inputs=[summary_query_state, chat_history],
        outputs=[chat_history]
    ).then(
        fn=lambda trigger, filename: app_config.doc_manager.get_chunks(filename) if trigger and filename else None,
        inputs=[trigger_summary_state, filename_state],
        outputs=[chunks_state]
    ).then(
        fn=lambda chunks: app_config.chat_manager.generate_summary(chunks) if chunks else None,
        inputs=[chunks_state],
        outputs=[summary_text_state]
    ).then(
        fn=lambda summary, history: history + [{"role": "assistant", "content": summary}] if summary else history,
        inputs=[summary_text_state, chat_history],
        outputs=[chat_history]
    ).then(
        fn=lambda chunks: app_config.chat_manager.generate_sample_questions(chunks) if chunks else [],
        inputs=[chunks_state],
        outputs=[sample_questions_state]
    ).then(
        fn=lambda questions: gr.update(
            choices=questions if questions else ["No questions available"],
            value=questions[0] if questions else None  # preselect the first question
        ),
        inputs=[sample_questions_state],
        outputs=[sample_questions]
    )
    # Populate chat_input when a sample question is selected.
    sample_questions.change(
        fn=lambda question: question,
        inputs=[sample_questions],
        outputs=[chat_input]
    )
    #gr.Markdown("## Logs")
    #history = gr.Textbox(label="Previous Queries", interactive=False)
206
+
207
# Launch the Gradio app only when executed as a script (not when imported).
if __name__ == "__main__":
    interface.launch()