Avinash109 committed
Commit e8acedf · verified · 1 Parent(s): ec8b044

Create app.py

Files changed (1): app.py +363 -0
app.py ADDED
@@ -0,0 +1,363 @@
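"""Enhanced Ollama Chat.

A Streamlit app that chats with locally served Ollama models
(http://localhost:11434), detects Python code blocks in assistant replies,
and provides an ACE-based code editor with black formatting and
model-driven completion suggestions.
"""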
import streamlit as st
import requests
import json
import datetime
import tempfile
import subprocess
import black
from streamlit_ace import st_ace
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
import re
from typing import Optional, Dict, List
import ast

def get_ollama_models():
    """Fetch available models from Ollama"""
    try:
        response = requests.get("http://localhost:11434/api/tags")
        if response.status_code == 200:
            models = [model["name"] for model in response.json()["models"]]
            return models
        return []
    except Exception as e:
        st.error(f"Error fetching models: {str(e)}")
        return []

def clear_chat():
    """Clear the chat history"""
    if 'messages' in st.session_state:
        st.session_state.messages = []
    if 'current_session' in st.session_state:
        st.session_state.current_session = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

def handle_file_upload():
    """Handle file upload functionality"""
    uploaded_file = st.file_uploader(
        "Upload a file",
        type=["txt", "pdf", "py", "json", "csv"],
        help="Upload a file to discuss with the AI"
    )

    if uploaded_file is not None:
        file_contents = uploaded_file.read()
        if uploaded_file.type == "application/pdf":
            # PDF text extraction is not implemented; only the filename is passed on.
            return f"Uploaded PDF: {uploaded_file.name}"
        else:
            try:
                return file_contents.decode()
            except UnicodeDecodeError:
                return "Binary file uploaded"
    return None

def generate_response(prompt, model, temperature, max_tokens, system_prompt, stream=False):
    """Generate a response from the Ollama model with the given parameters."""
    url = "http://localhost:11434/api/generate"
    data = {
        "model": model,
        "prompt": prompt,
        "system": system_prompt,
        # Ollama reads sampling parameters from the "options" object;
        # "num_predict" is its equivalent of a max-token limit.
        "options": {
            "temperature": temperature,
            "num_predict": max_tokens,
        },
        "stream": stream,
    }

    if stream:
        # Return a generator that yields the accumulated text after each chunk so the
        # caller can progressively update a placeholder. Keeping the generator in a
        # nested function avoids turning the non-streaming path into a generator as
        # well (mixing `yield` with `return value` would make the stream=False call
        # return a generator object instead of a string).
        def stream_response():
            response_text = ""
            try:
                with requests.post(url, json=data, stream=True) as response:
                    for line in response.iter_lines():
                        if line:
                            json_response = json.loads(line)
                            response_text += json_response.get("response", "")
                            yield response_text
            except Exception as e:
                st.error(f"Error generating response: {str(e)}")
                yield f"Error: {str(e)}"

        return stream_response()

    try:
        response = requests.post(url, json=data)
        if response.status_code == 200:
            return response.json()["response"]
        st.error(f"Error {response.status_code}: {response.text}")
        return "Error: Unable to get response from the model."
    except Exception as e:
        st.error(f"Error generating response: {str(e)}")
        return f"Error: {str(e)}"

class CodeAnalyzer:
    @staticmethod
    def extract_code_blocks(text: str) -> List[str]:
        """Extract code blocks from markdown text"""
        code_blocks = re.findall(r'```(?:python)?\n(.*?)\n```', text, re.DOTALL)
        return code_blocks

    @staticmethod
    def is_code_complete(code: str) -> bool:
        """Check if the code block is syntactically complete"""
        try:
            ast.parse(code)
            return True
        except SyntaxError:
            return False

    @staticmethod
    def get_context(code: str) -> Dict:
        """Analyze code to extract context (variables, functions, classes)"""
        context = {
            'variables': [],
            'functions': [],
            'classes': []
        }

        try:
            tree = ast.parse(code)
            for node in ast.walk(tree):
                if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
                    context['variables'].append(node.id)
                elif isinstance(node, ast.FunctionDef):
                    context['functions'].append(node.name)
                elif isinstance(node, ast.ClassDef):
                    context['classes'].append(node.name)
        except SyntaxError:
            # Leave the context empty if the snippet does not parse.
            pass

        return context

class CodeCompletion:
    def __init__(self, model: str):
        self.model = model

    def get_completion_suggestions(self, code: str, context: Dict) -> str:
        """Generate code completion suggestions based on context"""
        prompt = f"""Given the following code context:
Code:
{code}

Context:
Variables: {', '.join(context['variables'])}
Functions: {', '.join(context['functions'])}
Classes: {', '.join(context['classes'])}

Please complete or continue this code in a natural way."""

        response = generate_response(
            prompt, self.model, 0.3, 500,
            "You are a Python coding assistant. Provide only code completion, no explanations."
        )
        return response

def handle_code_continuation(incomplete_code: str, model: str) -> str:
    """Handle continuation of incomplete code"""
    prompt = f"""Complete the following Python code:
{incomplete_code}

Provide only the completion part that would make this code syntactically complete and logical."""

    response = generate_response(
        prompt, model, 0.3, 500,
        "You are a Python coding assistant. Complete the code naturally."
    )
    return response

def format_code(code: str) -> str:
    """Format Python code using black"""
    try:
        return black.format_str(code, mode=black.FileMode())
    except Exception:
        # Return the code unchanged if black cannot parse it.
        return code

def init_session_state():
    """Initialize session state variables"""
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "sessions" not in st.session_state:
        st.session_state.sessions = {}
    if "current_session" not in st.session_state:
        st.session_state.current_session = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    if "system_prompt" not in st.session_state:
        st.session_state.system_prompt = "You are a helpful AI assistant."
    if "saved_code_snippets" not in st.session_state:
        st.session_state.saved_code_snippets = []
    if "code_context" not in st.session_state:
        st.session_state.code_context = {}
    if "current_code_block" not in st.session_state:
        st.session_state.current_code_block = None
    if "code_history" not in st.session_state:
        st.session_state.code_history = []
    if "last_code_state" not in st.session_state:
        st.session_state.last_code_state = None

def setup_page_config():
    """Setup page configuration and styling"""
    st.set_page_config(
        page_title="Enhanced Ollama Chat",
        page_icon="🤖",
        layout="wide",
        initial_sidebar_state="expanded"
    )

    st.markdown("""
        <style>
        /* Main container styling */
        .main {
            max-width: 1200px;
            margin: 0 auto;
            padding: 2rem;
        }

        /* Message container styling */
        .stChatMessage {
            background-color: #ffffff;
            border-radius: 8px;
            padding: 1rem;
            margin: 0.5rem 0;
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
        }

        /* Chat input styling */
        .stChatInputContainer {
            border-radius: 8px;
            border: 1px solid #e0e0e0;
            padding: 0.5rem;
            background-color: #ffffff;
        }

        /* Code editor styling */
        .code-editor {
            border-radius: 8px;
            margin: 1rem 0;
            border: 1px solid #e0e0e0;
        }

        /* Code snippet container */
        .code-snippet {
            background-color: #f8fafc;
            padding: 1rem;
            border-radius: 8px;
            margin: 0.5rem 0;
        }

        /* Code completion suggestions */
        .completion-suggestion {
            background-color: #f1f5f9;
            padding: 0.5rem;
            border-left: 3px solid #0284c7;
            margin: 0.25rem 0;
        }
        </style>
    """, unsafe_allow_html=True)

def code_editor_section():
    """Render the code editor section"""
    st.subheader("📝 Code Editor")

    code_content = st_ace(
        value=st.session_state.current_code_block or "",
        language="python",
        theme="monokai",
        key="code_editor",
        height=300,
        show_gutter=True,
        wrap=True,
        auto_update=True
    )

    col1, col2 = st.columns(2)

    with col1:
        if st.button("Format Code"):
            st.session_state.current_code_block = format_code(code_content)

    with col2:
        if st.button("Get Completion Suggestions"):
            if code_content:
                code_analyzer = CodeAnalyzer()
                context = code_analyzer.get_context(code_content)
                completion = CodeCompletion(st.session_state.selected_model)
                suggestions = completion.get_completion_suggestions(code_content, context)
                st.code(suggestions, language="python")

def main():
    """Main application logic"""
    setup_page_config()
    init_session_state()

    # Sidebar configuration
    with st.sidebar:
        colored_header(label="Model Settings", description="Configure your chat parameters", color_name="blue-70")

        available_models = get_ollama_models()
        if not available_models:
            st.error("⚠️ No Ollama models found. Please make sure Ollama is running and models are installed.")
            st.stop()

        selected_model = st.selectbox("Choose a model", available_models, index=0 if available_models else None)
        st.session_state.selected_model = selected_model

        with st.expander("Advanced Settings", expanded=False):
            temperature = st.slider("Temperature", 0.0, 2.0, 0.7, 0.1)
            max_tokens = st.number_input("Max Tokens", 50, 4096, 2048)
            system_prompt = st.text_area("System Prompt", st.session_state.system_prompt)
            stream_output = st.checkbox("Stream Output", value=True)

        if st.button("Clear Chat"):
            clear_chat()

    st.title("🤖 Enhanced Ollama Chat")
    st.caption(f"Currently using: {selected_model}")

    # Main interface tabs
    tab1, tab2 = st.tabs(["Chat", "Code Editor"])

    with tab1:
        # File upload section (append once per uploaded content, not on every rerun)
        uploaded_content = handle_file_upload()
        if uploaded_content and uploaded_content != st.session_state.get("last_uploaded_content"):
            st.session_state.last_uploaded_content = uploaded_content
            st.session_state.messages.append({
                "role": "user",
                "content": f"I've uploaded the following content:\n\n{uploaded_content}"
            })

        # Display chat messages
        for msg_index, message in enumerate(st.session_state.messages):
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

                # Check for code blocks in the message
                code_blocks = CodeAnalyzer.extract_code_blocks(message["content"])
                if code_blocks and message["role"] == "assistant":
                    for block_index, code in enumerate(code_blocks):
                        if not CodeAnalyzer.is_code_complete(code):
                            st.info("This code block appears to be incomplete. Would you like to complete it?")
                            # Key on message and block position so repeated blocks don't collide.
                            if st.button("Complete Code", key=f"complete_{msg_index}_{block_index}"):
                                completion = handle_code_continuation(code, selected_model)
                                st.code(completion, language="python")

        # Chat input
        if prompt := st.chat_input("Message (use @ to attach a file, / for commands)"):
            with st.chat_message("user"):
                st.markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})

            with st.chat_message("assistant"):
                if stream_output:
                    message_placeholder = st.empty()
                    final_response = ""
                    for response in generate_response(prompt, selected_model, temperature, max_tokens, system_prompt, stream=True):
                        message_placeholder.markdown(response)
                        final_response = response
                else:
                    with st.spinner("Thinking..."):
                        final_response = generate_response(prompt, selected_model, temperature, max_tokens, system_prompt, stream=False)
                        st.markdown(final_response)

                # Store code blocks in context
                code_blocks = CodeAnalyzer.extract_code_blocks(final_response)
                if code_blocks:
                    st.session_state.last_code_state = code_blocks[-1]

            st.session_state.messages.append({"role": "assistant", "content": final_response})

    with tab2:
        code_editor_section()

    # Footer
    add_vertical_space(2)
    st.markdown("---")
    st.markdown("Made with ❤️ using Streamlit and Ollama")

if __name__ == "__main__":
    main()