ApaCu committed on
Commit
fb39837
·
verified ·
1 Parent(s): aee85f8

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +324 -0
main.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import html
import os
import re
import time
import uuid
from typing import Dict, List

import gradio as gr
import markdown
import requests
import torch
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

# For integration with the AI model (e.g. Hugging Face Transformers or an external API)
from transformers import AutoTokenizer, AutoModelForCausalLM
16
+
17
+ # Konfigurasi dasar
18
+ AI_MODEL_ID = os.environ.get("AI_MODEL_ID", "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ")
19
+ API_TOKEN = os.environ.get("HUGGINGFACE_API_TOKEN", None)
20
+ MAX_HISTORY_LENGTH = 10
21
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
22
+
23
+ # Dictionary untuk menyimpan semua sesi chat
24
+ active_sessions: Dict[str, List[Dict]] = {}
25
+
26
+ # Inisialisasi model dan tokenizer
27
+ @torch.inference_mode()
28
+ def initialize_model():
29
+ try:
30
+ print(f"Loading model {AI_MODEL_ID} on {DEVICE}...")
31
+ tokenizer = AutoTokenizer.from_pretrained(AI_MODEL_ID)
32
+ model = AutoModelForCausalLM.from_pretrained(
33
+ AI_MODEL_ID,
34
+ device_map=DEVICE,
35
+ torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32
36
+ )
37
+ print("Model loaded successfully!")
38
+ return model, tokenizer
39
+ except Exception as e:
40
+ print(f"Error loading model: {e}")
41
+ return None, None
42
+
43
+ model, tokenizer = initialize_model()
44
+
45
+ # Fungsi untuk memformat kode dengan syntax highlighting
46
+ def format_code_blocks(text):
47
+ def replace_code_block(match):
48
+ language = match.group(1) or "python"
49
+ code = match.group(2)
50
+ try:
51
+ lexer = get_lexer_by_name(language, stripall=True)
52
+ formatter = HtmlFormatter(style="github", cssclass="syntax-highlight")
53
+ result = highlight(code, lexer, formatter)
54
+ return f'<div class="code-block">{result}</div>'
55
+ except:
56
+ # Fallback jika bahasa tidak dikenali
57
+ return f'<pre><code class="{language}">{code}</code></pre>'
58
+
59
+ # Cari dan ganti semua code blocks markdown
60
+ pattern = r'```(\w+)?\n([\s\S]+?)\n```'
61
+ return re.sub(pattern, replace_code_block, text)
62
+
63
+ # Fungsi untuk memproses pesan dan mendapatkan respons AI
64
+ @torch.inference_mode()
65
+ def process_message(message, history, session_id):
66
+ if session_id not in active_sessions:
67
+ active_sessions[session_id] = []
68
+
69
+ # Tambahkan pesan pengguna ke history sesi
70
+ if len(active_sessions[session_id]) >= MAX_HISTORY_LENGTH:
71
+ active_sessions[session_id].pop(0)
72
+ active_sessions[session_id].append({"role": "user", "content": message})
73
+
74
+ # Konversi history ke format prompt untuk model
75
+ prompt = format_prompt(active_sessions[session_id])
76
+
77
+ # Jalankan inferensi dengan animasi loading
78
+ yield "βŒ› Thinking..."
79
+ time.sleep(0.5)
80
+ yield "βŒ› Generating response..."
81
+
82
+ try:
83
+ # Gunakan model untuk generate respons
84
+ inputs = tokenizer(prompt, return_tensors="pt").to(DEVICE)
85
+ output = model.generate(
86
+ inputs["input_ids"],
87
+ max_length=2048,
88
+ temperature=0.7,
89
+ top_p=0.9,
90
+ do_sample=True,
91
+ pad_token_id=tokenizer.eos_token_id
92
+ )
93
+
94
+ # Proses output
95
+ response = tokenizer.decode(output[0], skip_special_tokens=True)
96
+ # Ekstrak hanya bagian respons AI dari keseluruhan output
97
+ ai_response = extract_ai_response(response, prompt)
98
+
99
+ # Tambahkan respons AI ke history sesi
100
+ active_sessions[session_id].append({"role": "assistant", "content": ai_response})
101
+
102
+ # Format respons dengan markdown dan syntax highlighting untuk kode
103
+ formatted_response = format_code_blocks(markdown.markdown(ai_response))
104
+
105
+ return formatted_response
106
+ except Exception as e:
107
+ error_msg = f"Error generating response: {str(e)}"
108
+ print(error_msg)
109
+ return f"<span style='color: red;'>{error_msg}</span>"
110
+
111
+ # Format prompt sesuai dengan kebutuhan model
112
+ def format_prompt(messages):
113
+ prompt = ""
114
+ for msg in messages:
115
+ if msg["role"] == "user":
116
+ prompt += f"USER: {msg['content']}\n"
117
+ else:
118
+ prompt += f"ASSISTANT: {msg['content']}\n"
119
+ prompt += "ASSISTANT: "
120
+ return prompt
121
+
122
+ # Ekstrak respons AI dari output model
123
+ def extract_ai_response(full_response, prompt):
124
+ # Hapus prompt dari respons untuk mendapatkan hanya output baru
125
+ if full_response.startswith(prompt):
126
+ return full_response[len(prompt):].strip()
127
+ return full_response.strip()
128
+
129
+ # Fungsi untuk membuat sesi baru
130
+ def create_new_session():
131
+ session_id = str(uuid.uuid4())
132
+ active_sessions[session_id] = []
133
+ return session_id, []
134
+
135
+ # Debug mode untuk membantu debugging kode
136
+ def debug_code(code, session_id):
137
+ try:
138
+ # Simulasi proses debugging
139
+ yield "πŸ” Analyzing code..."
140
+ time.sleep(1)
141
+
142
+ # Cek sintaks dasar
143
+ compile(code, '<string>', 'exec')
144
+ yield "βœ… Syntax check passed"
145
+ time.sleep(0.5)
146
+
147
+ # Analisis kode sederhana
148
+ lines = code.split('\n')
149
+ issues = []
150
+
151
+ # Cek beberapa masalah umum
152
+ for i, line in enumerate(lines):
153
+ if 'print(' in line and not line.strip().endswith(')'):
154
+ issues.append(f"Line {i+1}: Missing closing parenthesis in print statement")
155
+ if '#' not in line and line.strip().endswith(':') and i+1 < len(lines) and not lines[i+1].startswith(' '):
156
+ issues.append(f"Line {i+1}: Missing indentation after control statement")
157
+
158
+ if issues:
159
+ yield "πŸ”΄ Found potential issues:\n" + "\n".join(issues)
160
+ else:
161
+ # Simulasi eksekusi
162
+ yield "🟒 No obvious issues detected. Running code..."
163
+ time.sleep(1)
164
+
165
+ # Tambahkan respons ke sesi
166
+ if session_id in active_sessions:
167
+ active_sessions[session_id].append({
168
+ "role": "assistant",
169
+ "content": f"I've analyzed your code and it looks good syntactically. Here are some tips for improvement:\n\n```python\n{code}\n```\n\nConsider adding more comments and error handling for better robustness."
170
+ })
171
+
172
+ yield "βœ… Code analysis complete. The code appears to be valid Python code."
173
+ except SyntaxError as e:
174
+ error_msg = f"πŸ”΄ Syntax Error: {str(e)}"
175
+ yield error_msg
176
+
177
+ # Tambahkan analisis ke sesi
178
+ if session_id in active_sessions:
179
+ active_sessions[session_id].append({
180
+ "role": "assistant",
181
+ "content": f"I found a syntax error in your code:\n\n```python\n{code}\n```\n\nError: {str(e)}\n\nPlease check your syntax and try again."
182
+ })
183
+ except Exception as e:
184
+ error_msg = f"πŸ”΄ Error during analysis: {str(e)}"
185
+ yield error_msg
186
+
187
+ # CSS kustom untuk UI yang lebih baik
188
+ custom_css = """
189
+ .container {max-width: 850px; margin: auto;}
190
+ .chat-message {padding: 12px; border-radius: 10px; margin-bottom: 10px; position: relative;}
191
+ .user-message {background-color: #e6f7ff; text-align: right; margin-left: 20%;}
192
+ .bot-message {background-color: #f2f2f2; margin-right: 20%;}
193
+ .timestamp {font-size: 0.7em; color: #888; position: absolute; bottom: 2px; right: 10px;}
194
+ .syntax-highlight {border-radius: 5px; padding: 10px !important; margin: 15px 0 !important; overflow-x: auto;}
195
+ .code-block {border: 1px solid #ddd; border-radius: 5px; margin: 10px 0;}
196
+ .typing-indicator {font-style: italic; color: #888;}
197
+ """
198
+
199
+ # Fungsi untuk membangun antarmuka Gradio
200
+ def build_ui():
201
+ with gr.Blocks(css=custom_css) as demo:
202
+ gr.Markdown("# AI Chat with Code Capabilities")
203
+
204
+ with gr.Row():
205
+ with gr.Column(scale=3):
206
+ # Chat interface utama
207
+ chatbot = gr.Chatbot(
208
+ label="Conversation",
209
+ height=500,
210
+ elem_classes="container"
211
+ )
212
+
213
+ with gr.Row():
214
+ message_input = gr.Textbox(
215
+ label="Your message",
216
+ placeholder="Ask anything or paste code for debugging...",
217
+ lines=3
218
+ )
219
+
220
+ with gr.Row():
221
+ submit_btn = gr.Button("Send", variant="primary")
222
+ clear_btn = gr.Button("Clear Chat")
223
+ debug_btn = gr.Button("Debug Code", variant="secondary")
224
+
225
+ with gr.Column(scale=1):
226
+ # Sidebar untuk manajemen sesi
227
+ new_session_btn = gr.Button("New Session")
228
+ session_info = gr.Textbox(label="Current Session", value="", visible=False)
229
+
230
+ # Info model
231
+ gr.Markdown(f"### Model Info\n- Using: {AI_MODEL_ID}\n- Device: {DEVICE}")
232
+
233
+ # Settings
234
+ temperature = gr.Slider(
235
+ minimum=0.1, maximum=1.5, value=0.7, step=0.1,
236
+ label="Temperature (Creativity)"
237
+ )
238
+
239
+ # Status
240
+ status_box = gr.Textbox(label="Status", value="Ready")
241
+
242
+ # Hidden state untuk session ID
243
+ session_id = gr.State(str(uuid.uuid4()))
244
+
245
+ # Fungsi callback
246
+ def on_submit(message, chat_history, sid):
247
+ if not message.strip():
248
+ return "", chat_history
249
+
250
+ # Update chat history untuk UI
251
+ chat_history.append([message, None])
252
+ status_box.update(value="Generating response...")
253
+
254
+ return "", chat_history
255
+
256
+ submit_btn.click(
257
+ on_submit,
258
+ [message_input, chatbot, session_id],
259
+ [message_input, chatbot]
260
+ ).then(
261
+ process_message,
262
+ [message_input, chatbot, session_id],
263
+ chatbot
264
+ ).then(
265
+ lambda: "Ready",
266
+ None,
267
+ status_box
268
+ )
269
+
270
+ # Debug code button behavior
271
+ def on_debug(message, chat_history, sid):
272
+ if not message.strip():
273
+ return chat_history, "Please enter code to debug"
274
+
275
+ chat_history.append([message, None])
276
+ return chat_history, "Debugging code..."
277
+
278
+ debug_btn.click(
279
+ on_debug,
280
+ [message_input, chatbot, session_id],
281
+ [chatbot, status_box]
282
+ ).then(
283
+ debug_code,
284
+ [message_input, session_id],
285
+ chatbot
286
+ ).then(
287
+ lambda: "Ready",
288
+ None,
289
+ status_box
290
+ )
291
+
292
+ # New session button behavior
293
+ def start_new_session():
294
+ new_sid = str(uuid.uuid4())
295
+ active_sessions[new_sid] = []
296
+ return new_sid, [], f"New session started: {new_sid[:8]}...", "Ready"
297
+
298
+ new_session_btn.click(
299
+ start_new_session,
300
+ None,
301
+ [session_id, chatbot, session_info, status_box]
302
+ )
303
+
304
+ # Clear chat button behavior
305
+ clear_btn.click(lambda sid: ([], f"Session cleared: {sid[:8]}...", "Ready"),
306
+ [session_id],
307
+ [chatbot, session_info, status_box])
308
+
309
+ return demo
310
+
311
+ # Main function
312
+ def main():
313
+ demo = build_ui()
314
+
315
+ # Launch app
316
+ demo.queue(concurrency_count=5).launch(
317
+ server_name="0.0.0.0",
318
+ server_port=7860,
319
+ share=False,
320
+ debug=False
321
+ )
322
+
323
+ if __name__ == "__main__":
324
+ main()