Update app.py
app.py
CHANGED
@@ -4,12 +4,11 @@ from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 import numpy as np
 from typing import List
-import time
 
 model = Llama(
     model_path=hf_hub_download(
-        repo_id=os.environ.get("REPO_ID", "Lyte/QuadConnect2.5-1.5B-v0.1.0b"),
-        filename=os.environ.get("MODEL_FILE", "unsloth.Q8_0.gguf"),
+        repo_id=os.environ.get("REPO_ID", "Lyte/QuadConnect2.5-1.5B-v0.1.0b"), #"Lyte/QuadConnect2.5-0.5B-v0.0.9b"),#"Lyte/QuadConnect2.5-0.5B-v0.0.8b"), #"Lyte/QuadConnect2.5-0.5B-v0.0.6b"), #"Lyte/QuadConnect-Llama-1B-v0.0.7b"),#"
+        filename=os.environ.get("MODEL_FILE", "unsloth.Q8_0.gguf"), #"quadconnect.Q8_0.gguf"),
     ),
     n_ctx=16384
 )
@@ -48,9 +47,9 @@ def extract_xml_move(text: str) -> str:
         return match.group(1)
     return ""
 
-def
+def extract_xml_reasoning(text: str) -> str:
     """
-
+    Extracts the reasoning section from the XML format.
     """
     import re
     match = re.search(r'<reasoning>(.*?)</reasoning>', text, re.DOTALL)
@@ -302,9 +301,7 @@ def create_interface():
        margin: 15px 0;
        font-family: monospace;
        min-height: 100px;
-
-        overflow-y: auto;
-        white-space: pre-wrap;
+        color: white;
    }
    .reasoning-box {
        border-left: 4px solid #2196F3;
@@ -312,6 +309,7 @@ def create_interface():
        margin: 10px 0;
        background: #22004d;
        border-radius: 0 10px 10px 0;
+        color: white;
    }
    #column-buttons {
        display: flex;
@@ -328,8 +326,12 @@ def create_interface():
        display: block;
    }
    .thinking-indicator {
-
-
+        color: #ffc107;
+        font-style: italic;
+    }
+    .move-highlight {
+        font-weight: bold;
+        color: #4CAF50;
    }
    """
 
@@ -370,7 +372,7 @@ def create_interface():
         info="Lower values make AI more deterministic, higher values more creative"
     )
 
-    def
+    def handle_move(col, temperature=0.8):
         if game.game_over:
             return [
                 render_board(game.board),
@@ -402,49 +404,17 @@ def create_interface():
 
         # Use the new game state formatting
         game_state = game.format_game_state()
-        print(game_state)
 
-        #
-        reasoning_html = ''
-
-            <div class="reasoning-box">
-                <p><strong>🤔 Reasoning:</strong></p>
-                <p class="thinking-indicator">AI is thinking...</p>
-            </div>
-        </div>
-        '''
+        # Initialize the reasoning display with a "thinking" message
+        reasoning_html = '<div id="ai-reasoning"><p class="thinking-indicator">Thinking...</p></div>'
+        yield [render_board(game.board), "AI is thinking...", reasoning_html]
 
-
-            render_board(game.board),
-            "AI is thinking...",
-            reasoning_html
-        ]
-
-    def process_ai_response(temperature=0.8):
-        if game.game_over or game.current_player != 2:
-            return [
-                render_board(game.board),
-                "Your turn!" if not game.game_over else "Game is over! Click New Game to play again.",
-                reasoning_display.value
-            ]
-
-        game_state = game.format_game_state()
-
-        # Start streaming response
+        # Prepare to stream AI's response
         full_response = ""
-
-        move_str = ""
+        current_reasoning = ""
 
-        #
-
-            <div id="ai-reasoning">
-                <div class="reasoning-box">
-                    <p><strong>🤔 Reasoning:</strong></p>
-                    <p>
-        '''
-
-        # Create streaming session
-        response_stream = model.create_chat_completion(
+        # Get AI response with streaming
+        for chunk in model.create_chat_completion(
             messages=[
                 {"role": "system", "content": SYSTEM_PROMPT},
                 {"role": "user", "content": game_state}
@@ -452,97 +422,93 @@ def create_interface():
             temperature=temperature,
             top_p=0.95,
             max_tokens=1024,
-            stream=True
-        )
-
-        # Process stream
-        collected_text = ""
-        for chunk in response_stream:
+            stream=True # Enable streaming!
+        ):
             if 'choices' in chunk and len(chunk['choices']) > 0:
-
-                if
-
-
-
-
-
-
-                if
-                    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                current_html += f'<p><strong>📍 Move chosen:</strong> Column {move_str.upper()}</p>'
-                current_html += '</div></div>'
-                yield [
-                    render_board(game.board),
-                    "AI is making a move...",
-                    current_html
-                ]
+                content = chunk['choices'][0].get('delta', {}).get('content', '')
+                if content:
+                    full_response += content
+
+                    # Try to extract current reasoning for display
+                    try:
+                        # Update the displayed reasoning as it comes in
+                        current_reasoning = extract_xml_reasoning(full_response)
+                        if current_reasoning:
+                            # Format reasoning for display
+                            reasoning_html = f'''
+                            <div id="ai-reasoning">
+                                <div class="reasoning-box">
+                                    <p><strong>🤔 Reasoning:</strong></p>
+                                    <p>{current_reasoning}</p>
+                                    <p class="thinking-indicator">Deciding on next move...</p>
+                                </div>
+                            </div>
+                            '''
+                            yield [render_board(game.board), "AI is thinking...", reasoning_html]
+                    except:
+                        # If we can't extract reasoning yet, just show what we have
+                        reasoning_html = f'''
+                        <div id="ai-reasoning">
+                            <div class="reasoning-box">
+                                <p><strong>🤔 Reasoning:</strong></p>
+                                <p class="thinking-indicator">Analyzing the board...</p>
+                            </div>
+                        </div>
+                        '''
+                        yield [render_board(game.board), "AI is thinking...", reasoning_html]
 
-        #
-
-
-
-
-
-
-
-            </div>
-        '''
-
-        # Make AI move
-        if move_str:
+        # Process the complete response
+        try:
+            reasoning = extract_xml_reasoning(full_response)
+            move_str = extract_xml_move(full_response)
+
+            if not move_str:
+                raise ValueError("Invalid move format from AI")
+
             ai_col = game.parse_ai_move(move_str)
 
-            if ai_col
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if ai_col == -1:
+                raise ValueError("Invalid move format from AI")
+
+            # Format final reasoning with move for display
+            reasoning_html = f'''
+            <div id="ai-reasoning">
+                <div class="reasoning-box">
+                    <p><strong>🤔 Reasoning:</strong></p>
+                    <p>{reasoning}</p>
+                    <p><strong>📍 Move chosen:</strong> <span class="move-highlight">Column {move_str.upper()}</span></p>
+                </div>
+            </div>
+            '''
+
+            # Make the AI's move
+            success, _ = game.make_move(ai_col)
+            if success:
+                # Check for AI winner
+                winner = game.check_winner()
+                if winner == 2:
+                    game.game_over = True
+                    return [
+                        render_board(game.board),
+                        "🤖 AI wins! Better luck next time!",
+                        reasoning_html
+                    ]
+            else:
+                return [
+                    render_board(game.board),
+                    "AI made invalid move! You win by default!",
+                    '<div id="ai-reasoning">AI made an invalid move!</div>'
+                ]
+        except Exception as e:
+            game.game_over = True
+            return [
+                render_board(game.board),
+                "AI error occurred! You win by default!",
+                f'<div id="ai-reasoning">Error: {str(e)}</div>'
+            ]
 
-
-            game.
-            yield [
-                render_board(game.board),
-                "AI made an invalid move! You win by default!",
-                '<div id="ai-reasoning">AI made an invalid move or provided an invalid response!</div>'
-            ]
+        game.current_player = 1
+        return [render_board(game.board), "Your turn!", reasoning_html]
 
     def reset_game():
         game.board = np.zeros((6, 7))
@@ -558,20 +524,13 @@ def create_interface():
 
         # Event handlers
         for i, btn in enumerate(col_buttons):
-            # First handle player move
             btn.click(
-                fn=
+                fn=handle_move,
                 inputs=[
                     gr.Number(value=i, visible=False),
                     temperature_slider
                 ],
-                outputs=[board_display, status, reasoning_display]
-                #queue=False # Process player move immediately
-            ).then( # Then process AI response with streaming
-                fn=process_ai_response,
-                inputs=[temperature_slider],
-                outputs=[board_display, status, reasoning_display],
-                #streaming=True # Enable streaming for AI response
+                outputs=[board_display, status, reasoning_display]
             )
 
         reset_btn.click(
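The event-handler change in the last hunk relies on Gradio streaming the values yielded by a generator handler to the listed outputs: the single `handle_move` generator can push the "thinking" placeholder, the partial reasoning, and the final board one after another. The following is a minimal, hypothetical sketch of that wiring pattern only; the toy two-step handler and the component labels are placeholders, not the code in app.py.

```python
import time
import gradio as gr

def handle_move(col, temperature=0.8):
    # Generator handler: every yield immediately updates the outputs,
    # which is how intermediate "AI is thinking..." states reach the UI.
    # (temperature is unused in this toy handler.)
    yield f"You played column {int(col) + 1}.", "AI is thinking..."
    time.sleep(1)  # stand-in for the streaming model call
    yield f"You played column {int(col) + 1}.", "AI chose column 4."

with gr.Blocks() as demo:
    status = gr.Textbox(label="Status")
    reasoning = gr.HTML()
    temperature_slider = gr.Slider(0.1, 1.5, value=0.8, label="Temperature")
    col_buttons = [gr.Button(f"Drop in {c}") for c in "abcdefg"]
    for i, btn in enumerate(col_buttons):
        btn.click(
            fn=handle_move,
            inputs=[gr.Number(value=i, visible=False), temperature_slider],
            outputs=[status, reasoning],
        )

demo.queue()   # queueing is required for generator (streaming) handlers in some Gradio versions
demo.launch()
```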