File size: 43,492 Bytes
a0f57d6
3290861
a0f57d6
4eff17c
3290861
a0f57d6
 
 
 
 
 
6146397
4c46f34
a0f57d6
0df9635
4eff17c
 
 
 
5d26448
9053015
 
 
 
 
fb9266f
a0f57d6
 
 
9053015
a0f57d6
 
 
 
5d26448
a0f57d6
4eff17c
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
4eff17c
a0f57d6
 
4eff17c
 
a0f57d6
 
dbd6fa0
a0f57d6
dbd6fa0
a0f57d6
 
 
 
 
 
 
 
 
dbd6fa0
599725a
a0f57d6
dbd6fa0
 
a0f57d6
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0df9635
a0f57d6
 
70dd0f7
a0f57d6
70dd0f7
a0f57d6
 
599725a
dbd6fa0
0596125
a0f57d6
 
 
e8459e6
 
 
a0f57d6
 
e8459e6
 
a0f57d6
 
dbd6fa0
 
e8459e6
a0f57d6
e8459e6
a0f57d6
e8459e6
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0596125
dbd6fa0
 
 
0596125
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dbd6fa0
a0f57d6
 
 
 
 
 
 
e8459e6
dbd6fa0
a0f57d6
 
 
 
dbd6fa0
 
 
 
0596125
a0f57d6
dbd6fa0
 
 
 
 
 
 
 
 
 
a0f57d6
dbd6fa0
 
 
 
 
 
0596125
 
dbd6fa0
e8459e6
dbd6fa0
 
 
0596125
 
dbd6fa0
 
 
e8459e6
6dbcd3a
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0596125
dbd6fa0
 
 
 
 
 
0596125
 
 
 
 
 
dbd6fa0
 
 
 
 
0596125
 
 
 
 
dbd6fa0
 
 
 
0596125
 
 
dbd6fa0
 
 
 
 
0596125
 
dbd6fa0
 
0596125
 
 
 
 
dbd6fa0
 
 
 
 
 
 
 
 
0596125
 
 
 
 
 
dbd6fa0
 
0596125
dbd6fa0
 
 
0596125
dbd6fa0
 
 
3290861
6dbcd3a
 
dbd6fa0
 
6dbcd3a
dbd6fa0
 
0596125
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
 
0596125
 
 
 
dbd6fa0
 
 
 
 
 
0596125
dbd6fa0
 
 
 
 
 
 
 
0596125
dbd6fa0
 
 
 
0596125
 
 
 
 
dbd6fa0
 
 
 
 
0596125
dbd6fa0
 
 
 
 
 
 
0596125
dbd6fa0
0596125
dbd6fa0
0596125
 
 
 
 
dbd6fa0
 
 
 
 
0596125
 
 
 
dbd6fa0
 
 
0596125
dbd6fa0
 
 
 
 
 
 
0596125
dbd6fa0
 
 
 
0596125
 
 
 
 
 
dbd6fa0
 
 
 
 
 
 
 
 
 
 
0596125
 
dbd6fa0
 
0596125
 
dbd6fa0
 
 
0596125
 
 
 
dbd6fa0
 
 
0596125
dbd6fa0
 
 
 
 
 
 
0596125
dbd6fa0
 
 
0596125
 
 
 
 
 
 
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0596125
dbd6fa0
0596125
 
 
 
 
dbd6fa0
 
 
0596125
 
 
 
dbd6fa0
 
 
 
 
 
 
 
 
 
 
 
 
 
0596125
 
 
dbd6fa0
 
 
0596125
 
 
 
dbd6fa0
 
 
0596125
 
 
 
 
dbd6fa0
 
 
 
 
 
 
 
3290861
 
 
dbd6fa0
 
6dbcd3a
dbd6fa0
 
 
 
 
 
 
 
3290861
dbd6fa0
 
3290861
 
dbd6fa0
 
0596125
 
 
 
3290861
e8459e6
dbd6fa0
 
 
 
 
 
0596125
dbd6fa0
 
 
 
 
 
 
0596125
dbd6fa0
 
 
 
0596125
 
 
 
 
 
dbd6fa0
 
 
 
 
 
 
 
0596125
 
 
 
dbd6fa0
0596125
 
 
dbd6fa0
 
 
 
0596125
 
 
 
 
dbd6fa0
e8459e6
a0f57d6
0596125
 
 
 
 
 
 
a0f57d6
 
 
dbd6fa0
6dbcd3a
 
 
dbd6fa0
 
 
 
 
 
 
0596125
a0f57d6
 
 
 
 
 
 
dbd6fa0
 
9053015
0596125
9053015
0596125
a0f57d6
 
dbd6fa0
a0f57d6
dbd6fa0
 
e8459e6
 
 
 
 
 
3290861
0596125
 
 
e8459e6
3290861
dbd6fa0
 
e8459e6
dbd6fa0
e8459e6
 
dbd6fa0
0596125
6dbcd3a
e8459e6
 
 
 
 
 
 
 
 
 
 
 
 
 
a0f57d6
dbd6fa0
a0f57d6
dbd6fa0
9053015
a0f57d6
dbd6fa0
 
 
 
a0f57d6
 
0596125
 
dbd6fa0
9053015
a0f57d6
dbd6fa0
0596125
 
dbd6fa0
0596125
 
dbd6fa0
 
0596125
9053015
dbd6fa0
 
9053015
 
 
dbd6fa0
 
9053015
 
 
dbd6fa0
 
e8459e6
 
 
dbd6fa0
 
9053015
 
 
dbd6fa0
 
 
a0f57d6
dbd6fa0
0596125
a0f57d6
dbd6fa0
a0f57d6
 
e8459e6
 
 
dbd6fa0
 
a0f57d6
 
 
e8459e6
dbd6fa0
 
a0f57d6
dbd6fa0
 
 
 
a0f57d6
 
0596125
 
dbd6fa0
 
0596125
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dbd6fa0
 
4eff17c
dbd6fa0
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
import os
import re
import time
import json
import io
import requests

import gradio as gr
import google.generativeai as genai

from huggingface_hub import create_repo, list_models, upload_file, constants
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status

# --- Helper functions for Hugging Face integration ---

def show_profile(profile: gr.OAuthProfile | None) -> str:
    """Render a markdown status line for the current Hugging Face login."""
    if profile is not None:
        return f"βœ… Logged in as **{profile.username}**"
    return "*Not logged in.*"

def list_private_models(
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
) -> str:
    """List the logged-in user's models (id + visibility) as a markdown bullet list.

    Always returns a displayable string: a login prompt when credentials are
    missing, an error message on API failure, "No models found." for an empty
    account, otherwise the bulleted list.
    """
    if profile is None or oauth_token is None:
        return "Please log in to see your models."
    try:
        models = [
            f"{m.id} ({'private' if m.private else 'public'})"
            for m in list_models(author=profile.username, token=oauth_token.token)
        ]
        if not models:
            return "No models found."
        # Bullet every entry uniformly; the previous join-based formatting
        # left the first model without its " - " prefix.
        return "Models:\n\n" + "\n".join(f" - {m}" for m in models)
    except Exception as e:
        return f"Error listing models: {e}"

def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, token: gr.OAuthToken):
    """Create (or reuse, via exist_ok) a Space repo for the user.

    Returns (repo_id, iframe_html) on success; raises RuntimeError on failure
    so the caller's error handling is triggered instead of a status string.
    """
    repo_id = f"{profile.username}/{repo_name}"
    try:
        create_repo(
            repo_id=repo_id,
            token=token.token,
            exist_ok=True,
            repo_type="space",
            space_sdk=sdk,
        )
        space_url = f"https://huggingface.co/spaces/{repo_id}"
        preview = f'<iframe src="{space_url}" width="100%" height="500px"></iframe>'
        return repo_id, preview
    except Exception as e:
        raise RuntimeError(f"Failed to create Space {repo_id}: {e}") # Raise instead of returning string

def upload_file_to_space_action(
    file_obj,
    path_in_repo: str,
    repo_id: str,
    profile: gr.OAuthProfile,
    token: gr.OAuthToken
) -> None: # Return None on success, raise on failure
    """Upload one file (path or file-like) into the Space repo at path_in_repo.

    Raises ValueError when credentials/repo are missing and RuntimeError when
    the upload itself fails; returns None on success.
    """
    if not (profile and token and repo_id):
        raise ValueError("Hugging Face profile, token, or repo_id is missing.")
    try:
        upload_file(
            path_or_fileobj=file_obj,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type="space",
            token=token.token,
        )
    except Exception as e:
        raise RuntimeError(f"Failed to upload `{path_in_repo}` to {repo_id}: {e}") # Raise exception

def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
    if not repo_id or not token:
         return f"Cannot fetch {level} logs: repo_id or token missing." # Handle missing state gracefully
    jwt_url  = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
    try:
        r        = get_session().get(jwt_url, headers=build_hf_headers(token=token))
        hf_raise_for_status(r)
        jwt      = r.json()["token"]
        logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
        lines, count = [], 0
        with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=20) as resp:
            hf_raise_for_status(resp)
            for raw in resp.iter_lines():
                if count >= 200:
                    lines.append("... truncated ...")
                    break
                if not raw.startswith(b"data: "):
                    continue
                payload = raw[len(b"data: "):]
                try:
                    event = json.loads(payload.decode())
                    ts    = event.get("timestamp", "")
                    txt   = event.get("data", "").strip()
                    if txt:
                        lines.append(f"[{ts}] {txt}")
                        count += 1
                except json.JSONDecodeError:
                    continue
        return "\n".join(lines) if lines else f"No {level} logs found."
    except Exception as e:
        # Don't raise here, just return error message in logs box
        return f"Error fetching {level} logs: {e}"


def get_build_logs_action(repo_id, profile, token):
    """Fetch the Space's build logs, or a prompt when prerequisites are missing."""
    ready = repo_id and profile and token
    if not ready:
        return "⚠️ Please log in and create a Space first."
    return _fetch_space_logs_level(repo_id, "build", token.token)

def get_container_logs_action(repo_id, profile, token):
    """Fetch the Space's container (run) logs after a short settling delay."""
    if not (repo_id and profile and token):
        return "⚠️ Please log in and create a Space first."
    # The build may have only just completed; give the runtime a moment
    # before requesting run logs.
    time.sleep(5) # Added delay for robustness
    return _fetch_space_logs_level(repo_id, "run", token.token)


# --- Google Gemini integration with model selection ---

def configure_gemini(api_key: str | None, model_name: str | None) -> str:
    if not api_key:
        return "Gemini API key is not set."
    if not model_name:
        return "Please select a Gemini model."
    try:
        genai.configure(api_key=api_key)
        # Test a simple ping
        genai.GenerativeModel(model_name).generate_content("ping", stream=False) # Use stream=False for sync ping
        return f"Gemini configured successfully with **{model_name}**."
    except Exception as e:
        return f"Error configuring Gemini: {e}"

def call_gemini(prompt: str, api_key: str, model_name: str) -> str:
    """Send one synchronous prompt to Gemini and return the response text.

    Raises ValueError when credentials are missing and RuntimeError when the
    API call (or reading the response text) fails.
    """
    if not (api_key and model_name):
        raise ValueError("Gemini API key or model not set.")
    try:
        genai.configure(api_key=api_key)
        reply = genai.GenerativeModel(model_name).generate_content(prompt, stream=False) # Use stream=False for sync call
        # Accessing .text stays inside the try so its failures map to
        # RuntimeError exactly like any other API error.
        return reply.text or ""
    except Exception as e:
        raise RuntimeError(f"Gemini API call failed: {e}") # Raise exception


# --- AI workflow logic (State Machine) ---

# Define States
# Workflow state identifiers for ai_workflow_chat's state machine.
# Each state names the step the assistant performs next.
STATE_IDLE = "idle"
STATE_AWAITING_REPO_NAME = "awaiting_repo_name"
STATE_CREATING_SPACE = "creating_space"
STATE_GENERATING_CODE = "generating_code"
STATE_UPLOADING_APP_PY = "uploading_app_py"
STATE_GENERATING_REQUIREMENTS = "generating_requirements"
STATE_UPLOADING_REQUIREMENTS = "uploading_requirements"
STATE_GENERATING_README = "generating_readme"
STATE_UPLOADING_README = "uploading_readme"
STATE_CHECKING_LOGS_BUILD = "checking_logs_build"
STATE_CHECKING_LOGS_RUN = "checking_logs_run"
STATE_DEBUGGING_CODE = "debugging_code"
STATE_UPLOADING_FIXED_APP_PY = "uploading_fixed_app_py"
STATE_COMPLETE = "complete" # Added a final state

# Cap on debug/fix iterations before the workflow gives up.
MAX_DEBUG_ATTEMPTS = 3

def update_chat(history: list[list[str | None]], bot_message: str) -> list[list[str | None]]:
    """Helper to set the bot's response for the last user message."""
    # Assume the last entry was just added with history.append([message, None])
    # If history is empty, this is an error in logic flow, but add safety.
    if history:
        # Ensure the last message is indeed a user message awaiting response
        if history[-1][1] is None:
            history[-1][1] = bot_message
        else:
             # This case means we're trying to add a bot response but the last message
             # already has one. This might happen if a step is re-triggered.
             # Append as a new bot-only message as a fallback.
             # In a strict state machine, this might indicate a flow error.
             # But for robustness, let's add it.
             history.append([None, bot_message])
    else:
        # This shouldn't happen - update_chat should always be called after a user message is added.
        print("Warning: update_chat called with empty history.")
        history.append([None, bot_message]) # As a fallback, add a bot-only message

    return history # Return the modified history list


def ai_workflow_chat(
    message: str,
    history: list[list[str | None]],
    hf_profile: gr.OAuthProfile | None,
    hf_token:   gr.OAuthToken   | None,
    gemini_api_key: str         | None,
    gemini_model:   str         | None,
    repo_id_state:  str | None,
    workflow_state: str,
    space_sdk:      str,
    preview_html:   str,
    container_logs: str,
    build_logs:     str,
    debug_attempts_state: int,
    app_description_state: str | None, # Persist initial request
    repo_name_state: str | None,       # Persist chosen name
    generated_code_state: str | None,  # Temporarily stores generated code or file content
) -> tuple[
    list[list[str | None]], # history
    str | None,             # repo_id
    str,                    # workflow_state
    str,                    # preview_html
    str,                    # container_logs
    str,                    # build_logs
    int,                    # debug_attempts_state
    str | None,             # app_description_state
    str | None,             # repo_name_state
    str | None,             # generated_code_state
]:
    """Drive one step of the Space-building state machine per 'Send' click.

    This is a Gradio generator handler: each ``yield`` pushes intermediate
    UI/state updates, and the values of the *last* yield are what Gradio
    writes back to the outputs. Because of that, every error branch that
    yields a reset tuple must ``return`` immediately afterwards — otherwise
    execution falls through to the final yield at the bottom, which would
    re-emit the stale pre-error state and silently undo the reset.

    The ``generated_code`` slot is deliberately reused to carry whichever
    file content (app.py, requirements.txt, README.md, fixed app.py) is
    pending upload between two clicks.
    """
    # Unpack state variables into locals; the locals are what we mutate and
    # re-yield as the step progresses.
    repo_id = repo_id_state
    state = workflow_state
    attempts = debug_attempts_state
    app_desc = app_description_state
    repo_name = repo_name_state
    generated_code = generated_code_state # This slot is reused for different file contents

    updated_preview = preview_html
    updated_build = build_logs
    updated_run = container_logs

    # Add user message to history with a placeholder for the bot's response.
    # We yield immediately after this so the user's message shows up at once.
    history.append([message, None])
    yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

    try:
        # --- State Machine Logic ---

        if state == STATE_IDLE:
            # Check prerequisites first
            if not (hf_profile and hf_token):
                history = update_chat(history, "Please log in to Hugging Face first.")
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
                return # Stop workflow until login
            if not (gemini_api_key and gemini_model):
                history = update_chat(history, "Please enter your API key and select a Gemini model.")
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
                return # Stop workflow until API key/model set

            # Look for commands
            reset_match = "reset" in message.lower()
            generate_match = re.search(r'generate (?:me )?(?:a|an) \w+ app called (\w+)', message, re.I)
            create_match = re.search(r'create (?:a|an)? space called (\w+)', message, re.I)

            if reset_match:
                history = update_chat(history, "Workflow reset.")
                # Reset all state variables
                yield history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0, None, None, None
                return # End workflow for this trigger

            elif generate_match:
                new_repo_name = generate_match.group(1)
                new_app_desc = message # Store the full request
                history = update_chat(history, f"Acknowledged: '{message}'. Starting workflow to create Space `{hf_profile.username}/{new_repo_name}`.")
                # Transition to creating space state, passing name and description
                state = STATE_CREATING_SPACE
                repo_name = new_repo_name
                app_desc = new_app_desc
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            elif create_match:
                new_repo_name = create_match.group(1)
                history = update_chat(history, f"Acknowledged: '{message}'. Starting workflow to create Space `{hf_profile.username}/{new_repo_name}`.")
                # Transition to creating space state, just passing the name (desc will be default)
                state = STATE_CREATING_SPACE
                repo_name = new_repo_name
                # app_desc remains None or existing
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            elif "create" in message.lower() and not repo_id: # Generic create trigger
                history = update_chat(history, "Okay, what should the Space be called? (e.g., `my-awesome-app`)")
                # Transition to awaiting name state
                state = STATE_AWAITING_REPO_NAME
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            else:
                # Handle other chat messages if needed, or just respond unknown
                history = update_chat(history, "Command not recognized. Try 'generate me a gradio app called myapp', or 'reset'.")
                # Stay in IDLE state
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
                return # End workflow for this trigger

        # --- Workflow steps triggered by state ---
        # Note: These steps assume the state was transitioned into in the previous yield.
        # A new click triggers the function again, and the state machine picks up here.

        if state == STATE_CREATING_SPACE:
            # This state is triggered when we *already have* the repo_name in state
            if not repo_name: # Safety check
                history = update_chat(history, "Internal error: Repo name missing for creation. Resetting.")
                yield history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0, None, None, None
                return

            try:
                new_repo_id, iframe_html = create_space_action(repo_name, space_sdk, hf_profile, hf_token)
                updated_preview = iframe_html
                repo_id = new_repo_id # Update repo_id state variable
                history = update_chat(history, f"βœ… Space `{repo_id}` created. Click 'Send' to generate and upload code.")
                # Transition to generating code state
                state = STATE_GENERATING_CODE
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error creating space: {e}. Click 'reset'.")
                # Reset state on failure; return so the final yield cannot
                # overwrite the reset with stale state.
                yield history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0, None, None, None
                return

        elif state == STATE_GENERATING_CODE:
            # Use the stored app description or a default
            prompt_desc = app_desc if app_desc else 'a Gradio image-blur test app with upload and slider controls'
            prompt = f"""
You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK.
Generate a full, single-file Python app based on:
'{prompt_desc}'
Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block.
"""
            try:
                history = update_chat(history, "🧠 Generating `app.py` code with Gemini...")
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield to show "Generating..." message

                code = call_gemini(prompt, gemini_api_key, gemini_model)
                # Clean markdown and whitespace
                code = code.strip()
                if code.startswith("```python"):
                    code = code[len("```python"):].strip()
                if code.endswith("```"):
                    code = code[:-len("```")].strip()

                if not code:
                    raise ValueError("Gemini returned empty code.")

                history = update_chat(history, "βœ… `app.py` code generated. Click 'Send' to upload.")
                # Transition to uploading state, store the generated code
                state = STATE_UPLOADING_APP_PY
                generated_code = code # Store code in state
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error generating code: {e}. Click 'reset'.")
                # Reset state on failure and stop this step.
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

        elif state == STATE_UPLOADING_APP_PY:
            # Use the generated_code stored in state
            if not generated_code:
                history = update_chat(history, "Internal error: No code to upload. Resetting.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

            history = update_chat(history, "☁️ Uploading `app.py`...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield to show message

            try:
                upload_file_to_space_action(io.StringIO(generated_code), "app.py", repo_id, hf_profile, hf_token)
                history = update_chat(history, "βœ… Uploaded `app.py`. Click 'Send' to generate requirements.")
                # Transition to generating requirements, clear the temporary code storage
                state = STATE_GENERATING_REQUIREMENTS
                generated_code = None # Clear temporary storage
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error uploading app.py: {e}. Click 'reset'.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

        elif state == STATE_GENERATING_REQUIREMENTS:
            history = update_chat(history, "πŸ“„ Generating `requirements.txt`...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield to show message

            # Simple heuristic for requirements based on SDK and common needs
            reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
            # Add common deps if likely used (could parse code, but simpler heuristic for demo)
            # NOTE(review): generated_code was cleared to None right after the
            # app.py upload, so these content checks see "None" and never
            # match — only the gemini_api_key fallback below can fire. The
            # generated code would need to be kept around for this heuristic
            # to inspect it.
            if "google.generativeai" in str(generated_code) or gemini_api_key: # Check if Gemini was used for code generation OR if key is set
                reqs_list.append("google-generativeai")
            if "requests" in str(generated_code):
                reqs_list.append("requests")
            reqs_list.append("huggingface_hub") # Needed for log fetching etc if done inside the space itself (though not currently)
            # Add Pillow for image processing if it's an image app (common requirement)
            if "image" in str(app_desc).lower() or "upload" in str(app_desc).lower():
                reqs_list.append("Pillow")

            reqs_content = "\n".join(reqs_list) + "\n"

            history = update_chat(history, "βœ… `requirements.txt` generated. Click 'Send' to upload.")
            # Transition to uploading requirements, store content temporarily
            state = STATE_UPLOADING_REQUIREMENTS
            generated_code = reqs_content # Pass content in state (reusing generated_code slot)
            # Yield state change and bot message
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

        elif state == STATE_UPLOADING_REQUIREMENTS:
            # Use content stored in state (reusing generated_code slot)
            reqs_content_to_upload = generated_code # Get content from state
            if not reqs_content_to_upload:
                history = update_chat(history, "Internal error: No requirements content to upload. Resetting.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

            history = update_chat(history, "☁️ Uploading `requirements.txt`...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield, keep temp state

            try:
                upload_file_to_space_action(io.StringIO(reqs_content_to_upload), "requirements.txt", repo_id, hf_profile, hf_token)
                history = update_chat(history, "βœ… Uploaded `requirements.txt`. Click 'Send' to generate README.")
                # Transition to generating README, clear the temporary storage
                state = STATE_GENERATING_README
                generated_code = None # Clear temporary storage
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error uploading requirements.txt: {e}. Click 'reset'.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

        elif state == STATE_GENERATING_README:
            history = update_chat(history, "πŸ“ Generating `README.md`...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield to show message

            # Generate a simple README based on app_desc or repo_name
            readme_title = repo_name if repo_name else "My Awesome Space"
            # Use app_desc if available, otherwise a generic description
            readme_description = app_desc if app_desc else f"This Hugging Face Space hosts an AI-generated {space_sdk} application."

            readme_content = f"# {readme_title}\n\n{readme_description}\n\n" \
                             "This Space was automatically generated by an AI workflow.\n\n" \
                             f"Built with the {space_sdk} SDK.\n"

            history = update_chat(history, "βœ… `README.md` generated. Click 'Send' to upload.")
            # Transition to uploading README, store content temporarily
            state = STATE_UPLOADING_README
            generated_code = readme_content # Pass content in state
            # Yield state change and bot message
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

        elif state == STATE_UPLOADING_README:
            # Use content stored in state (reusing generated_code slot)
            readme_content_to_upload = generated_code # Get content from state
            if not readme_content_to_upload:
                history = update_chat(history, "Internal error: No README content to upload. Resetting.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

            history = update_chat(history, "☁️ Uploading `README.md`...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield, keep temp state

            try:
                upload_file_to_space_action(io.StringIO(readme_content_to_upload), "README.md", repo_id, hf_profile, hf_token)
                history = update_chat(history, "βœ… Uploaded `README.md`. All files uploaded. Space is now building. Click 'Send' to check build logs.")
                # Transition to checking build logs, clear the temporary storage
                state = STATE_CHECKING_LOGS_BUILD
                generated_code = None # Clear temporary storage
                # Yield state change and bot message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error uploading README.md: {e}. Click 'reset'.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

        elif state == STATE_CHECKING_LOGS_BUILD:
            # Optional: Add a short delay here if needed, but fetch action includes timeout
            history = update_chat(history, "πŸ” Fetching build logs...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Show message

            build_logs_text = get_build_logs_action(repo_id, hf_profile, hf_token)
            updated_build = build_logs_text

            # Simple check: if build logs contain "Error" or "Exception", might indicate build issue.
            # More robust would involve checking build status via API, but logs are simpler for demo.
            # Either way we move on to the container-logs check; only the chat
            # message differs.
            if "Error" in updated_build or "Exception" in updated_build:
                history = update_chat(history, "⚠️ Build logs may contain errors. Please inspect above. Click 'Send' to check container logs (app might still start).")
            else:
                history = update_chat(history, "βœ… Build logs fetched. Click 'Send' to check container logs.")
            # Transition to run logs check
            state = STATE_CHECKING_LOGS_RUN
            # Yield state change and message, include updated build logs
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

        elif state == STATE_CHECKING_LOGS_RUN:
            history = update_chat(history, "πŸ” Fetching container logs...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Show message

            container_logs_text = get_container_logs_action(repo_id, hf_profile, hf_token)
            updated_run = container_logs_text

            # Check for errors/exceptions in run logs
            has_errors = "Error" in updated_run or "Exception" in updated_run
            if has_errors and attempts < MAX_DEBUG_ATTEMPTS:
                attempts += 1
                history = update_chat(history, f"❌ Errors detected in container logs. Attempting debug fix #{attempts}/{MAX_DEBUG_ATTEMPTS}. Click 'Send' to proceed.")
                # Transition to debugging state, increment attempts
                state = STATE_DEBUGGING_CODE
            elif has_errors and attempts >= MAX_DEBUG_ATTEMPTS:
                history = update_chat(history, f"❌ Errors detected in container logs. Max debug attempts ({MAX_DEBUG_ATTEMPTS}) reached. Please inspect logs manually or click 'reset'.")
                # Transition to complete state after failed attempts
                state = STATE_COMPLETE # Indicate workflow finished with errors
            else:
                history = update_chat(history, "βœ… App appears to be running successfully! Check the iframe above. Click 'reset' to start a new project.")
                # Transition to complete state on success
                state = STATE_COMPLETE # Indicate workflow finished successfully
            # Yield state change and message, include updated run logs
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

        elif state == STATE_DEBUGGING_CODE:
            history = update_chat(history, f"🧠 Calling Gemini to generate fix based on logs...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Show message

            debug_prompt = f"""
You are debugging a {space_sdk} Space. The goal is to fix the code in `app.py` based on the container logs provided.

Here are the container logs:
{updated_run}

Generate the *complete, fixed* content for `app.py` based on these logs.
Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block.
"""
            try:
                fix_code = call_gemini(debug_prompt, gemini_api_key, gemini_model)
                # Clean markdown and whitespace
                fix_code = fix_code.strip()
                if fix_code.startswith("```python"):
                    fix_code = fix_code[len("```python"):].strip()
                if fix_code.endswith("```"):
                    fix_code = fix_code[:-len("```")].strip()

                if not fix_code:
                    raise ValueError("Gemini returned empty fix code.")

                history = update_chat(history, "βœ… Fix code generated. Click 'Send' to upload.")
                # Transition to uploading fixed code, pass the fixed code
                state = STATE_UPLOADING_FIXED_APP_PY
                generated_code = fix_code # Store fix_code in state
                # Yield state change and message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error generating debug code: {e}. Click 'reset'.")
                # Reset on failure
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

        elif state == STATE_UPLOADING_FIXED_APP_PY:
            # Use the fixed code stored in state (reusing generated_code slot)
            fixed_code_to_upload = generated_code # Get code from state
            if not fixed_code_to_upload:
                history = update_chat(history, "Internal error: No fixed code available to upload. Resetting.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

            history = update_chat(history, "☁️ Uploading fixed `app.py`...")
            yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield, keep temp state

            try:
                upload_file_to_space_action(io.StringIO(fixed_code_to_upload), "app.py", repo_id, hf_profile, hf_token)
                history = update_chat(history, "βœ… Fixed `app.py` uploaded. Space will rebuild. Click 'Send' to check logs again.")
                # Transition back to checking run logs (after rebuild), clear temporary storage
                state = STATE_CHECKING_LOGS_RUN
                generated_code = None # Clear temporary storage
                # Yield state change and message
                yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

            except Exception as e:
                history = update_chat(history, f"❌ Error uploading fixed app.py: {e}. Click 'reset'.")
                yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
                return

        elif state == STATE_COMPLETE:
            # App is successfully deployed or failed after attempts.
            # User should click reset or start a new command.
            # Just yield the current state to update UI if needed.
            # The message for STATE_COMPLETE is set in the state it transitions from.
            pass # No state change needed, just yield current state at the end of try block

    except Exception as e:
        # Catch-all for unexpected exceptions in any state
        # This might mean the state machine logic itself failed or a core function raised unexpectedly.
        error_message = f"Workflow step failed unexpectedly ({state}): {e}. Click 'Send' to re-attempt this step or 'reset'."
        history = update_chat(history, error_message)
        print(f"Critical Error in state {state}: {e}") # Print to server logs
        # Transition to idle state on unexpected errors to prevent getting stuck.
        # Return so the final yield below cannot re-emit the stale state.
        yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
        return

    # Final yield after the try block ensures the latest state is returned
    # after a successful step completes (error paths return before reaching
    # here so their reset tuples remain the last value Gradio sees).
    # This is particularly important for states like STATE_COMPLETE.
    yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code



# --- Build the Gradio UI ---

with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
    # State variables
    hf_profile   = gr.State(None)
    hf_token     = gr.State(None)
    gemini_key   = gr.State(None)
    gemini_model = gr.State("gemini-2.5-flash-preview-04-17") # Default model
    repo_id      = gr.State(None)     # ID of the created Space (e.g., 'user/repo')
    workflow     = gr.State(STATE_IDLE) # Current state of the AI workflow
    sdk_state    = gr.State("gradio") # Selected SDK
    debug_attempts = gr.State(0)       # Counter for debug attempts
    app_description = gr.State(None)  # Stores the user's original request string
    repo_name_state = gr.State(None)  # Stores the parsed repo name
    generated_code_state = gr.State(None) # Temporarily stores generated code or file content (reused)

    with gr.Row():
        # Sidebar
        with gr.Column(scale=1, min_width=300):
            gr.Markdown("## Hugging Face Login")
            login_status = gr.Markdown("*Not logged in.*")
            login_btn    = gr.LoginButton(variant="huggingface")

            # Initial load to check login status
            ai_builder_tab.load(show_profile, outputs=login_status)
            # Update status and state on login click
            login_btn.click(show_profile, outputs=login_status)
            login_btn.click(lambda p, t: (p, t), inputs=[login_btn], outputs=[hf_profile, hf_token]) # Use login_btn output directly

            gr.Markdown("## Google AI Studio API Key")
            gemini_input  = gr.Textbox(label="API Key", type="password", interactive=True) # Ensure interactive
            gemini_status = gr.Markdown("")

            # Update key in state
            gemini_input.change(lambda k: k, inputs=gemini_input, outputs=gemini_key)

            gr.Markdown("## Gemini Model")
            model_selector = gr.Radio(
                choices=[
                    ("Gemini 2.5 Flash Preview 04-17", "gemini-2.5-flash-preview-04-17"),
                    ("Gemini 2.5 Pro Preview 03-25",   "gemini-2.5-pro-preview-03-25"),
                    ("Gemini 1.5 Flash",               "gemini-1.5-flash"), # Keep relevant models
                    ("Gemini 1.5 Pro",                 "gemini-1.5-pro"),
                    ("Gemini 1.0 Pro",                 "gemini-1.0-pro"),
                ],
                value="gemini-2.5-flash-preview-04-17",
                label="Select model",
                interactive=True # Ensure interactive
            )
            # Update model in state
            model_selector.change(lambda m: m, inputs=model_selector, outputs=gemini_model)

            # Configure Gemini status on load and when key/model changes
            # Note: These handlers *update the status text*, they don't block the workflow.
            ai_builder_tab.load(
                configure_gemini,
                inputs=[gemini_key, gemini_model],
                outputs=[gemini_status]
            )
            gemini_input.change(
                configure_gemini,
                inputs=[gemini_key, gemini_model],
                outputs=[gemini_status]
            )
            model_selector.change(
                configure_gemini,
                inputs=[gemini_key, gemini_model],
                outputs=[gemini_status]
            )


            gr.Markdown("## Space SDK")
            sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK", interactive=True)
            sdk_selector.change(lambda s: s, inputs=sdk_selector, outputs=sdk_state)

            gr.Markdown("## Workflow Status")
            status_text = gr.Textbox(label="Current State", value=STATE_IDLE, interactive=False)
            repo_id_text = gr.Textbox(label="Current Space ID", value="None", interactive=False)

        # Main content
        with gr.Column(scale=3):
            # Corrected Chatbot initialization
            chatbot    = gr.Chatbot(type='messages') # Added type='messages'
            user_input = gr.Textbox(placeholder="Type your message…", interactive=True) # Ensure interactive
            send_btn   = gr.Button("Send", interactive=False)

            # Logic to enable send button only when logged in and API key is set
            # This function determines the interactive state of the button
            def update_send_button_state(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None, key: str | None, model: str | None):
                is_logged_in = profile is not None and token is not None
                is_gemini_ready = key is not None and model is not None # Basic check
                # Could add a check if configure_gemini returned success last time
                return gr.update(interactive=is_logged_in and is_gemini_ready)

            # Update button state on load and whenever relevant inputs change
            ai_builder_tab.load(
                update_send_button_state,
                inputs=[hf_profile, hf_token, gemini_key, gemini_model],
                outputs=[send_btn]
            )
            login_btn.click(
                update_send_button_state,
                inputs=[hf_profile, hf_token, gemini_key, gemini_model],
                outputs=[send_btn]
            )
            gemini_input.change(
                update_send_button_state,
                inputs=[hf_profile, hf_token, gemini_key, gemini_model],
                outputs=[send_btn]
            )
            model_selector.change(
                update_send_button_state,
                inputs=[hf_profile, hf_token, gemini_key, gemini_model],
                outputs=[send_btn]
            )

            iframe    = gr.HTML("<p>No Space created yet.</p>")
            build_txt = gr.Textbox(label="Build Logs", lines=10, interactive=False, value="") # Initialize empty
            run_txt   = gr.Textbox(label="Container Logs", lines=10, interactive=False, value="") # Initialize empty

            # The main event handler for the Send button
            # It maps inputs/outputs to the ai_workflow_chat generator function
            send_btn.click(
                ai_workflow_chat,
                inputs=[
                    user_input, chatbot,
                    hf_profile, hf_token,
                    gemini_key, gemini_model,
                    repo_id, workflow, sdk_state,
                    iframe, run_txt, build_txt, # Pass current UI values
                    debug_attempts, app_description, repo_name_state, generated_code_state # Pass state variables
                ],
                outputs=[
                    chatbot,
                    repo_id, workflow,
                    iframe, run_txt, build_txt, # Update UI values
                    debug_attempts, app_description, repo_name_state, generated_code_state # Update state variables
                ]
            ).success( # Clear input after successful send
                 lambda: gr.update(value=""),
                 inputs=None,
                 outputs=user_input
            )

            # Link state variables to UI status displays (reactive updates)
            # These update the UI components whenever the State variables they listen to change.
            workflow.change(lambda s: s, inputs=workflow, outputs=status_text)
            repo_id.change(lambda r: r if r else "None", inputs=repo_id, outputs=repo_id_text)
            # The logs and iframe are updated directly by the `send_btn.click` output,
            # but adding reactive updates from the state variables can sometimes help
            # ensure consistency if state changes are yielded before the UI components are
            # explicitly updated in the same yield tuple.
            # iframe.change(lambda h: h, inputs=iframe, outputs=iframe) # Already linked via click outputs
            # build_txt.change(lambda t: t, inputs=build_txt, outputs=build_txt) # Already linked
            # run_txt.change(lambda t: t, inputs=run_txt, outputs=run_txt) # Already linked


    # Add an initial message to the chatbot on load
    # THIS CALL MUST BE INSIDE the with gr.Blocks() block
    def greet():
        """Build the chatbot's initial history.

        The Chatbot component above is created with type='messages', which
        requires history entries shaped as {'role': ..., 'content': ...}
        dicts; the legacy [[None, text]] tuple format is not valid in that
        mode, so the welcome message is emitted as a single assistant dict.

        Returns:
            list[dict]: one assistant-role welcome message.
        """
        return [{
            "role": "assistant",
            "content": "Welcome! Please log in to Hugging Face and provide your Google AI Studio API key to start building Spaces. Once ready, type 'generate me a gradio app called myapp' or 'create' to begin.",
        }]

    ai_builder_tab.load(greet, outputs=chatbot)  # show the welcome message as soon as the tab renders


if __name__ == "__main__":
    # Optional hardening kept for reference: huggingface_hub uses `requests`
    # internally, and mounting a retry-enabled HTTPAdapter on its session can
    # smooth over transient network failures (429/5xx).
    # from requests.adapters import HTTPAdapter
    # from urllib3.util.retry import Retry
    # retry_strategy = Retry(total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
    # adapter = HTTPAdapter(max_retries=retry_strategy)
    # get_session().mount("http://", adapter)
    # get_session().mount("https://", adapter)

    # Start the Gradio server for the app defined above.
    ai_builder_tab.launch()