Andre committed on
Commit 8322301 · 1 Parent(s): 90888e7

Added LS ID gen

.backup/working code backup/LS-AI-img-gen ADDED
@@ -0,0 +1 @@
+ Subproject commit 90888e775b9ca58b4833b1dcef77fcee1c75429e
.gitignore CHANGED
@@ -1,2 +1,2 @@
  images/
- .venv/

  images/
+ .venv/
adventurers_data.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "data": {
+ "adventurers": [
+ {
+ "owner": "0x62f0ca21835f5fb7470f80eaee41dcf29f7989e14607f023f390fcfb864bc7c",
+ "id": 450,
+ "name": "Realms #5656",
+ "strength": 1,
+ "vitality": 6,
+ "dexterity": 4,
+ "intelligence": 1,
+ "wisdom": 2,
+ "charisma": 7,
+ "level": 13,
+ "xp": 175,
+ "health": 0,
+ "beastHealth": 0,
+ "head": "Leather Cap",
+ "hand": "Hard Leather Gloves",
+ "chest": "Hard Leather Armor",
+ "waist": "Hard Leather Belt",
+ "foot": "Linen Shoes",
+ "weapon": "Quarterstaff",
+ "gold": 0,
+ "neck": null,
+ "ring": null,
+ "luck": 2,
+ "battleActionCount": 0,
+ "customRenderer": null,
+ "statUpgrades": 0
+ }
+ ]
+ }
+ }
adventurers_data2.json ADDED
@@ -0,0 +1,186 @@
+ {
+ "data": {
+ "battles": [
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 49,
+ "beast": "Wyvern",
+ "beastHealth": 10,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Adventurer",
+ "fled": false,
+ "damageDealt": 4,
+ "criticalHit": false,
+ "damageTaken": 0,
+ "damageLocation": null,
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.363Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 49,
+ "beast": "Wyvern",
+ "beastHealth": 10,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Beast",
+ "fled": false,
+ "damageDealt": 0,
+ "criticalHit": false,
+ "damageTaken": 13,
+ "damageLocation": "Foot",
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.363Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 75,
+ "beast": "Wyvern",
+ "beastHealth": 18,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Adventurer",
+ "fled": false,
+ "damageDealt": 4,
+ "criticalHit": false,
+ "damageTaken": 0,
+ "damageLocation": null,
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.361Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 62,
+ "beast": "Wyvern",
+ "beastHealth": 14,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Beast",
+ "fled": false,
+ "damageDealt": 0,
+ "criticalHit": false,
+ "damageTaken": 13,
+ "damageLocation": "Waist",
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.363Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 75,
+ "beast": "Wyvern",
+ "beastHealth": 18,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Beast",
+ "fled": false,
+ "damageDealt": 0,
+ "criticalHit": false,
+ "damageTaken": 4,
+ "damageLocation": "Chest",
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.362Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 79,
+ "beast": "Wyvern",
+ "beastHealth": 22,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Beast",
+ "fled": false,
+ "damageDealt": 0,
+ "criticalHit": false,
+ "damageTaken": 13,
+ "damageLocation": "Head",
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.361Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 90,
+ "beast": "Pixie",
+ "beastHealth": 0,
+ "beastLevel": 1,
+ "seed": "0x5a60e008",
+ "attacker": "Adventurer",
+ "fled": false,
+ "damageDealt": 4,
+ "criticalHit": false,
+ "damageTaken": 0,
+ "damageLocation": null,
+ "xpEarnedAdventurer": 4,
+ "xpEarnedItems": 8,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:53:25.168Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 90,
+ "beast": "Fairy",
+ "beastHealth": 3,
+ "beastLevel": 1,
+ "seed": "0x1c2",
+ "attacker": "Beast",
+ "fled": false,
+ "damageDealt": 0,
+ "criticalHit": false,
+ "damageTaken": 10,
+ "damageLocation": "Chest",
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-10T18:35:14.760Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 79,
+ "beast": "Wyvern",
+ "beastHealth": 22,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Adventurer",
+ "fled": false,
+ "damageDealt": 4,
+ "criticalHit": false,
+ "damageTaken": 0,
+ "damageLocation": null,
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.361Z"
+ },
+ {
+ "adventurerId": 450,
+ "adventurerHealth": 62,
+ "beast": "Wyvern",
+ "beastHealth": 14,
+ "beastLevel": 3,
+ "seed": "0x3f6f4350",
+ "attacker": "Adventurer",
+ "fled": false,
+ "damageDealt": 4,
+ "criticalHit": false,
+ "damageTaken": 0,
+ "damageLocation": null,
+ "xpEarnedAdventurer": 0,
+ "xpEarnedItems": 0,
+ "goldEarned": 0,
+ "discoveryTime": "2024-09-11T21:56:12.362Z"
+ }
+ ]
+ }
+ }
app.py CHANGED
@@ -1,7 +1,9 @@
  # app.py
- from config.config import models, prompts, api_token # Direct import
  import gradio as gr
  from src.img_gen import generate_image

  # Gradio Interface
  def gradio_interface():
@@ -20,22 +22,24 @@ def gradio_interface():
  with gr.Row():
  # Set default values for dropdowns
  prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Beast", value=prompts[0]["alias"])
- character_dropdown = gr.Dropdown(choices=["Beast only", "Wizard", "Warrior"], label="Select Character", value="Beast only")
  model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"])
  with gr.Row():
  # Add a text box for custom user input (max 200 characters)
  custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200)
  with gr.Row():
  generate_button = gr.Button("Generate Image")
  with gr.Row():
  output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%")
  with gr.Row():
  status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
-
  # Connect the button to the function
  generate_button.click(
  generate_image,
- inputs=[prompt_dropdown,
  custom_prompt_input,
  character_dropdown,
  model_dropdown

  # app.py
+ from config.config import prompts, api_token # Direct import
+ from config.models import models
  import gradio as gr
  from src.img_gen import generate_image
+ from metadata.metadata import fetch_metadata

  # Gradio Interface
  def gradio_interface():

  with gr.Row():
  # Set default values for dropdowns
  prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Beast", value=prompts[0]["alias"])
+ adventurer_id = gr.Number(label="Adventurer ID:")
+ character_dropdown = gr.Dropdown(choices=["Portait", "Last battle", "Loot bag"], label="Select Scene", value="Portait")
  model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"])
  with gr.Row():
  # Add a text box for custom user input (max 200 characters)
  custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200)
+ #custom_prompt_input = f""
  with gr.Row():
  generate_button = gr.Button("Generate Image")
  with gr.Row():
  output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%")
  with gr.Row():
  status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
  # Connect the button to the function
  generate_button.click(
  generate_image,
+ inputs=[adventurer_id,
+ prompt_dropdown,
  custom_prompt_input,
  character_dropdown,
  model_dropdown
app_modal.py ADDED
@@ -0,0 +1,83 @@
+ # gradio_interface.py
+ import gradio as gr
+ import modal
+ from config.config import prompts, models_modal # Indirect import
+ #from img_gen import generate_image
+
+ print("Hello from gradio_interface_head!")
+
+ # Modal remote function synchronously
+ def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input, cpu_gpu ="GPU"):
+ # Debug:
+ debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}"
+ print(debug_message) # Print to console for debugging
+ try:
+ # Check for CPU/GPU dropdown option
+ if cpu_gpu == "GPU":
+ f = modal.Function.from_name("LS-img-gen-modal", "generate_image_gpu")
+ else:
+ f = modal.Function.from_name("LS-img-gen-modal", "generate_image_cpu")
+
+ # Import the remote function
+ image_path, message = f.remote(
+ prompt_dropdown,
+ team_dropdown,
+ model_dropdown,
+ custom_prompt_input,
+ )
+ return image_path, message
+ except Exception as e:
+ return None, f"Error calling generate_image function: {e}"
+
+
+ # Gradio Interface
+ def gradio_interface():
+ with gr.Blocks(css="""
+ .gradio-container {
+ background-image: url('');
+ background-size: cover;
+ background-position: center;
+ }
+ .output-image img {
+ width: 2500px; /* Force image to fill container width */
+ object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */
+ }
+ """) as demo:
+ gr.Markdown("# ========== Loot Survivor - AI Image Generator ==========")
+ with gr.Row():
+ # Set default values for dropdowns
+ prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Beast", value=prompts[0]["alias"])
+ character_dropdown = gr.Dropdown(choices=["Beast only", "Wizard", "Warrior"], label="Select Character", value="Beast only")
+ model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models_modal], label="Select Model", value=models_modal[0]["alias"])
+ with gr.Row():
+ # Add a text box for custom user input (max 200 characters)
+ custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200)
+ with gr.Row():
+ generate_button = gr.Button("Generate Image")
+ with gr.Row():
+ output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%")
+ with gr.Row():
+ status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
+
+
+ # Import the remote function
+ f = modal.Function.from_name("img-gen-modal-gpu", "generate_image")
+
+ # Connect the button to the function
+ generate_button.click(
+ generate,
+ inputs=[prompt_dropdown,
+ custom_prompt_input,
+ character_dropdown,
+ model_dropdown
+ ],
+ outputs=[output_image, status_text]
+ )
+ return demo
+
+ # Create the demo instance
+ demo = gradio_interface()
+
+ # Only launch if running directly
+ if __name__ == "__main__":
+ demo.queue().launch()
config/__pycache__/config.cpython-310.pyc ADDED
Binary file (617 Bytes)
 
config/__pycache__/config.cpython-311.pyc CHANGED
Binary files a/config/__pycache__/config.cpython-311.pyc and b/config/__pycache__/config.cpython-311.pyc differ
 
config/__pycache__/models.cpython-310.pyc ADDED
Binary file (293 Bytes)
 
config/__pycache__/models.cpython-311.pyc CHANGED
Binary files a/config/__pycache__/models.cpython-311.pyc and b/config/__pycache__/models.cpython-311.pyc differ
 
config/__pycache__/prompts.cpython-310.pyc ADDED
Binary file (2.79 kB)
 
config/config.py CHANGED
@@ -1,7 +1,7 @@
  # config.py
  import os
  from config.prompts import prompts
- from config.models import models

  # Retrieve the Hugging Face token
  api_token = os.getenv("HF_TOKEN")
@@ -9,4 +9,4 @@ api_token = os.getenv("HF_TOKEN")
  # Debugging: Print prompt and model options
  print("##### IMPORTING CONFIG #####")
  print("Prompt Options:", [p["alias"] for p in prompts])
- print("Model Options:", [m["alias"] for m in models])

  # config.py
  import os
  from config.prompts import prompts
+ from config.models import models_modal

  # Retrieve the Hugging Face token
  api_token = os.getenv("HF_TOKEN")

  # Debugging: Print prompt and model options
  print("##### IMPORTING CONFIG #####")
  print("Prompt Options:", [p["alias"] for p in prompts])
+ print("Model Options:", [m["alias"] for m in models_modal])
config/models.py CHANGED
@@ -3,3 +3,11 @@ models = [
  {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
  {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
  ]

  {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
  {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
  ]
+
+ models_modal = [
+ {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"},
+ #{"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"},
+ #{"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
+ #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
+ #{"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"},
+ ]
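
For reference, a minimal sketch (not part of the commit) of how the two lists above are consumed: `models` feeds the Hugging Face Inference path used by `app.py` / `src/img_gen.py`, while the new `models_modal` feeds the Modal-volume path used by `app_modal.py` / `src/img_gen_modal.py`.

# Sketch only: alias-to-name lookup, mirroring the next(...) calls in img_gen.py and img_gen_modal.py.
from config.models import models, models_modal

hf_model = next(m["name"] for m in models if m["alias"] == "FLUX.1-dev")                        # "black-forest-labs/FLUX.1-dev"
local_model = next(m["name"] for m in models_modal if m["alias"] == "FLUX.1-dev_modal_local")   # "FLUX.1-dev" folder on the Modal volume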
metadata/__pycache__/metadata.cpython-311.pyc ADDED
Binary file (7.53 kB)
 
metadata/metadata copy 2.py ADDED
@@ -0,0 +1,146 @@
+ import requests
+ import json # Import the json module for saving data
+
+ # Define the GraphQL endpoint
+ url = "https://ls-indexer-sepolia.provable.games/graphql"
+
+ # Define the GraphQL queries
+ query = """
+ query MyQuery {
+ adventurers(limit: 10, where: {id: {eq: 555}}) {
+ owner
+ id
+ name
+ strength
+ vitality
+ dexterity
+ intelligence
+ wisdom
+ charisma
+ level
+ xp
+ health
+ beastHealth
+ head
+ hand
+ chest
+ waist
+ foot
+ weapon
+ gold
+ neck
+ ring
+ luck
+ battleActionCount
+ customRenderer
+ statUpgrades
+ }
+ }
+ """
+
+ query2 = """
+ query MyQuery {
+ battles(where: {adventurerId: {eq: 555}}) {
+ adventurerId
+ adventurerHealth
+ beast
+ beastHealth
+ beastLevel
+ seed
+ attacker
+ fled
+ damageDealt
+ criticalHit
+ damageTaken
+ damageLocation
+ xpEarnedAdventurer
+ xpEarnedItems
+ goldEarned
+ discoveryTime
+ }
+ }
+ """
+
+ # Define the request payloads
+ payload = {
+ "query": query
+ }
+ payload2 = {
+ "query": query2
+ }
+
+ # Send the POST requests to the GraphQL API
+ response = requests.post(url, json=payload)
+ response2 = requests.post(url, json=payload2)
+
+ # Check if the requests were successful
+ if response.status_code == 200 and response2.status_code == 200:
+ # Parse the JSON responses
+ data = response.json()
+ data2 = response2.json()
+ print("Data fetched successfully.")
+
+ # Save the data to files
+ with open("adventurers_data.json", "w") as file:
+ json.dump(data, file, indent=4) # Save with pretty-printing (indent=4)
+ with open("adventurers_data2.json", "w") as file:
+ json.dump(data2, file, indent=4) # Save with pretty-printing (indent=4)
+ print("Data saved to 'adventurers_data.json' and 'adventurers_data2.json'.")
+
+ # Extract the list of adventurers from both queries
+ adventurers = data.get("data", {}).get("adventurers", [])
+ battles = data2.get("data", {}).get("battles", [])
+
+ # Create a dictionary to map adventurers by their ID for quick lookup
+ adventurers_dict = {adv["id"]: adv for adv in adventurers}
+
+ # Add fields from the second query to the corresponding adventurer
+ for adv2 in battles:
+ adventurer_id = adv2["adventurerId"]
+ if adventurer_id in adventurers_dict:
+ print("here")
+ # Add new fields to the existing adventurer
+ adventurers_dict[adventurer_id].update(adv2)
+ else:
+ print("else")
+
+ # Print each adventurer's details dynamically
+ for adventurer in adventurers_dict.values():
+ # Create a dictionary to store field values
+ adventurer_data = adventurer # Use the updated dictionary
+
+ print("\n=====Adventurer Details=====")
+ for key, value in adventurer_data.items():
+ print(f"{key.capitalize()}: {value}")
+
+ if adventurer_data['health'] != 0:
+ print("\n=====THE ADVENTURER IS STILL ALIVE=====")
+ else:
+ print("\n=====THE ADVENTURER IS DEAD=====")
+
+ # Example: Access specific fields
+ print(f"\nEquipment list of {adventurer_data['name']}:")
+ print(f"\nAdventurer Head: {adventurer_data.get('head', 'None')}")
+ print(f"Hand: {adventurer_data.get('hand', 'None')}")
+ print(f"Chest: {adventurer_data.get('chest', 'None')}")
+ print(f"Waist: {adventurer_data.get('waist', 'None')}")
+ print(f"Foot: {adventurer_data.get('foot', 'None')}")
+ print(f"Weapon: {adventurer_data.get('weapon', 'None')}")
+ print(f"Last combat: {adventurer_data.get('beast', 'Unknown')}")
+ print(f"Beast Level: {adventurer_data.get('beastLevel', 'Unknown')}")
+ print(f"Attacker: {adventurer_data.get('attacker', 'Unknown')}")
+ print(f"Fled: {adventurer_data.get('fled', 'Unknown')}")
+ print(f"Damage Dealt: {adventurer_data.get('damageDealt', 'Unknown')}")
+ print(f"Damage Taken: {adventurer_data.get('damageTaken', 'Unknown')}")
+ print(f"Crticial Hit: {adventurer_data.get('criticalHit', 'Unknown')}")
+ print(f"Damage Location: {adventurer_data.get('damageLocation', 'Unknown')}")
+ print(f"Beast Health: {adventurer_data.get('beastHealth', 'Unknown')}")
+ print(f"Adventure Health: {adventurer_data.get('adventurerHealth', 'Unknown')}")
+
+ else:
+ # Print detailed error information
+ print(f"Failed to fetch data. Status codes: {response.status_code}, {response2.status_code}")
+ print("Response 1 Headers:", response.headers)
+ print("Response 1 Body:", response.text)
+ print("Response 2 Headers:", response2.headers)
+ print("Response 2 Body:", response2.text)
metadata/metadata copy.py ADDED
@@ -0,0 +1,99 @@
+ import requests
+ import json # Import the json module for saving data
+
+ # Define the GraphQL endpoint
+ url = "https://ls-indexer-sepolia.provable.games/graphql"
+
+ # Define the GraphQL query
+ query = """
+ query MyQuery {
+ adventurers(limit: 10, where: {id: {eq: 555}}) {
+ owner
+ id
+ name
+ strength
+ vitality
+ dexterity
+ intelligence
+ wisdom
+ charisma
+ level
+ xp
+ health
+ beastHealth
+ head
+ hand
+ chest
+ waist
+ foot
+ weapon
+ gold
+ neck
+ ring
+ luck
+ battleActionCount
+ customRenderer
+ statUpgrades
+ }
+ }
+ """
+
+ # Define the request payload
+ payload = {
+ "query": query
+ }
+
+ # Send the POST request to the GraphQL API
+ response = requests.post(url, json=payload)
+
+ # Check if the request was successful
+ if response.status_code == 200:
+ # Parse the JSON response
+ data = response.json()
+ print("Data fetched successfully:")
+ print(data)
+ # Save the data to a file
+ with open("adventurers_data.json", "w") as file:
+ json.dump(data, file, indent=4) # Save with pretty-printing (indent=4)
+ print("Data saved to 'adventurers_data.json'.")
+
+ # Extract the list of adventurers
+ adventurers = data.get("data", {}).get("adventurers", [])
+
+ # Print each adventurer's details dynamically
+ for adventurer in adventurers:
+ # Assign the 'hand' and 'head' fields to variables
+ hand_var = adventurer.get("hand")
+ head_var = adventurer.get("head")
+
+ # Print the variables (for debugging)
+ print(f"Hand: {hand_var}")
+ print(f"Head: {head_var}")
+
+ # Check if 'hand_var' is not None and call action()
+ if hand_var is not None:
+ print(hand_var)
+
+ print("\n=====Adventurer Details=====")
+ for key, value in adventurer.items():
+ print(f"{key.capitalize()}: {value}")
+ adventurer_data[key] = value # Assign to dictionary
+
+ if heatlh is not 0:
+ print("\n=====THE ADVENTURER IS STILL ALIVE=====")
+ else:
+ print("\n=====THE ADVENTURER IS DEAD=====")
+ # Example: Access specific fields
+ print(f"\nEquipment list of {adventurer_data['name']}:")
+ print(f"\nAdventurer Head: {adventurer_data['head']}")
+ print(f"Hand: {adventurer_data['hand']}")
+ print(f"Chest: {adventurer_data['chest']}")
+ print(f"Waist: {adventurer_data['waist']}")
+ print(f"Foot: {adventurer_data['foot']}")
+ print(f"Weapon: {adventurer_data['weapon']}")
+ print(f"Last combat: {adventurer_data['weapon']}")
+
+
+ else:
+ print(f"Failed to fetch data. Status code: {response.status_code}")
+ print(response.text)
metadata/metadata.py ADDED
@@ -0,0 +1,161 @@
+ import requests
+ import json # Import the json module for saving data
+
+ # Define the GraphQL endpoint
+ url = "https://ls-indexer-sepolia.provable.games/graphql"
+
+ def fetch_metadata(adv_id):
+ if adv_id:
+ print(f"Adventure ID: {adv_id}")
+
+ # Define the GraphQL queries
+ query = """
+ query MyQuery($id: FeltValue!) {
+ adventurers(limit: 10, where: {id: {eq: $id}}) {
+ owner
+ id
+ name
+ strength
+ vitality
+ dexterity
+ intelligence
+ wisdom
+ charisma
+ level
+ xp
+ health
+ beastHealth
+ head
+ hand
+ chest
+ waist
+ foot
+ weapon
+ gold
+ neck
+ ring
+ luck
+ battleActionCount
+ customRenderer
+ statUpgrades
+ }
+ }
+ """
+
+ query2 = """
+ query MyQuery($id: FeltValue!) {
+ battles(where: {adventurerId: {eq: $id}}) {
+ adventurerId
+ adventurerHealth
+ beast
+ beastHealth
+ beastLevel
+ seed
+ attacker
+ fled
+ damageDealt
+ criticalHit
+ damageTaken
+ damageLocation
+ xpEarnedAdventurer
+ xpEarnedItems
+ goldEarned
+ discoveryTime
+ }
+ }
+ """
+
+ variables = {"id": adv_id}
+
+ # Define the request payloads
+ payload = {
+ "query": query,
+ "variables": variables
+ }
+ payload2 = {
+ "query": query2,
+ "variables": variables
+ }
+
+ # Send the POST requests to the GraphQL API
+ response = requests.post(url, json=payload)
+ response2 = requests.post(url, json=payload2)
+
+ # Check if the requests were successful
+ if response.status_code == 200 and response2.status_code == 200:
+ # Parse the JSON responses
+ data = response.json()
+ data2 = response2.json()
+ print("Data fetched successfully.")
+
+ # Save the data to files
+ with open("adventurers_data.json", "w") as file:
+ json.dump(data, file, indent=4) # Save with pretty-printing (indent=4)
+ with open("adventurers_data2.json", "w") as file:
+ json.dump(data2, file, indent=4) # Save with pretty-printing (indent=4)
+ print("Data saved to 'adventurers_data.json' and 'adventurers_data2.json'.")
+
+ # Extract the list of adventurers from both queries
+ adventurers = data.get("data", {}).get("adventurers", [])
+ battles = data2.get("data", {}).get("battles", [])
+
+ # Create a dictionary to map adventurers by their ID for quick lookup
+ adventurers_dict = {adv["id"]: adv for adv in adventurers}
+
+ # Add fields from the second query to the corresponding adventurer
+ for adv2 in battles:
+ adventurer_id = adv2["adventurerId"]
+ if adventurer_id in adventurers_dict:
+ print("battles loop")
+ # Add new fields to the existing adventurer
+ adventurers_dict[adventurer_id].update(adv2)
+ else:
+ print("else")
+
+ # Print each adventurer's details dynamically
+ for adventurer in adventurers_dict.values():
+ # Create a dictionary to store field values
+ adventurer_data = adventurer # Use the updated dictionary
+
+ print("\n=====Adventurer Details=====")
+ for key, value in adventurer_data.items():
+ print(f"{key.capitalize()}: {value}")
+
+ if adventurer_data['health'] != 0:
+ print("\n=====THE ADVENTURER IS STILL ALIVE=====")
+ else:
+ print("\n=====THE ADVENTURER IS DEAD=====")
+
+ # Example: Access specific fields
+ print(f"\nEquipment list of {adventurer_data['name']}:")
+ print(f"\nAdventurer Head: {adventurer_data.get('head', 'None')}")
+ print(f"Hand: {adventurer_data.get('hand', 'None')}")
+ print(f"Chest: {adventurer_data.get('chest', 'None')}")
+ print(f"Waist: {adventurer_data.get('waist', 'None')}")
+ print(f"Foot: {adventurer_data.get('foot', 'None')}")
+ print(f"Weapon: {(weapon := adventurer_data.get('weapon', 'None'))}")
+ print(f"Beast: {adventurer_data.get('beast', 'Unknown')}")
+ print(f"Beast Level: {adventurer_data.get('beastLevel', 'Unknown')}")
+ print(f"Attacker: {adventurer_data.get('attacker', 'Unknown')}")
+ print(f"Fled: {adventurer_data.get('fled', 'Unknown')}")
+ print(f"Damage Dealt: {adventurer_data.get('damageDealt', 'Unknown')}")
+ print(f"Damage Taken: {adventurer_data.get('damageTaken', 'Unknown')}")
+ print(f"Crticial Hit: {adventurer_data.get('criticalHit', 'Unknown')}")
+ print(f"Damage Location: {adventurer_data.get('damageLocation', 'Unknown')}")
+ print(f"Beast Health: {adventurer_data.get('beastHealth', 'Unknown')}")
+ print(f"Adventurer Health: {adventurer_data.get('adventurerHealth', 'Unknown')}")
+
+ if adventurer_data.get('weapon') == "Club":
+ print("HELLO!")
+
+ if weapon == "Club":
+ print("HELLO AGAIN!")
+ else:
+ # Print detailed error information
+ print(f"Failed to fetch data. Status codes: {response.status_code}, {response2.status_code}")
+ print("Response 1 Headers:", response.headers)
+ print("Response 1 Body:", response.text)
+ print("Response 2 Headers:", response2.headers)
+ print("Response 2 Body:", response2.text)
+
+ return adventurer_data
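
For reference, a minimal sketch (not part of the commit) of how the new `fetch_metadata` helper is consumed; `src/img_gen.py` below calls it the same way, and ID 450 matches the sample data committed above.

# Sketch only: fetch and inspect an adventurer the way src/img_gen.py does.
from metadata.metadata import fetch_metadata

adventurer = fetch_metadata(450)  # merged adventurer fields plus last-battle fields
print(adventurer["name"], adventurer["weapon"], adventurer.get("beast"))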
requirements.txt CHANGED
@@ -10,4 +10,5 @@ invisible_watermark
  huggingface_hub[hf_transfer]
  sentencepiece
  opencv-python==4.5.5.64
- gguf

  huggingface_hub[hf_transfer]
  sentencepiece
  opencv-python==4.5.5.64
+ gguf
+ tensorflow
src/__pycache__/check_dependecies.cpython-311.pyc ADDED
Binary file (950 Bytes)
 
src/__pycache__/img_gen.cpython-311.pyc CHANGED
Binary files a/src/__pycache__/img_gen.cpython-311.pyc and b/src/__pycache__/img_gen.cpython-311.pyc differ
 
src/__pycache__/img_gen_modal.cpython-310.pyc ADDED
Binary file (3.83 kB)
 
src/__pycache__/img_gen_modal.cpython-311.pyc ADDED
Binary file (10.1 kB)
 
src/check_dependecies.py ADDED
@@ -0,0 +1,22 @@
+ print("Running debug check...")
+ # Debug function to check installed packages
+ def check_dependencies():
+ packages = [
+ "diffusers", # For Stable Diffusion
+ "transformers", # For Hugging Face models
+ "torch", # PyTorch
+ "accelerate", # For distributed training/inference
+ "gradio", # For the Gradio interface (updated to latest version)
+ "safetensors", # For safe model loading
+ "pillow", # For image processing
+ "sentencepiece",
+ "gguf",
+ ]
+
+ for package in packages:
+ try:
+ import importlib
+ module = importlib.import_module(package)
+ print(f" {package} is installed. Version:")
+ except ImportError:
+ print(f" {package} is NOT installed.")
src/img_gen copy.py ADDED
@@ -0,0 +1,104 @@
+ # img_gen.py
+ import sys
+ import os
+ import random
+ from huggingface_hub import InferenceClient, login
+ from datetime import datetime
+ from config.config import prompts, api_token
+ from config.models import models
+ from metadata.metadata import fetch_metadata
+
+ def generate_image(
+ adventurer_id,
+ prompt_alias,
+ custom_prompt,
+ characer_dropdown,
+ model_alias,
+ height=360,
+ width=640,
+ num_inference_steps=20,
+ guidance_scale=2.0,
+ seed=-1):
+
+ adventurer = fetch_metadata(adventurer_id)
+ print(f"ANDRE {adventurer['name']}")
+
+ prompt = f"A portait of a medieval, fantasy adventurer, equiped with a {adventurer['weapon']} (depending on his weapon make the characer a warrior, or a hunter or a wizard). He is also equiped in the head with a {adventurer['head']}, hands with {adventurer['hand']}, the chest with a {adventurer['chest']}, and the waist with a {adventurer['waist']}. Unreal Engine render style, photorealistic, realistic fantasy style."
+ # Find the selected prompt and model
+ try:
+ #prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
+ model_name = next(m for m in models if m["alias"] == model_alias)["name"]
+
+ except StopIteration:
+ return None, "ERROR: Invalid prompt or model selected."
+
+ # Print the original prompt and dynamic values for debugging
+ print("Original Prompt:")
+ print(prompt)
+
+ # Append the custom character (if provided)
+ if characer_dropdown == "Wizard":
+ prompt += f" A wizard combats using powerful magic against the {prompt_alias}"
+ elif characer_dropdown == "Warrior":
+ prompt += f" A warrior combats using his weapons against the {prompt_alias}"
+ else:
+ pass
+
+ # Append the custom prompt (if provided)
+ if custom_prompt and len(custom_prompt.strip()) > 0:
+ prompt += " " + custom_prompt.strip()
+
+ # Print the formatted prompt for debugging
+ print("\nFormatted Prompt:")
+ print(prompt)
+
+ # Randomize the seed if needed
+ if seed == -1:
+ seed = random.randint(0, 1000000)
+
+ # HF LOGIN
+ print("Initializing HF TOKEN")
+ print (api_token)
+ # login(token=api_token)
+ # print("model_name:")
+ # print(model_name)
+
+
+ # Initialize the InferenceClient
+ try:
+ print("-----INITIALIZING INFERENCE-----")
+ client = InferenceClient(model_name, token=api_token)
+ print("Inference activated")
+ except Exception as e:
+ return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"
+
+ #Generate the image
+ try:
+ print("-----GENERATING IMAGE-----")
+ print("-----HOLD ON-----")
+ image = client.text_to_image(
+ prompt,
+ guidance_scale=guidance_scale,
+ num_inference_steps=num_inference_steps,
+ width=width,
+ height=height,
+ seed=seed
+ )
+ print("-----IMAGE GENERATED SUCCESSFULLY!-----")
+ except Exception as e:
+ return None, f"ERROR: Failed to generate image. Details: {e}"
+
+ # Save the image with a timestamped filename
+ print("-----SAVING-----", image)
+ path = "images"
+
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ output_filename = f"{path}/{timestamp}_{seed}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{characer_dropdown.replace(' ', '_').lower()}.png"
+ try:
+ image.save(output_filename)
+ except Exception as e:
+ return None, f"ERROR: Failed to save image. Details: {e}"
+ print("-----DONE!-----")
+ print("-----CALL THE BANNERS!-----")
+
+ return output_filename, "Image generated successfully!"
src/img_gen.py CHANGED
@@ -4,9 +4,12 @@ import os
  import random
  from huggingface_hub import InferenceClient, login
  from datetime import datetime
- from config.config import models, prompts, api_token # Direct import

  def generate_image(
  prompt_alias,
  custom_prompt,
  characer_dropdown,
@@ -16,9 +19,27 @@ def generate_image(
  num_inference_steps=20,
  guidance_scale=2.0,
  seed=-1):
  # Find the selected prompt and model
  try:
- prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
  model_name = next(m for m in models if m["alias"] == model_alias)["name"]

  except StopIteration:
@@ -28,13 +49,6 @@ def generate_image(
  print("Original Prompt:")
  print(prompt)

- # Append the custom character (if provided)
- if characer_dropdown == "Wizard":
- prompt += f" A wizard combats using powerful magic against the {prompt_alias}"
- elif characer_dropdown == "Warrior":
- prompt += f" A warrior combats using his weapons against the {prompt_alias}"
- else:
- pass

  # Append the custom prompt (if provided)
  if custom_prompt and len(custom_prompt.strip()) > 0:
@@ -84,8 +98,11 @@ def generate_image(
  print("-----SAVING-----", image)
  path = "images"

  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- output_filename = f"{path}/{timestamp}_{seed}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{characer_dropdown.replace(' ', '_').lower()}.png"
  try:
  image.save(output_filename)
  except Exception as e:
@@ -93,4 +110,4 @@ def generate_image(
  print("-----DONE!-----")
  print("-----CALL THE BANNERS!-----")

- return output_filename, "Image generated successfully!"

  import random
  from huggingface_hub import InferenceClient, login
  from datetime import datetime
+ from config.config import prompts, api_token
+ from config.models import models
+ from metadata.metadata import fetch_metadata

  def generate_image(
+ adventurer_id,
  prompt_alias,
  custom_prompt,
  characer_dropdown,

  num_inference_steps=20,
  guidance_scale=2.0,
  seed=-1):
+
+ adventurer = fetch_metadata(adventurer_id)
+ print(f"ANDRE {adventurer['name']}")
+
+ # Set the custom prompt variables
+ if characer_dropdown == "Portait":
+ prompt = f"A portait of a medieval, fantasy adventurer, equiped with a weapon: a {adventurer['weapon']} (depending on his weapon, make the characer dressed as a warrior, or as a hunter or as a wizard). He is also equiped in the head with a {adventurer['head']}, the hands with {adventurer['hand']}, the chest with a {adventurer['chest']}, and the waist with a {adventurer['waist']}. Please be sure to use only medieval items that were possble to be made in that period. Unreal Engine render style, photorealistic, atmospheric light, realistic fantasy style."
+
+ if characer_dropdown == "Last battle":
+ prompt = f"A battle between a medieval fantasy adventurer, and a big {adventurer['beast']} monster. The adventurer is combating with {adventurer['weapon']} (depending on his equipment, make the characer dressed as a warrior, or as a hunter or as a wizard). He is also equiped in the head with {adventurer['head']}, the hands with {adventurer['hand']}, the chest with {adventurer['chest']}, the waist with {adventurer['waist']}, the fet with {adventurer['foot']}. Please sure to use only medieval items that were possble to be made in that period. Add details for the monster as well. is Unreal Engine render style, photorealistic, realistic fantasy style."
+
+ elif characer_dropdown == "Loot bag":
+ prompt = f"A loot bag from a medieval fantasy adventurer and his equipments. On the floor also a {adventurer['weapon']} a {adventurer['head']}, a {adventurer['hand']}, a {adventurer['chest']}, a {adventurer['waist']}, and a {adventurer['foot']}. Please sure to use only medieval items that were possble to be made in that period. Inside the bag also gold coins. Atmospheric light, cavern, dungeon context. Unreal Engine render style, photorealistic, realistic fantasy style."
+ else:
+ pass
+
+
+
  # Find the selected prompt and model
  try:
+ #prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
  model_name = next(m for m in models if m["alias"] == model_alias)["name"]

  except StopIteration:

  print("Original Prompt:")
  print(prompt)


  # Append the custom prompt (if provided)
  if custom_prompt and len(custom_prompt.strip()) > 0:

  print("-----SAVING-----", image)
  path = "images"

+ message = f"Image generated successfully! Call the banners! \nID: {adventurer['id']}, NAME: {adventurer['name']}, WEAPON: {adventurer['weapon']}, HEAD: {adventurer['head']}, HAND: {adventurer['hand']}, CHEST: {adventurer['chest']}, WAIST: {adventurer['waist']}, BEAST: {adventurer['beast']}"
+ file_name_extension = f"ID: {adventurer['id']}, NAME: {adventurer['name']}, WEAPON: {adventurer['weapon']}, HEAD: {adventurer['head']}, HAND: {adventurer['hand']}, CHEST: {adventurer['chest']}, WAIST: {adventurer['waist']}, BEAST: {adventurer['beast']}"
+
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ output_filename = f"{path}/{timestamp}_{seed}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{characer_dropdown.replace(' ', '_').lower()}_{file_name_extension.replace(' ', '_').lower()}.png"
  try:
  image.save(output_filename)
  except Exception as e:

  print("-----DONE!-----")
  print("-----CALL THE BANNERS!-----")

+ return output_filename, message
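
For reference, a hedged sketch (not part of the commit) of calling the updated `generate_image` directly: the adventurer ID is now the first positional argument, and the scene string must match one of the dropdown choices wired up in `app.py`. The prompt alias below is a placeholder, since `config/prompts.py` is not shown in this diff.

# Sketch only: new signature is (adventurer_id, prompt_alias, custom_prompt, characer_dropdown, model_alias, ...).
from src.img_gen import generate_image

filename, status = generate_image(
    450,              # adventurer_id (new in this commit)
    "Some Beast",     # prompt_alias -- placeholder; real values come from config/prompts.py
    "",               # custom_prompt
    "Portait",        # scene: "Portait", "Last battle" or "Loot bag" (spelled as in app.py)
    "FLUX.1-dev",     # model_alias from config/models.py
)
print(status)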
src/img_gen_modal.py ADDED
@@ -0,0 +1,205 @@
+ #img_gen_modal.py
+ import modal
+ import random
+ import io
+ from config.config import prompts, api_token
+ from config.models import models_modal
+ import os
+ import gradio as gr
+ import torch
+ import sentencepiece
+ import torch
+ from huggingface_hub import login
+ from transformers import AutoTokenizer
+ import random
+ from datetime import datetime
+ from diffusers.callbacks import SDXLCFGCutoffCallback
+ from diffusers import FluxPipeline
+ from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline, AutoencoderTiny, AutoencoderKL, DiffusionPipeline, FluxTransformer2DModel, GGUFQuantizationConfig
+ from PIL import Image
+ from src.check_dependecies import check_dependencies
+ import numpy as np
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 2048
+
+ CACHE_DIR = "/model_cache"
+
+ # Define the Modal image
+ image = (
+ modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9")
+ .pip_install_from_requirements("requirements.txt")
+ #modal.Image.debian_slim(python_version="3.9") # Base image
+ # .apt_install(
+ # "git",
+ # )
+ # .pip_install(
+ # "diffusers",
+ # f"git+https://github.com/huggingface/transformers.git"
+ # )
+ .env(
+ {
+ "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR
+ }
+ )
+ )
+
+ # Create a Modal app
+ app = modal.App("LS-img-gen-modal", image=image)
+ with image.imports():
+ import os
+
+ flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume
+
+ # GPU FUNCTION
+ @app.function(volumes={"/data": flux_model_vol},
+ secrets=[modal.Secret.from_name("huggingface-token")],
+ gpu="L40S",
+ timeout = 300
+ )
+ # MAIN GENERATE IMAGE FUNCTION
+ def generate_image_gpu(
+ prompt_alias,
+ custom_prompt,
+ characer_dropdown,
+ model_alias,
+ height=360,
+ width=640,
+ num_inference_steps=20,
+ guidance_scale=2.0,
+ seed=-1):
+ # Find the selected prompt and model
+ print("Hello from LS_img_gen!")
+
+ check_dependencies()
+
+ try:
+ prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
+ model_name = next(m for m in models_modal if m["alias"] == model_alias)["name"]
+
+ except StopIteration:
+ return None, "ERROR: Invalid prompt or model selected."
+
+ # Print the original prompt and dynamic values for debugging
+ print("Original Prompt:")
+ print(prompt)
+
+ # Append the custom character (if provided)
+ if characer_dropdown == "Wizard":
+ prompt += f" A wizard combats using powerful magic against the {prompt_alias}"
+ elif characer_dropdown == "Warrior":
+ prompt += f" A warrior combats using his weapons against the {prompt_alias}"
+ else:
+ pass
+
+ # Append the custom prompt (if provided)
+ if custom_prompt and len(custom_prompt.strip()) > 0:
+ prompt += " " + custom_prompt.strip()
+
+ # Print the formatted prompt for debugging
+ print("\nFormatted Prompt:")
+ print(prompt)
+
+ # Randomize the seed if needed
+ if seed == -1:
+ seed = random.randint(0, 1000000)
+
+ # HF LOGIN
+ print("Initializing HF TOKEN")
+ print (api_token)
+ # login(token=api_token)
+ # print("model_name:")
+ # print(model_name)
+
+
+ # Use absolute path with leading slash
+ model_path = f"/data/{model_name}" # Changed from "data/" to "/data/"
+ print(f"Loading model from local path: {model_path}")
+
+ # Debug: Check if the directory exists and list its contents
+ if os.path.exists(model_path):
+ print("Directory exists. Contents:")
+ for item in os.listdir(model_path):
+ print(f" - {item}")
+ else:
+ print(f"Directory does not exist: {model_path}")
+ print("Contents of /data:")
+ print(os.listdir("/data"))
+ # CHECK FOR TORCH USING CUDA
+ print("CHECK FOR TORCH USING CUDA")
+ print(f"CUDA available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+ print("inside if")
+ print(f"CUDA device count: {torch.cuda.device_count()}")
+ print(f"Current device: {torch.cuda.current_device()}")
+ print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}")
+
+ try:
+ print("-----INITIALIZING PIPE-----")
+ pipe = FluxPipeline.from_pretrained(
+ model_path,
+ torch_dtype=torch.bfloat16,
+ #torch_dtype=torch.float16,
+ #torch_dtype=torch.float32,
+ #vae=taef1,
+ local_files_only=True,
+ )
+ #torch.cuda.empty_cache()
+
+ if torch.cuda.is_available():
+ print("CUDA available")
+ print("using gpu")
+ pipe = pipe.to("cuda")
+ pipe_message = "CUDA"
+ #pipe.enable_model_cpu_offload() # official recommended method but is running slower w it
+ else:
+ print("CUDA not available")
+ print("using cpu")
+ pipe = pipe.to("cpu")
+ pipe_message = "CPU"
+ print(f"-----{pipe_message} PIPE INITIALIZED-----")
+ print(f"Using device: {pipe.device}")
+ except Exception as e:
+ print(f"Detailed error: {str(e)}")
+ return None, f"ERROR: Failed to initialize PIPE2. Details: {e}"
+
+ ########## SENDING IMG GEN TO PIPE - WORKING CODE ##########
+ try:
+ print("-----SENDING IMG GEN TO PIPE-----")
+ print("-----HOLD ON-----")
+ image = pipe(
+ prompt,
+ guidance_scale=guidance_scale,
+ num_inference_steps=num_inference_steps,
+ width=width,
+ height=height,
+ max_sequence_length=512,
+ #callback_on_step_end=decode_tensors,
+ #callback_on_step_end_tensor_inputs=["latents"],
+ # seed=seed
+ ).images[0]
+ #############################################################
+
+ print("-----IMAGE GENERATED SUCCESSFULLY!-----")
+ print(image)
+
+ except Exception as e:
+ return f"ERROR: Failed to initialize InferenceClient. Details: {e}"
+
+ try:
+ # Save the image with a timestamped filename
+ print("-----SAVING-----", image)
+
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ output_filename = f"/data/LS_images/{timestamp}_{seed}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{characer_dropdown.replace(' ', '_').lower()}.png"
+ try:
+ image.save(output_filename)
+ except Exception as e:
+ return None, f"ERROR: Failed to save image. Details: {e}"
+ print("-----DONE!-----")
+ print("-----CALL THE BANNERS!-----")
+
+ except Exception as e:
+ print(f"ERROR: Failed to save image. Details: {e}")
+ # Return the filename and success message
+ return image, "Image generated successfully!"