# Install required libraries (in a notebook, keep the version specifiers quoted
# so the shell does not treat ">=" as output redirection)
# !pip install "gradio>=3.50.2" "transformers>=4.30.0" "networkx>=3.0" "plotly>=5.0.0" "torch>=2.0.0" "numpy>=1.21.0"

import gradio as gr
import torch
import networkx as nx  # note: not currently used; the mind map is drawn directly with Plotly
import plotly.graph_objects as go
from transformers import pipeline
import numpy as np
import traceback

# Check device
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load models with fallback
try:
    summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=0 if device == "cuda" else -1)
    text_generator = pipeline("text-generation", model="gpt2", device=0 if device == "cuda" else -1)
    print("✓ Models loaded successfully")
except Exception as e:
    print(f"Model loading error: {e}")
    try:
        summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-6-6", device=0 if device == "cuda" else -1)
        text_generator = pipeline("text-generation", model="distilgpt2", device=0 if device == "cuda" else -1)
        print("✓ Fallback models loaded successfully")
    except Exception as fallback_error:
        print(f"Critical error loading fallback models: {fallback_error}")
        raise

# Progress tracking
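# NOTE: this dict/print pair only logs progress to the server console; the
# on-screen "What’s Happening" textbox is updated separately by the lambdas
# chained onto the click handlers near the bottom of the file.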
progress_status = {"value": 0, "desc": ""}

def update_progress(value, desc=""):
    progress_status["value"] = value
    progress_status["desc"] = desc
    print(f"Progress: {value}% - {desc}")

# Simplified summary generation
def generate_summary(topic):
    try:
        update_progress(10, "Starting your idea analysis")
        
        if "AI" in topic or "artificial intelligence" in topic.lower():
            ai_component = "The AI part makes it super smart by figuring out patterns and doing tasks automatically!"
        else:
            ai_component = "Adding some AI could make it even cooler by helping with data or tasks."
            
        if "urban farming" in topic.lower() or "agriculture" in topic.lower():
            summary = f"""
            Your idea, {topic}, is a fresh way to grow food right in cities!

            What it’s about: This startup uses cool tech to grow fresh food indoors or on rooftops. Think sensors and smart systems to keep plants happy.

            Who it’s for: People like restaurants wanting fresh veggies, city folks who love healthy food, or stores looking to cut delivery costs.

            How it works: You’d need things like sensors, grow lights, and a smart app to run it all. {ai_component}

            How it makes money: Sell to businesses, charge people monthly for fresh food, or share your tech with others.

            Why it stands out: Fresher food, less travel, and it works all year—no matter the weather!

            Things to watch out for: It might cost a lot to start, and some cities have rules about this.
            """
        else:
            outputs = text_generator(
                f"A startup idea about {topic} could be:", 
                max_length=300, 
                num_return_sequences=1,
                temperature=0.7
            )
            generated_text = outputs[0]['generated_text']
            summary = f"""
            Your idea, {topic}, is a cool way to make something better!

            What it’s about: This startup helps with [problem/need] by using [simple solution]. It’s special because [unique bit].

            Who it’s for: People like [customer type] would love this—think [example customers].

            How it works: You’d need [basic tools/tech] to build it. {ai_component}

            How it makes money: You could [simple revenue idea, e.g., sell it or charge a fee].

            Why it stands out: It’s better because [easy advantage, e.g., it’s simpler or cheaper].

            Things to watch out for: Starting might be tough because of [simple challenge, e.g., money or time].
            
            {generated_text}
            """
        
        update_progress(60, "Idea analysis ready")
        return summary
        
    except Exception as e:
        print(f"Error generating summary: {str(e)}\n{traceback.format_exc()}")
        return f"Oops! Something went wrong: {str(e)}. Try again with a new idea!"

# Responsive interactive mind map with Plotly
def generate_mindmap(topic, summary):
    try:
        update_progress(70, "Building your idea map")
        
        concepts = [
            "What It Does", "Who It’s For", "How It Makes Money",
            "What Makes It Special", "What’s Needed", "Next Steps"
        ]
        subconcepts = {
            "What It Does": ["Main Idea", "Key Features"],
            "Who It’s For": ["Customers", "Why They’d Like It"],
            "How It Makes Money": ["Ways to Earn", "Pricing"],
            "What Makes It Special": ["Standout Points", "Edge Over Others"],
            "What’s Needed": ["Tools or Tech", "Skills"],
            "Next Steps": ["First Moves", "Goals"]
        }
        
        nodes = [topic] + concepts + [sub for concept in concepts for sub in subconcepts[concept]]
        edges = [(topic, concept) for concept in concepts] + \
                [(concept, sub) for concept in concepts for sub in subconcepts[concept]]
        
        # Lay out the six main concepts evenly on a unit circle around the topic;
        # each sub-concept is placed 0.3 units away from its parent node.
        pos = {topic: (0, 0)}
        for i, concept in enumerate(concepts):
            pos[concept] = (np.cos(i * 2 * np.pi / len(concepts)), np.sin(i * 2 * np.pi / len(concepts)))
            for j, sub in enumerate(subconcepts[concept]):
                pos[sub] = (pos[concept][0] + 0.3 * np.cos(j), pos[concept][1] + 0.3 * np.sin(j))
        
        edge_x, edge_y = [], []
        for edge in edges:
            x0, y0 = pos[edge[0]]
            x1, y1 = pos[edge[1]]
            edge_x.extend([x0, x1, None])
            edge_y.extend([y0, y1, None])
        
        node_x, node_y = [pos[node][0] for node in nodes], [pos[node][1] for node in nodes]
        
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=edge_x, y=edge_y, mode='lines', line=dict(color='gray', width=1), hoverinfo='none'))
        fig.add_trace(go.Scatter(
            x=node_x, y=node_y, mode='markers+text', text=nodes, textposition="middle center",
            marker=dict(size=[30 if n == topic else 20 if n in concepts else 15 for n in nodes], 
                       color=['#FF9999' if n == topic else '#99CCFF' if n in concepts else '#CCFF99' for n in nodes]),
            hoverinfo='text'
        ))
        
        # Responsive layout for mind map
        fig.update_layout(
            showlegend=False, 
            title=f"Your Idea Map: {topic}", 
            title_x=0.5, 
            paper_bgcolor='white', 
            plot_bgcolor='white',
            autosize=True,  # Makes it scale with container
            margin=dict(l=20, r=20, t=40, b=20)  # Reduced margins for mobile
        )
        
        update_progress(100, "Idea map ready")
        return fig
        
    except Exception as e:
        print(f"Error generating mind map: {str(e)}")
        return None

# Main processing
def process_input(topic):
    try:
        if not topic or len(topic.strip()) < 3:
            return "Please enter a real idea (3+ characters)!", None, gr.update(visible=False)
        
        print(f"Processing: {topic}")
        update_progress(0, "Starting your idea journey")
        
        summary = f"Congrats! You’ve started shaping {topic}. Here’s your plan:\n\n{generate_summary(topic)}"
        mindmap = generate_mindmap(topic, summary)
        
        return summary, mindmap, gr.update(visible=True)
    
    except Exception as e:
        print(f"Error in processing: {str(e)}\n{traceback.format_exc()}")
        return f"Oops! Something broke: {str(e)}", None, gr.update(visible=True)

# Follow-up question processing
def process_followup_question(question, current_summary):
    try:
        if not question or len(question.strip()) < 5:
            return "Ask something longer (5+ characters)!", None
        
        print(f"Follow-up: {question}")
        prompt = f"Question: {question}\n\nContext: {current_summary}\n\nAnswer:"
        
        # max_new_tokens bounds the answer length regardless of how long the
        # summary/context prompt is; gr.update() below leaves the existing mind
        # map untouched instead of clearing it.
        response = text_generator(prompt, max_new_tokens=150, num_return_sequences=1, temperature=0.7)
        answer = response[0]['generated_text'].split("Answer:")[-1].strip()
        
        return f"Here’s your answer:\n\n{answer}", gr.update()
    
    except Exception as e:
        print(f"Error in follow-up: {str(e)}\n{traceback.format_exc()}")
        return f"Sorry, something went wrong: {str(e)}", None

# Save and load ideas
def save_idea(name, summary, mindmap, saved):
    if not name:
        return gr.update(), "Please give your idea a name!"
    saved[name] = {"summary": summary, "mindmap": mindmap}
    return gr.update(choices=list(saved.keys())), f"Great job! '{name}' is saved—keep refining it!"

def load_idea(name, saved):
    if name in saved:
        return saved[name]["summary"], saved[name]["mindmap"], gr.update(visible=True)
    return "No idea found with that name.", None, gr.update(visible=False)

# Custom CSS for responsiveness
custom_css = """
.container {
    max-width: 100%;
    padding: 10px;
}
.header {
    font-size: clamp(1.5rem, 4vw, 2.5rem);
    text-align: center;
}
.textbox {
    width: 100% !important;
    min-height: 100px;
}
.button {
    width: 100%;
    margin: 5px 0;
}
.plot {
    width: 100% !important;
    height: auto !important;
}
@media (max-width: 768px) {
    .gr-row {
        flex-direction: column !important;
    }
    .gr-column {
        width: 100% !important;
    }
}
"""

# Gradio interface
with gr.Blocks(theme=gr.themes.Glass(primary_hue="blue", secondary_hue="green"), css=custom_css) as demo:
    current_summary = gr.State("")
    saved_ideas = gr.State({})
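    # gr.State is scoped to the browser session and is not persisted, so saved
    # ideas disappear when the page is refreshed or the app restarts.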

    gr.Markdown("# 🧠 VisualMind - Grow Your Startup Idea", elem_classes=["header"])
    gr.Markdown("Dream up a startup idea and watch it come to life—works great on any screen!", elem_classes=["container"])

    with gr.Tabs():
        with gr.Tab("Create Your Idea"):
            with gr.Row(equal_height=False):
                with gr.Column(scale=1, min_width=300):
                    topic_input = gr.Textbox(
                        label="Your Startup Idea", 
                        placeholder="e.g., Smart Urban Gardens",
                        elem_classes=["textbox"]
                    )
                    submit_btn = gr.Button("Build My Idea", variant="primary", elem_classes=["button"])
            
            progress = gr.Textbox(label="What’s Happening", value="Ready to start!", interactive=False, elem_classes=["textbox"])
            
            with gr.Row(visible=False, equal_height=False) as results_container:
                with gr.Column(scale=1, min_width=300):
                    summary_output = gr.Textbox(label="Your Idea Plan", lines=10, elem_classes=["textbox"])
                    
                    with gr.Accordion("Explore More", open=False):
                        followup_input = gr.Textbox(
                            label="Got a question?", 
                            placeholder="e.g., How do I start small?",
                            elem_classes=["textbox"]
                        )
                        followup_btn = gr.Button("Ask Away", elem_classes=["button"])
                        followup_output = gr.Textbox(label="Here’s What We Think", lines=5, elem_classes=["textbox"])
                        gr.Markdown("""
                        ### Need Ideas to Ask?
                        - How can I make this stand out?
                        - Who might love this?
                        - What’s my first step?
                        """)
                
                with gr.Column(scale=1, min_width=300):
                    mindmap_output = gr.Plot(label="Your Interactive Idea Map", elem_classes=["plot"])
            
            with gr.Row(equal_height=False):
                with gr.Column(scale=1, min_width=150):
                    idea_name = gr.Textbox(label="Name Your Idea", placeholder="e.g., GreenCity", elem_classes=["textbox"])
                    save_btn = gr.Button("Save Idea", elem_classes=["button"])
                with gr.Column(scale=1, min_width=150):
                    load_dropdown = gr.Dropdown(label="Pick a Saved Idea", choices=[], elem_classes=["textbox"])
                    load_btn = gr.Button("Load Idea", elem_classes=["button"])

        with gr.Tab("Tips & Inspiration"):
            gr.Markdown("""
            ### Spark Your Next Big Thing!
            - **Example:** *Eco-Friendly Packaging* - Uses plant-based materials and smart tech to cut waste.
            - **Try This:** Think of something you wish worked better—how could an app or gadget fix it?
            - **Tip:** Start small—test your idea with friends first!
            """, elem_classes=["container"])

    # Event handlers
    submit_btn.click(
        fn=lambda: "Bringing your idea to life...",
        inputs=None,
        outputs=progress
    ).then(
        process_input, 
        inputs=[topic_input], 
        outputs=[summary_output, mindmap_output, results_container]
    ).then(
        fn=lambda s: s,
        inputs=[summary_output],
        outputs=[current_summary]
    ).then(
        fn=lambda: "Ready for more questions—keep exploring!",
        inputs=None,
        outputs=progress
    )
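    # The `lambda s: s` step above copies the on-screen summary into current_summary
    # so that follow-up questions can use it as context.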
    
    followup_btn.click(
        fn=lambda: "Thinking about your question...",
        inputs=None,
        outputs=progress
    ).then(
        process_followup_question,
        inputs=[followup_input, current_summary],
        outputs=[followup_output, mindmap_output]
    ).then(
        fn=lambda: "All set—ask another if you’d like!",
        inputs=None,
        outputs=progress
    )
    
    save_btn.click(
        save_idea,
        inputs=[idea_name, summary_output, mindmap_output, saved_ideas],
        outputs=[load_dropdown, progress]
    )
    
    load_btn.click(
        load_idea,
        inputs=[load_dropdown, saved_ideas],
        outputs=[summary_output, mindmap_output, results_container]
    )

# Launch with error handling
try:
    demo.launch(debug=True, share=True, height=800)  # Set a default height for better mobile scrolling
except Exception as e:
    print(f"Launch error: {e}")
    try:
        demo.launch(debug=True, server_name="0.0.0.0", height=800)
    except Exception as e2:
        print(f"Second launch failed: {e2}")
        print("Try restarting your runtime or checking library installations.")