hruday96 committed
Commit b6e5361 · verified
1 Parent(s): ca7fbd3

Update app.py

Files changed (1)
  1. app.py +72 -43
app.py CHANGED
@@ -1,53 +1,82 @@
  import streamlit as st
  import google.generativeai as genai

- # Configure API Key
  GOOGLE_API_KEY = st.secrets["GEMINI_API_KEY"]
  genai.configure(api_key=GOOGLE_API_KEY)

- # Select Gemini Model
- gemini_model = genai.GenerativeModel("gemini-2.0-flash")

- # Streamlit App Layout
- st.title('PromptLab')

- # Mode Selection (Shinobi & Raikage)
- mode = st.radio("Select a mode:", ["🌀 Shinobi", "⚡ Raikage"], horizontal=True)
-
- # User Input
- st.subheader("Enter Your Prompt:")
- user_prompt = st.text_area('Enter your prompt:')
-
- # Function to Generate Enhanced Prompt
- def get_gemini_response(prompt):
-     try:
-         response = gemini_model.generate_content(prompt)
-         return response.text if response else "Error: No response received."
-     except Exception as e:
-         return f"❌ Gemini error: {e}"
-
- # Function to Format Prompt Based on Mode
- def generate_enhanced_prompt(user_prompt, mode):
-     if mode == "🌀 Shinobi":
-         system_prompt = "You are an expert in structured prompt design. Refine the following prompt for clarity, conciseness, and structured output."
-     elif mode == "⚡ Raikage":
-         system_prompt = "You are a world-class AI strategist specializing in execution-focused prompts. Transform the following prompt for high-impact, expert-level results."
-
-     # Generate response using Gemini API
-     return get_gemini_response(system_prompt + "\n\n" + user_prompt)
-
- # Button to Submit the Prompt
  if st.button("Generate Enhanced Prompt"):
-     if user_prompt.strip():
-         try:
-             with st.spinner("Enhancing prompt..."):
-                 enhanced_prompt = generate_enhanced_prompt(user_prompt, mode)
-             st.subheader("Enhanced Prompt:")
-             st.write(enhanced_prompt)  # Displaying the text from the response
-         except Exception as e:
-             st.error(f"Error: {e}")
-     else:
-         st.warning("Please enter a prompt before generating.")

- # Footer
- st.markdown("Built with 🧠 by Hruday & Google Gemini")
  import streamlit as st
  import google.generativeai as genai

+ # Streamlit app layout
+ st.title('PromptLab')
+
+ # Create two columns for the Shinobi and Raikage buttons
+ col1, col2 = st.columns(2)
+
+ mode = st.radio("Choose a mode:", ["Shinobi", "Raikage"], horizontal=True)
+
+ # Retrieve the API key from Streamlit secrets
  GOOGLE_API_KEY = st.secrets["GEMINI_API_KEY"]
+
+ # Configure the Google Generative AI API with your API key
  genai.configure(api_key=GOOGLE_API_KEY)

+ # Input field for the blog topic
+ topic = st.text_area('Enter your prompt:')

+ # Display selected mode
+ st.write(f"You selected: {mode}")
+
+
+ # Shinobi and Raikage templates
+ SHINOBI_TEMPLATE = """
+ You are an advanced prompt enhancer, specializing in creating structured, high-clarity prompts that optimize LLM performance.
+ Your task is to refine a given prompt using the **Shinobi framework**, ensuring the following principles:
+
+ ✅ **Concise & High-Density Prompting** → Remove fluff, keeping instructions clear and actionable (~250 words max).
+ ✅ **Explicit Role Definition** → Assign a role to the AI for better contextual grounding.
+ ✅ **Step-by-Step Clarity** → Break the task into structured sections, avoiding ambiguity.
+ ✅ **Defined Output Format** → Specify the response format (JSON, CSV, list, structured text, etc.).
+ ✅ **Zero Conflicting Instructions** → Ensure clarity in constraints (e.g., avoid “simple yet comprehensive”).
+ ✅ **Optional: One-Shot Example** → Add a single example where relevant to guide the AI.

+ ### **Enhance the following prompt using Shinobi principles:**
+ **Original Prompt:**
+ {user_prompt}
+
+ **Enhanced Shinobi Prompt:**
+ """
+
+ RAIKAGE_TEMPLATE = """
+ You are an elite AI strategist, specializing in designing execution-focused prompts that maximize LLM efficiency.
+ Your task is to refine a given prompt using the **Raikage framework**, ensuring the following principles:
+
+ ✅ **Precision & Depth** → Ensure expert-level guidance, reducing vagueness and ambiguity.
+ ✅ **Context & Execution Approach** → Include a structured methodology to solve the problem.
+ ✅ **Defined Output Format** → Specify exact structure (JSON, formatted text, markdown, tables, or code blocks).
+ ✅ **Edge Case Handling & Constraints** → Account for potential failures and model limitations.
+ ✅ **Optional: Few-Shot Prompting** → If beneficial, provide 1-2 high-quality examples for refinement.
+ ✅ **Complies with External Factors** → Adhere to best practices (e.g., ethical scraping, security policies).
+
+ ### **Enhance the following prompt using Raikage principles:**
+ **Original Prompt:**
+ {user_prompt}
+
+ **Enhanced Raikage Prompt:**
+ """
  if st.button("Generate Enhanced Prompt"):
+     if topic.strip():
+         with st.spinner("Enhancing your prompt..."):
+             # Choose the template based on the selected mode
+             if mode == "Shinobi":
+                 prompt = SHINOBI_TEMPLATE.format(user_prompt=topic)
+             else:
+                 prompt = RAIKAGE_TEMPLATE.format(user_prompt=topic)

+             # Initialize the generative model
+             model = genai.GenerativeModel('gemini-pro')
+
+             # Generate enhanced prompt
+             try:
+                 response = model.generate_content(prompt)
+                 enhanced_prompt = response.text  # Extract the response text
+                 st.subheader("🔹 Enhanced Prompt:")
+                 st.code(enhanced_prompt, language="markdown")
+             except Exception as e:
+                 st.error(f"❌ Error generating enhanced prompt: {e}")
+     else:
+         st.warning("⚠️ Please enter a prompt before generating.")