Markndrei committed
Commit 25ff1f5 · 1 Parent(s): 29c18d2

Added Prompt Specification Features

Files changed (1)
  1. app.py +82 -27
app.py CHANGED
@@ -2,45 +2,100 @@ import streamlit as st
from openai import OpenAI
import os

- # Set your API key here
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)

- def query_ai_model(prompt):
    try:
-         # Example of using NVIDIA's OpenAI integration
-         completion = client.chat.completions.create(
-             model="meta/llama-3.1-405b-instruct",
-             messages=[{"role": "user", "content": prompt}],
-             temperature=0.2,
-             top_p=0.7,
-             max_tokens=1024,
-             stream=True
-         )
-         response = ""
-         response_container = st.empty()
-
-         for chunk in completion:
-             if chunk.choices[0].delta.content is not None:
-                 response += chunk.choices[0].delta.content
-                 response_container.markdown(response)
    except Exception as e:
-         return f"An error occurred: {str(e)}"

- # Streamlit App
st.title("Mark's AI Chatbot")
- st.write("Enter a prompt below and click 'Ask AI' to get a response from the AI model.")

- # Input prompt
user_input = st.text_area("Your Prompt:", placeholder="Type something...")

if st.button("Generate Answer"):
    if user_input.strip():
-         with st.spinner("Getting a response from the AI model..."):
-             ai_response = query_ai_model(user_input)
-             st.success("AI Response:")
-             st.write(ai_response)
    else:
-         st.warning("Please enter a prompt before clicking the Button.")
 
from openai import OpenAI
import os

+ # Set up NVIDIA API client
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)
+ """
+ Parameters for Response Specification Features:
+ - model: The AI model to use for generating responses.
+ - temperature: Controls the randomness of the response. Higher values result in more randomness.
+   Example use cases:
+   - 0.0: Always the same response.
+   - 0.1 - 0.3: Mostly deterministic; factual and repetitive.
+   - 0.4 - 0.7: Balanced between coherent and creative responses.
+   - 0.8 - 1.0: More creative and imaginative responses (less coherent).
+ - max_tokens: The maximum number of tokens (words/subwords) to generate in the response.
+ - top_p: Controls the probability mass sampled from the top tokens. Higher values result in more creativity. [This is related to the temperature parameter.]
+   Also known as nucleus sampling, it determines which candidate next words the AI will consider.
+   The higher the value, the more diverse the response will be.
+   For example:
+   - low top_p + low temperature = more accurate and factual responses
+   - high top_p + high temperature = more creative, unexpected responses
+ - num_responses: The number of responses to generate.
+ - fact_check: If True, the prompt instructs the AI to check the factual accuracy of its response.
+   If False, the AI will prioritize creativity over factual accuracy.
+
+ IN SUMMARY:
+ - temperature controls creativity vs. accuracy.
+ - max_tokens affects length.
+ - top_p fine-tunes word diversity.
+ - fact_check encourages factual correctness (but slightly limits fluency).
+ - num_responses generates different variations of the same prompt.
+ """
+
+ def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7, max_tokens=512, top_p=0.9, fact_check=False, num_responses=1):
+     responses = []
+
    try:
+         if fact_check:
+             prompt = "Ensure factual accuracy. " + prompt
+
+         for _ in range(num_responses):  # Loop once per requested response
+             completion = client.chat.completions.create(
+                 model=model,
+                 messages=[{"role": "user", "content": prompt}],
+                 temperature=temperature,
+                 top_p=top_p,
+                 max_tokens=max_tokens
+             )
+             response = completion.choices[0].message.content
+             responses.append(response)
+
    except Exception as e:
+         st.error(f"An error occurred: {str(e)}")
+
+     return responses  # Return a list of responses

+ # Simple Streamlit UI for now
st.title("Mark's AI Chatbot")
+ st.write("Provide a topic and customize the response criteria.")

+ # Input Fields
user_input = st.text_area("Your Prompt:", placeholder="Type something...")

+ # Dropdown Menus
+ output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
+ tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
+
+ # Sliders
+ creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
+ max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
+
+ # Numeric Inputs
+ num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
+
+ # Checkboxes
+ enable_creativity = st.checkbox("Enable Creative Mode", value=True)
+ fact_checking = st.checkbox("Enable Fact-Checking")
+
if st.button("Generate Answer"):
    if user_input.strip():
+         with st.spinner("Generating response..."):
+             full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_input}"
+             ai_responses = query_ai_model(
+                 full_prompt,
+                 temperature=creativity_level if enable_creativity else 0.2,
+                 max_tokens=max_length,
+                 top_p=0.9 if enable_creativity else 0.7,
+                 fact_check=fact_checking,
+                 num_responses=num_responses
+             )
+
+             st.success("AI Responses:")
+             for i, response in enumerate(ai_responses, 1):
+                 st.markdown(f"### Response {i}")
+                 st.write(response)
    else:
+         st.warning("Please enter a prompt before clicking the button.")
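
The new query_ai_model signature makes the docstring's temperature/top_p guidance easy to try outside the Streamlit UI. Below is a minimal standalone sketch (not part of the commit) that reuses the same NVIDIA endpoint and model from the diff to compare a factual preset against a creative one. The preset names and the example prompt are illustrative assumptions; it assumes NVIDIA_API_KEY is set in the environment.

# Standalone sketch (illustrative, not part of the commit): compares the
# docstring's "factual" vs. "creative" settings against the same endpoint.
# Assumes NVIDIA_API_KEY is set in the environment.
import os

from openai import OpenAI

client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)

# Hypothetical presets mirroring the docstring's guidance:
# low temperature + low top_p -> factual; high values -> creative.
presets = {
    "factual": {"temperature": 0.2, "top_p": 0.7},
    "creative": {"temperature": 0.9, "top_p": 0.9},
}

prompt = "Describe nucleus (top_p) sampling in one paragraph."

for name, params in presets.items():
    completion = client.chat.completions.create(
        model="meta/llama-3.1-405b-instruct",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=256,
        **params
    )
    print(f"--- {name} ---")
    print(completion.choices[0].message.content)

With the factual preset, repeated runs should typically produce near-identical answers; with the creative preset, rerunning the script should produce noticeably different wording.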