Spaces:
Running
Reverted Updates due to unexpected errors.
app.py CHANGED
@@ -2,66 +2,45 @@ import streamlit as st
 from openai import OpenAI
 import os

-# Set
+# Set your API key here
 client = OpenAI(
     base_url="https://integrate.api.nvidia.com/v1",
     api_key=os.environ.get("NVIDIA_API_KEY")
 )

-def query_ai_model(prompt
+def query_ai_model(prompt):
     try:
+        # Example of using NVIDIA's OpenAI integration
         completion = client.chat.completions.create(
-            model=
+            model="meta/llama-3.1-405b-instruct",
             messages=[{"role": "user", "content": prompt}],
-            temperature=
-            top_p=
-            max_tokens=
+            temperature=0.2,
+            top_p=0.7,
+            max_tokens=1024,
             stream=True
         )
         response = ""
         response_container = st.empty()
-
+
         for chunk in completion:
             if chunk.choices[0].delta.content is not None:
                 response += chunk.choices[0].delta.content
                 response_container.markdown(response)
     except Exception as e:
-
+        return f"An error occurred: {str(e)}"

-# Streamlit
+# Streamlit App
 st.title("Mark's AI Chatbot")
-
-st.write("Provide a topic and customize the response criteria.")
-=======
-st.write("Enter a prompt below and click 'Generate Answer' to get a response from the AI model.")
->>>>>>> 16dd513590026069c27327e4c9dd4e29445fccaf
+st.write("Enter a prompt below and click 'Ask AI' to get a response from the AI model.")

-# Input
+# Input prompt
 user_input = st.text_area("Your Prompt:", placeholder="Type something...")

-# Dropdown Menus
-output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
-tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
-
-# Sliders/Numeric Inputs
-creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
-max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
-num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
-
-# Checkboxes
-enable_creativity = st.checkbox("Enable Creative Mode", value=True)
-fact_checking = st.checkbox("Enable Fact-Checking")
-
 if st.button("Generate Answer"):
     if user_input.strip():
-        with st.spinner("
-
-        ai_response = query_ai_model(
-            full_prompt,
-            temperature=creativity_level if enable_creativity else 0.2,
-            max_tokens=max_length
-        )
+        with st.spinner("Getting a response from the AI model..."):
+            ai_response = query_ai_model(user_input)
         st.success("AI Response:")
         st.write(ai_response)
     else:
-        st.warning("Please enter a prompt before clicking the
+        st.warning("Please enter a prompt before clicking the Button.")
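
One detail worth noting about the reverted query_ai_model: its only return statement sits in the except branch, so on the success path the function returns None and st.write(ai_response) has nothing to show beyond what was already streamed into response_container. Below is a minimal sketch of the helper with an explicit return of the accumulated text; everything else is taken from the new side of the diff above, and the extra return line is an illustration, not part of this commit.

import os

import streamlit as st
from openai import OpenAI

client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY")
)

def query_ai_model(prompt):
    # Stream the completion into the page and return the accumulated text.
    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2,
            top_p=0.7,
            max_tokens=1024,
            stream=True
        )
        response = ""
        response_container = st.empty()

        for chunk in completion:
            # Each streamed chunk carries an incremental piece of the answer.
            if chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
                response_container.markdown(response)
        return response  # added for illustration: without it the caller receives None
    except Exception as e:
        return f"An error occurred: {str(e)}"

The streamed text already renders incrementally via response_container.markdown(response); returning the accumulated string simply gives the caller something to pass to st.write, log, or cache. The client reads NVIDIA_API_KEY from the environment, so on a Space it would presumably be configured as a repository secret rather than hard-coded.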