shreetishresthanp committed
Commit b66710c · verified · 1 Parent(s): 67d983d

Create app.py

Files changed (1): app.py +128 -0
app.py ADDED
@@ -0,0 +1,128 @@
+ import streamlit as st
+ from google import genai
+ from google.genai import types
+ from openai import OpenAI
+
+ # Show title and description.
+ st.title("💬 LSAT Tutor")
+ st.write(
+     "Hey there! I'm your tutor for today. We'll revise the LSAT Logical Reasoning Section."
+ )
+
+ # Read the Gemini API key. You could ask the user for it via `st.text_input`, or store it
+ # in `./.streamlit/secrets.toml` and access it via `st.secrets`, see
+ # https://docs.streamlit.io/develop/concepts/connections/secrets-management
+ # gemini_api_key = st.text_input("Gemini API Key", type="password")
+ GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
+
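+ # A minimal sketch of the assumed secrets file. The key name `GEMINI_API_KEY` is an
+ # assumption here; any name works as long as the `st.secrets` lookup above matches it.
+ #
+ #     # ./.streamlit/secrets.toml
+ #     GEMINI_API_KEY = "your-gemini-api-key"
+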
+ # Create a client.
+ client = genai.Client(api_key=GEMINI_API_KEY)
+
+ # Create a session state variable to store the chat messages. This ensures that the
+ # messages persist across reruns.
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display the existing chat messages via `st.chat_message`.
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Create a chat input field to allow the user to enter a message. This will display
+ # automatically at the bottom of the page.
+ if prompt := st.chat_input("Ready to begin?"):
+
+     # Store and display the current prompt.
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     with st.chat_message("user"):
+         st.markdown(prompt)
+
+     # Earlier OpenAI-style attempt, left commented out:
+     # stream = client.chat.completions.create(
+     #     model="gemini-2.0-flash",
+     #     # config=types.GenerateContentConfig(
+     #     #     system_instruction=system_instruction,
+     #     #     tools=[tools]),
+     #     messages=[
+     #         {"role": m["role"], "content": m["content"]}
+     #         for m in st.session_state.messages
+     #     ],
+     #     stream=True,
+     # )
+
+     # Generate a response using the Gemini API. Create the chat session once and keep it
+     # in session state so the model retains the conversation history across reruns.
+     if "chat" not in st.session_state:
+         st.session_state.chat = client.chats.create(
+             model="gemini-2.0-flash",
+             # config=types.GenerateContentConfig(
+             #     system_instruction=system_instruction,
+             #     tools=[tools]
+             # )
+         )
+
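+     # Note: `system_instruction` and `tools` in the commented-out config are not defined
+     # anywhere in this file. A minimal sketch, assuming a plain-string system prompt
+     # (the wording is illustrative, not part of the app):
+     #
+     #     system_instruction = "You are a patient LSAT tutor focused on Logical Reasoning."
+     #     st.session_state.chat = client.chats.create(
+     #         model="gemini-2.0-flash",
+     #         config=types.GenerateContentConfig(system_instruction=system_instruction),
+     #     )
+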
+     # Stream the response to the chat using `st.write_stream`, then store it in
+     # session state. `send_message_stream` yields response chunks; pass their text along.
+     with st.chat_message("assistant"):
+         stream = st.session_state.chat.send_message_stream(prompt)
+         response = st.write_stream(chunk.text or "" for chunk in stream)
+     st.session_state.messages.append({"role": "assistant", "content": response})
+
+
+
+ # stream = client.chat.completions.create(
+ #     model="gpt-3.5-turbo",
+ #     messages=[
+ #         {"role": m["role"], "content": m["content"]}
+ #         for m in st.session_state.messages
+ #     ],
+ #     stream=True,
+ # )
+
+
+ # import streamlit as st
+ # import random
+ # import time
+
+
+ # # Streamed response emulator
+ # def response_generator():
+ #     response = random.choice(
+ #         [
+ #             "Hello there! How can I assist you today?",
+ #             "Hi, human! Is there anything I can help you with?",
+ #             "Hi there. Do you need help?",
+ #         ]
+ #     )
+ #     for word in response.split():
+ #         yield word + " "
+ #         time.sleep(0.05)
+
+
+ # st.title("Simple chat")
+
+ # # Initialize chat history
+ # if "messages" not in st.session_state:
+ #     st.session_state.messages = []
+
+ # # Display chat messages from history on app rerun
+ # for message in st.session_state.messages:
+ #     with st.chat_message(message["role"]):
+ #         st.markdown(message["content"])
+
+ # # Accept user input
+ # if prompt := st.chat_input("What is up?"):
+ #     # Add user message to chat history
+ #     st.session_state.messages.append({"role": "user", "content": prompt})
+ #     # Display user message in chat message container
+ #     with st.chat_message("user"):
+ #         st.markdown(prompt)
+
+ #     # Display assistant response in chat message container
+ #     with st.chat_message("assistant"):
+ #         response = st.write_stream(response_generator())
+ #     # Add assistant response to chat history
+ #     st.session_state.messages.append({"role": "assistant", "content": response})