ofermend committed on
Commit
bdb1bc8
·
1 Parent(s): 4a4855c

bump version

Browse files
Files changed (6) hide show
  1. .gitignore +3 -1
  2. Dockerfile +3 -1
  3. agent.py +10 -7
  4. app.py +6 -0
  5. requirements.txt +2 -3
  6. st_app.py +21 -10
.gitignore CHANGED
@@ -10,6 +10,8 @@ core/__pycache__/
10
  # C extensions
11
  *.so
12
 
 
 
13
  # Distribution / packaging
14
  .Python
15
  build/
@@ -139,4 +141,4 @@ dmypy.json
139
  # project file
140
  project.yaml
141
 
142
- .idea/
 
10
  # C extensions
11
  *.so
12
 
13
+ .streamlit
14
+
15
  # Distribution / packaging
16
  .Python
17
  build/
 
141
  # project file
142
  project.yaml
143
 
144
+ .idea/
Dockerfile CHANGED
@@ -4,7 +4,9 @@ WORKDIR /app
4
 
5
  COPY ./requirements.txt /app/requirements.txt
6
 
7
- RUN pip3 install --no-cache-dir -r /app/requirements.txt
 
 
8
 
9
  # User
10
  RUN useradd -m -u 1000 user
 
4
 
5
  COPY ./requirements.txt /app/requirements.txt
6
 
7
+ RUN pip3 install --no-cache-dir --upgrade pip
8
+ RUN pip3 install --no-cache-dir wheel setuptools build
9
+ RUN pip3 install --no-cache-dir --use-pep517 -r /app/requirements.txt
10
 
11
  # User
12
  RUN useradd -m -u 1000 user
agent.py CHANGED
@@ -10,7 +10,7 @@ from omegaconf import OmegaConf
10
  from vectara_agentic.agent import Agent
11
  from vectara_agentic.tools import ToolsFactory, VectaraToolFactory
12
  from vectara_agentic.agent_config import AgentConfig
13
- from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow
14
 
15
  from dotenv import load_dotenv
16
  load_dotenv(override=True)
@@ -36,6 +36,7 @@ tickers = {
36
  years = range(2015, 2025)
37
  initial_prompt = "How can I help you today?"
38
 
 
39
  # Tool to get the income statement for a given company and year using the FMP API
40
  @lru_cache(maxsize=256)
41
  def fmp_income_statement(
@@ -94,8 +95,10 @@ class AgentTools:
94
  self.tools_factory = ToolsFactory()
95
  self.agent_config = agent_config
96
  self.cfg = _cfg
97
- self.vec_factory = VectaraToolFactory(vectara_api_key=_cfg.api_key,
98
- vectara_corpus_key=_cfg.corpus_key)
 
 
99
 
100
  def get_tools(self):
101
  class QueryTranscriptsArgs(BaseModel):
@@ -117,12 +120,12 @@ class AgentTools:
117
  You can ask this tool any question about the company including risks, opportunities, financial performance, competitors and more.
118
  """,
119
  tool_args_schema = QueryTranscriptsArgs,
120
- reranker = "multilingual_reranker_v1", rerank_k = 100, rerank_cutoff = 0.1,
121
  n_sentences_before = 2, n_sentences_after = 4, lambda_val = 0.005,
122
  summary_num_results = 15,
123
  vectara_summarizer = summarizer,
124
  include_citations = True,
125
- verbose=False,
126
  )
127
 
128
  class SearchTranscriptsArgs(BaseModel):
@@ -157,7 +160,7 @@ class AgentTools:
157
  [ask_transcripts, search_transcripts]
158
  )
159
 
160
- def initialize_agent(_cfg, agent_progress_callback=None):
161
  financial_bot_instructions = """
162
  - You are a helpful financial assistant, with expertise in financial reporting, in conversation with a user.
163
  - Use the 'fmp_income_statement' tool (with the company ticker and year) to obtain financial data.
@@ -183,7 +186,7 @@ def initialize_agent(_cfg, agent_progress_callback=None):
183
  agent_progress_callback=agent_progress_callback,
184
  query_logging_callback=query_logging,
185
  verbose=True,
186
- #workflow_cls=SubQuestionQueryWorkflow,
187
  )
188
 
189
  agent.report()
 
10
  from vectara_agentic.agent import Agent
11
  from vectara_agentic.tools import ToolsFactory, VectaraToolFactory
12
  from vectara_agentic.agent_config import AgentConfig
13
+ from vectara_agentic.sub_query_workflow import SequentialSubQuestionsWorkflow
14
 
15
  from dotenv import load_dotenv
16
  load_dotenv(override=True)
 
36
  years = range(2015, 2025)
37
  initial_prompt = "How can I help you today?"
38
 
39
+
40
  # Tool to get the income statement for a given company and year using the FMP API
41
  @lru_cache(maxsize=256)
42
  def fmp_income_statement(
 
95
  self.tools_factory = ToolsFactory()
96
  self.agent_config = agent_config
97
  self.cfg = _cfg
98
+ self.vec_factory = VectaraToolFactory(
99
+ vectara_api_key=_cfg.api_key,
100
+ vectara_corpus_key=_cfg.corpus_key
101
+ )
102
 
103
  def get_tools(self):
104
  class QueryTranscriptsArgs(BaseModel):
 
120
  You can ask this tool any question about the company including risks, opportunities, financial performance, competitors and more.
121
  """,
122
  tool_args_schema = QueryTranscriptsArgs,
123
+ reranker = "multilingual_reranker_v1", rerank_k = 100, rerank_cutoff = 0.2,
124
  n_sentences_before = 2, n_sentences_after = 4, lambda_val = 0.005,
125
  summary_num_results = 15,
126
  vectara_summarizer = summarizer,
127
  include_citations = True,
128
+ verbose = False,
129
  )
130
 
131
  class SearchTranscriptsArgs(BaseModel):
 
160
  [ask_transcripts, search_transcripts]
161
  )
162
 
163
+ def initialize_agent(_cfg, agent_progress_callback=None) -> Agent:
164
  financial_bot_instructions = """
165
  - You are a helpful financial assistant, with expertise in financial reporting, in conversation with a user.
166
  - Use the 'fmp_income_statement' tool (with the company ticker and year) to obtain financial data.
 
186
  agent_progress_callback=agent_progress_callback,
187
  query_logging_callback=query_logging,
188
  verbose=True,
189
+ workflow_cls=SequentialSubQuestionsWorkflow,
190
  )
191
 
192
  agent.report()
app.py CHANGED
@@ -1,10 +1,15 @@
 
1
  import streamlit as st
 
 
2
  from st_app import launch_bot
3
  import uuid
4
 
5
  import nest_asyncio
6
  import asyncio
7
 
 
 
8
  # Setup for HTTP API Calls to Amplitude Analytics
9
  if 'device_id' not in st.session_state:
10
  st.session_state.device_id = str(uuid.uuid4())
@@ -17,5 +22,6 @@ async def main():
17
 
18
  if __name__ == "__main__":
19
  st.set_page_config(page_title="Financial Assistant", layout="wide")
 
20
  nest_asyncio.apply()
21
  asyncio.run(main())
 
1
+
2
  import streamlit as st
3
+ import torch
4
+
5
  from st_app import launch_bot
6
  import uuid
7
 
8
  import nest_asyncio
9
  import asyncio
10
 
11
+ torch.classes.__path__ = []
12
+
13
  # Setup for HTTP API Calls to Amplitude Analytics
14
  if 'device_id' not in st.session_state:
15
  st.session_state.device_id = str(uuid.uuid4())
 
22
 
23
  if __name__ == "__main__":
24
  st.set_page_config(page_title="Financial Assistant", layout="wide")
25
+
26
  nest_asyncio.apply()
27
  asyncio.run(main())
requirements.txt CHANGED
@@ -1,9 +1,8 @@
1
  omegaconf==2.3.0
2
  python-dotenv==1.0.1
3
- streamlit==1.41.1
4
- streamlit_pills==0.3.0
5
  streamlit_feedback==0.1.3
6
  uuid==1.30
7
  langdetect==1.0.9
8
  langcodes==3.4.0
9
- vectara-agentic==0.2.5
 
1
  omegaconf==2.3.0
2
  python-dotenv==1.0.1
3
+ streamlit==1.43.2
 
4
  streamlit_feedback==0.1.3
5
  uuid==1.30
6
  langdetect==1.0.9
7
  langcodes==3.4.0
8
+ vectara-agentic==0.2.11
st_app.py CHANGED
@@ -1,9 +1,10 @@
1
- from PIL import Image
2
  import sys
3
  import re
4
 
 
 
5
  import streamlit as st
6
- from streamlit_pills import pills
7
  from streamlit_feedback import streamlit_feedback
8
 
9
  from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
@@ -13,6 +14,12 @@ from agent import initialize_agent, get_agent_config
13
 
14
  initial_prompt = "How can I help you today?"
15
 
 
 
 
 
 
 
16
  def format_log_msg(log_msg: str):
17
  max_log_msg_size = 500
18
  return log_msg if len(log_msg) <= max_log_msg_size else log_msg[:max_log_msg_size]+'...'
@@ -50,7 +57,7 @@ def agent_progress_callback(status_type: AgentStatusType, msg: str):
50
 
51
  def show_example_questions():
52
  if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
53
- selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
54
  if selected_example:
55
  st.session_state.ex_prompt = selected_example
56
  st.session_state.first_turn = False
@@ -71,7 +78,10 @@ async def launch_bot():
71
  st.session_state.first_turn = True
72
  st.session_state.show_logs = False
73
  if 'agent' not in st.session_state:
74
- st.session_state.agent = initialize_agent(cfg, agent_progress_callback=agent_progress_callback)
 
 
 
75
  else:
76
  st.session_state.agent.clear_memory()
77
 
@@ -115,7 +125,8 @@ async def launch_bot():
115
  # Display chat messages
116
  for message in st.session_state.messages:
117
  with st.chat_message(message["role"], avatar=message["avatar"]):
118
- st.write(message["content"])
 
119
 
120
  example_container = st.empty()
121
  with example_container:
@@ -144,10 +155,11 @@ async def launch_bot():
144
  with st.chat_message("assistant", avatar='🤖'):
145
  st.session_state.status = st.status('Processing...', expanded=False)
146
  response = st.session_state.agent.chat(st.session_state.prompt)
147
-
148
- # from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow
149
- # response = await st.session_state.agent.run(inputs=SubQuestionQueryWorkflow.InputsModel(query=st.session_state.prompt))
150
  res = escape_dollars_outside_latex(response.response)
 
 
 
 
151
 
152
  #response = await st.session_state.agent.achat(st.session_state.prompt)
153
  #res = escape_dollars_outside_latex(response.response)
@@ -155,10 +167,9 @@ async def launch_bot():
155
  #res = await st.session_state.agent.astream_chat(st.session_state.prompt)
156
  #response = ''.join([token async for token in res.async_response_gen()])
157
  #res = escape_dollars_outside_latex(response)
158
-
159
  message = {"role": "assistant", "content": res, "avatar": '🤖'}
160
  st.session_state.messages.append(message)
161
- st.markdown(res)
162
 
163
  send_amplitude_data(
164
  user_query=st.session_state.messages[-2]["content"],
 
1
+
2
  import sys
3
  import re
4
 
5
+ from PIL import Image
6
+
7
  import streamlit as st
 
8
  from streamlit_feedback import streamlit_feedback
9
 
10
  from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
 
14
 
15
  initial_prompt = "How can I help you today?"
16
 
17
+ # def pil_to_base64(img):
18
+ # buffered = BytesIO()
19
+ # img.save(buffered, format="PNG")
20
+ # return base64.b64encode(buffered.getvalue()).decode()
21
+
22
+
23
  def format_log_msg(log_msg: str):
24
  max_log_msg_size = 500
25
  return log_msg if len(log_msg) <= max_log_msg_size else log_msg[:max_log_msg_size]+'...'
 
57
 
58
  def show_example_questions():
59
  if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
60
+ selected_example = st.pills("Queries to Try:", st.session_state.example_messages, default=None)
61
  if selected_example:
62
  st.session_state.ex_prompt = selected_example
63
  st.session_state.first_turn = False
 
78
  st.session_state.first_turn = True
79
  st.session_state.show_logs = False
80
  if 'agent' not in st.session_state:
81
+ st.session_state.agent = initialize_agent(
82
+ cfg,
83
+ agent_progress_callback=agent_progress_callback,
84
+ )
85
  else:
86
  st.session_state.agent.clear_memory()
87
 
 
125
  # Display chat messages
126
  for message in st.session_state.messages:
127
  with st.chat_message(message["role"], avatar=message["avatar"]):
128
+ st.markdown(message["content"], unsafe_allow_html=True)
129
+ # st.write(message["content"])
130
 
131
  example_container = st.empty()
132
  with example_container:
 
155
  with st.chat_message("assistant", avatar='🤖'):
156
  st.session_state.status = st.status('Processing...', expanded=False)
157
  response = st.session_state.agent.chat(st.session_state.prompt)
 
 
 
158
  res = escape_dollars_outside_latex(response.response)
159
+
160
+ #from vectara_agentic.sub_query_workflow import SequentialSubQuestionsWorkflow
161
+ #response = await st.session_state.agent.run(inputs=SequentialSubQuestionsWorkflow.InputsModel(query=st.session_state.prompt))
162
+ #res = escape_dollars_outside_latex(response.response)
163
 
164
  #response = await st.session_state.agent.achat(st.session_state.prompt)
165
  #res = escape_dollars_outside_latex(response.response)
 
167
  #res = await st.session_state.agent.astream_chat(st.session_state.prompt)
168
  #response = ''.join([token async for token in res.async_response_gen()])
169
  #res = escape_dollars_outside_latex(response)
170
+
171
  message = {"role": "assistant", "content": res, "avatar": '🤖'}
172
  st.session_state.messages.append(message)
 
173
 
174
  send_amplitude_data(
175
  user_query=st.session_state.messages[-2]["content"],