tikendraw committed on
Commit
4cb26b4
·
1 Parent(s): e4bde4c

added breakpoints between thoughts

Browse files
Files changed (1) hide show
  1. app/app.py +7 -15
app/app.py CHANGED
@@ -1,22 +1,11 @@
1
-
2
- from calendar import c
3
- from dataclasses import dataclass
4
- from math import exp
5
- from shutil import which
6
  import time
7
- from webbrowser import get
8
- from litellm.types.utils import ModelResponse
9
  import streamlit as st
10
  from app.utils import generate_answer, load_llm
11
- from core.types import ThoughtStepsDisplay, ThoughtSteps, BigMessage , Message
12
  from .app_config import InputConfig, ENV_FILE_PATH, CONFIG_FILE_PATH
13
- from core.llms.base_llm import BaseLLM
14
- from core.llms.litellm_llm import LLM
15
- from core.llms.utils import user_message_with_images
16
- from PIL import Image
17
  from core.prompts.think_mark_think import SYSTEM_PROMPT
18
 
19
- st.set_page_config(page_title="Open-o1", page_icon="🧠", layout="wide")
20
 
21
 
22
  def config_sidebar(config:InputConfig) -> InputConfig:
@@ -28,7 +17,7 @@ def config_sidebar(config:InputConfig) -> InputConfig:
28
  temperature = st.sidebar.number_input('Temperature: ',value=config.temperature, min_value=0.0, step=0.1, max_value=10.0)
29
  timeout = st.sidebar.number_input('Timeout(seconds): ',value=config.timeout, min_value=0.0,step = 1.0)
30
  sleeptime = st.sidebar.number_input('Sleep Time(seconds)',value=config.sleeptime, min_value=0.0, step = 1.0, help='Time between requests to avoid hitting rate limit')
31
- force_max_steps = st.sidebar.checkbox('Force Max Steps', value=config.force_max_steps, help="If checked, will generate given number of max steps. If not checked, assistant can stop at few step thinking it has the write answer.")
32
 
33
  config.model_name = model_name
34
  config.model_api_key = model_api_key
@@ -49,6 +38,7 @@ def config_sidebar(config:InputConfig) -> InputConfig:
49
 
50
 
51
  def main():
 
52
  st.title('Open-O1')
53
  st.write('Welcome to Open-O1!')
54
 
@@ -126,8 +116,10 @@ def main():
126
  thoughts.append(step)
127
 
128
  st.write(step.to_thought_steps_display().md())
129
-
 
130
  status.update(label=step.step_title, state="running", expanded=False)
 
131
 
132
  status.update(
133
  label=f"Thought for {time.time()-start_time:.2f} seconds", state="complete", expanded=False
 
 
 
 
 
 
1
  import time
 
 
2
  import streamlit as st
3
  from app.utils import generate_answer, load_llm
4
+ from core.types import ThoughtStepsDisplay, BigMessage
5
  from .app_config import InputConfig, ENV_FILE_PATH, CONFIG_FILE_PATH
 
 
 
 
6
  from core.prompts.think_mark_think import SYSTEM_PROMPT
7
 
8
+
9
 
10
 
11
  def config_sidebar(config:InputConfig) -> InputConfig:
 
17
  temperature = st.sidebar.number_input('Temperature: ',value=config.temperature, min_value=0.0, step=0.1, max_value=10.0)
18
  timeout = st.sidebar.number_input('Timeout(seconds): ',value=config.timeout, min_value=0.0,step = 1.0)
19
  sleeptime = st.sidebar.number_input('Sleep Time(seconds)',value=config.sleeptime, min_value=0.0, step = 1.0, help='Time between requests to avoid hitting rate limit')
20
+ force_max_steps = st.sidebar.checkbox('Force Max Steps', value=config.force_max_steps, help="If checked, will generate given number of max steps. If not checked, assistant can stop at few step thinking it has the right answer.")
21
 
22
  config.model_name = model_name
23
  config.model_api_key = model_api_key
 
38
 
39
 
40
  def main():
41
+ st.set_page_config(page_title="Open-o1", page_icon="🧠", layout="wide")
42
  st.title('Open-O1')
43
  st.write('Welcome to Open-O1!')
44
 
 
116
  thoughts.append(step)
117
 
118
  st.write(step.to_thought_steps_display().md())
119
+ # add breakline after each step
120
+ st.markdown('---')
121
  status.update(label=step.step_title, state="running", expanded=False)
122
+
123
 
124
  status.update(
125
  label=f"Thought for {time.time()-start_time:.2f} seconds", state="complete", expanded=False