kcarnold committed on
Commit
b35561c
·
1 Parent(s): 819c4b4

Disable revisions page

Browse files
Files changed (2) hide show
  1. app.py +3 -3
  2. custom_llm.py +9 -5
app.py CHANGED
@@ -9,7 +9,7 @@ def landing():
9
 
10
  st.page_link(rewrite_page, label="Rewrite with predictions", icon="📝")
11
  st.page_link(highlight_page, label="Highlight locations for possible edits", icon="🖐️")
12
- st.page_link(generate_page, label="Generate revisions", icon="🔄")
13
  st.page_link(type_assistant_response_page, label="Type Assistant Response", icon="🔀")
14
 
15
  st.markdown("*Note*: These services send data to a remote server for processing. The server logs requests. Don't use sensitive or identifiable information on this page.")
@@ -416,7 +416,7 @@ showLogprobs(allLogprobs.length - 1);
416
 
417
  rewrite_page = st.Page(rewrite_with_predictions, title="Rewrite with predictions", icon="📝")
418
  highlight_page = st.Page(highlight_edits, title="Highlight locations for possible edits", icon="🖐️")
419
- generate_page = st.Page(generate_revisions, title="Generate revisions", icon="🔄")
420
  type_assistant_response_page = st.Page(type_assistant_response, title="Type Assistant Response", icon="🔀")
421
  show_internals_page = st.Page(show_internals, title="Show Internals", icon="🔧")
422
 
@@ -425,7 +425,7 @@ page = st.navigation([
425
  st.Page(landing, title="Home", icon="🏠"),
426
  highlight_page,
427
  rewrite_page,
428
- generate_page,
429
  type_assistant_response_page,
430
  show_internals_page
431
  ])
 
9
 
10
  st.page_link(rewrite_page, label="Rewrite with predictions", icon="📝")
11
  st.page_link(highlight_page, label="Highlight locations for possible edits", icon="🖐️")
12
+ #st.page_link(generate_page, label="Generate revisions", icon="🔄")
13
  st.page_link(type_assistant_response_page, label="Type Assistant Response", icon="🔀")
14
 
15
  st.markdown("*Note*: These services send data to a remote server for processing. The server logs requests. Don't use sensitive or identifiable information on this page.")
 
416
 
417
  rewrite_page = st.Page(rewrite_with_predictions, title="Rewrite with predictions", icon="📝")
418
  highlight_page = st.Page(highlight_edits, title="Highlight locations for possible edits", icon="🖐️")
419
+ #generate_page = st.Page(generate_revisions, title="Generate revisions", icon="🔄")
420
  type_assistant_response_page = st.Page(type_assistant_response, title="Type Assistant Response", icon="🔀")
421
  show_internals_page = st.Page(show_internals, title="Show Internals", icon="🔧")
422
 
 
425
  st.Page(landing, title="Home", icon="🏠"),
426
  highlight_page,
427
  rewrite_page,
428
+ #generate_page,
429
  type_assistant_response_page,
430
  show_internals_page
431
  ])
custom_llm.py CHANGED
@@ -74,10 +74,11 @@ async def models_lifespan(app: FastAPI):
74
  params={"original_doc": test_doc, "prompt": test_prompt, "doc_in_progress": "This is"})
75
  print(f"Next token endpoint: {time.time() - start:.2f}s")
76
 
77
- start = time.time()
78
- response = client.get("/api/gen_revisions",
79
- params={"doc": test_doc, "prompt": test_prompt, "n": 1, "max_length": 16})
80
- print(f"Gen revisions endpoint: {time.time() - start:.2f}s")
 
81
 
82
  yield
83
 
@@ -151,7 +152,10 @@ def gen_revisions(
151
  max_length: Optional[int] = 1024,
152
  ):
153
 
154
-
 
 
 
155
  model = ml_models['llm']['model']
156
  tokenizer = ml_models['llm']['tokenizer']
157
 
 
74
  params={"original_doc": test_doc, "prompt": test_prompt, "doc_in_progress": "This is"})
75
  print(f"Next token endpoint: {time.time() - start:.2f}s")
76
 
77
+ if False:
78
+ start = time.time()
79
+ response = client.get("/api/gen_revisions",
80
+ params={"doc": test_doc, "prompt": test_prompt, "n": 1, "max_length": 16})
81
+ print(f"Gen revisions endpoint: {time.time() - start:.2f}s")
82
 
83
  yield
84
 
 
152
  max_length: Optional[int] = 1024,
153
  ):
154
 
155
+ return {
156
+ 'ok': False,
157
+ 'message': 'This endpoint has been disabled.'
158
+ }
159
  model = ml_models['llm']['model']
160
  tokenizer = ml_models['llm']['tokenizer']
161