mjbuehler committed on
Commit
4a2c6e7
·
verified ·
1 Parent(s): 03491fb

Update app.py

Browse files

A few small fixes

Files changed (1) hide show
  1. app.py +26 -61
app.py CHANGED
@@ -16,8 +16,28 @@ from pydantic import BaseModel, ValidationError
16
  from pypdf import PdfReader
17
  from tenacity import retry, retry_if_exception_type
18
 
 
 
19
  import re
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  def read_readme():
22
  readme_path = Path("README.md")
23
  if readme_path.exists():
@@ -580,25 +600,6 @@ def update_instructions(template):
580
  INSTRUCTION_TEMPLATES[template]["dialog"]
581
  )
582
 
583
- import concurrent.futures as cf
584
- import glob
585
- import io
586
- import os
587
- import time
588
- from pathlib import Path
589
- from tempfile import NamedTemporaryFile
590
- from typing import List, Literal
591
-
592
- import gradio as gr
593
-
594
- from loguru import logger
595
- from openai import OpenAI
596
- from promptic import llm
597
- from pydantic import BaseModel, ValidationError
598
- from pypdf import PdfReader
599
- from tenacity import retry, retry_if_exception_type
600
-
601
-
602
  class DialogueItem(BaseModel):
603
  text: str
604
  speaker: Literal["speaker-1", "speaker-2"]
@@ -606,22 +607,8 @@ class DialogueItem(BaseModel):
606
  class Dialogue(BaseModel):
607
  scratchpad: str
608
  dialogue: List[DialogueItem]
609
- '''
610
- def get_mp3(text: str, voice: str, audio_model: str, api_key: str = None) -> bytes:
611
- client = OpenAI(
612
- api_key=api_key or os.getenv("OPENAI_API_KEY"),
613
- )
614
 
615
- with client.audio.speech.with_streaming_response.create(
616
- model=audio_model,
617
- voice=voice,
618
- input=text,
619
- ) as response:
620
- with io.BytesIO() as file:
621
- for chunk in response.iter_bytes():
622
- file.write(chunk)
623
- return file.getvalue()
624
- '''
625
  def get_mp3(text: str, voice: str, audio_model: str, api_key: str = None,
626
  speaker_instructions: str ='Speak in an emotive and friendly tone.') -> bytes:
627
 
@@ -640,28 +627,6 @@ def get_mp3(text: str, voice: str, audio_model: str, api_key: str = None,
640
  file.write(chunk)
641
  return file.getvalue()
642
 
643
-
644
-
645
- from functools import wraps
646
- '''
647
- def conditional_llm(model, api_base=None, api_key=None, reasoning_effort="N/A"):
648
- """
649
- Conditionally apply the @llm decorator based on the api_base parameter.
650
- If api_base is provided, it applies the @llm decorator with api_base.
651
- Otherwise, it applies the @llm decorator without api_base.
652
- """
653
-
654
- def decorator(func):
655
- if api_base:
656
- return llm(model=model, api_base=api_base, )(func)
657
- else:
658
- if reasoning_effort=="N/A":
659
- return llm(model=model, api_key=api_key, )(func)
660
- else:
661
- return llm(model=model, api_key=api_key, reasoning_effort=reasoning_effort)(func)
662
-
663
- return decorator
664
- '''
665
  def conditional_llm(
666
  model,
667
  api_base=None,
@@ -1235,7 +1200,7 @@ with gr.Blocks(title="PDF to Audio", css="""
1235
  fn=lambda error: gr.Warning(error) if error else None,
1236
  inputs=[error_output],
1237
  outputs=[]
1238
- ).then( # fill spreadsheet editor
1239
  fn=dialogue_to_df,
1240
  inputs=[cached_dialogue],
1241
  outputs=[df_editor],
@@ -1289,10 +1254,10 @@ with gr.Blocks(title="PDF to Audio", css="""
1289
  gr.Markdown(read_readme())
1290
 
1291
  # Enable queueing for better performance
1292
- #demo.queue(max_size=20, default_concurrency_limit=32)
1293
 
1294
  # Launch the Gradio app
1295
- #if __name__ == "__main__":
1296
- # demo.launch(share=True)
1297
 
1298
- demo.launch()
 
16
  from pypdf import PdfReader
17
  from tenacity import retry, retry_if_exception_type
18
 
19
+ from functools import wraps
20
+
21
  import re
22
 
23
+ import concurrent.futures as cf
24
+ import glob
25
+ import io
26
+ import os
27
+ import time
28
+ from pathlib import Path
29
+ from tempfile import NamedTemporaryFile
30
+ from typing import List, Literal
31
+
32
+ import gradio as gr
33
+
34
+ from loguru import logger
35
+ from openai import OpenAI
36
+ from promptic import llm
37
+ from pydantic import BaseModel, ValidationError
38
+ from pypdf import PdfReader
39
+ from tenacity import retry, retry_if_exception_type
40
+
41
  def read_readme():
42
  readme_path = Path("README.md")
43
  if readme_path.exists():
 
600
  INSTRUCTION_TEMPLATES[template]["dialog"]
601
  )
602
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
603
  class DialogueItem(BaseModel):
604
  text: str
605
  speaker: Literal["speaker-1", "speaker-2"]
 
607
  class Dialogue(BaseModel):
608
  scratchpad: str
609
  dialogue: List[DialogueItem]
 
 
 
 
 
610
 
611
+
 
 
 
 
 
 
 
 
 
612
  def get_mp3(text: str, voice: str, audio_model: str, api_key: str = None,
613
  speaker_instructions: str ='Speak in an emotive and friendly tone.') -> bytes:
614
 
 
627
  file.write(chunk)
628
  return file.getvalue()
629
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
630
  def conditional_llm(
631
  model,
632
  api_base=None,
 
1200
  fn=lambda error: gr.Warning(error) if error else None,
1201
  inputs=[error_output],
1202
  outputs=[]
1203
+ ).then( # fill spreadsheet editor
1204
  fn=dialogue_to_df,
1205
  inputs=[cached_dialogue],
1206
  outputs=[df_editor],
 
1254
  gr.Markdown(read_readme())
1255
 
1256
  # Enable queueing for better performance
1257
+ demo.queue(max_size=20, default_concurrency_limit=32)
1258
 
1259
  # Launch the Gradio app
1260
+ if __name__ == "__main__":
1261
+ demo.launch(share=True)
1262
 
1263
+ #demo.launch()