mjbuehler committed
Commit f14db37 · verified · 1 Parent(s): d3c5cbe

Update app.py

Files changed (1)
  1. app.py +25 -8
app.py CHANGED
@@ -476,6 +476,14 @@ STANDARD_TEXT_MODELS = [
     "openai/custom_model",
 ]
 
+REASONING_EFFORTS = [
+    "N/A",
+    "low",
+    "medium",
+    "high",
+]
+
+
 STANDARD_AUDIO_MODELS = [
     "tts-1",
     "tts-1-hd",
@@ -536,27 +544,29 @@ def get_mp3(text: str, voice: str, audio_model: str, api_key: str = None,
 
 from functools import wraps
 
-def conditional_llm(model, api_base=None, api_key=None):
+def conditional_llm(model, api_base=None, api_key=None, reasoning_effort=None):
     """
     Conditionally apply the @llm decorator based on the api_base parameter.
     If api_base is provided, it applies the @llm decorator with api_base.
     Otherwise, it applies the @llm decorator without api_base.
     """
-
-    #for o-x reasoning models (o1, o3, o4, ...)
-    reasoning_effort="high"
 
     def decorator(func):
         if api_base:
-            return llm(model=model, api_base=api_base, reasoning_effort=reasoning_effort)(func)
+            return llm(model=model, api_base=api_base)(func)
         else:
-            return llm(model=model, api_key=api_key, reasoning_effort=reasoning_effort)(func)
+            if reasoning_effort == "N/A":
+                return llm(model=model, api_key=api_key)(func)
+            else:
+                return llm(model=model, api_key=api_key, reasoning_effort=reasoning_effort)(func)
+
     return decorator
 
 def generate_audio(
     files: list,
     openai_api_key: str = None,
     text_model: str = "o4-mini", #o1-2024-12-17", #"o1-preview-2024-09-12",
+    reasoning_effort: str = None,
     audio_model: str = "tts-1",
     speaker_1_voice: str = "alloy",
     speaker_2_voice: str = "echo",
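
For context, a sketch of how the reworked decorator factory could be applied inside generate_audio. This is illustrative only: it assumes a promptic-style @llm decorator (the llm used above, imported elsewhere in app.py) that treats the decorated function's docstring as the prompt template, and the function names below are hypothetical, not taken from the diff.

# Illustrative use of conditional_llm (names here are hypothetical):
def build_dialogue_writer(text_model, api_base, openai_api_key, reasoning_effort):
    @conditional_llm(model=text_model, api_base=api_base,
                     api_key=openai_api_key, reasoning_effort=reasoning_effort)
    def write_dialogue(text: str):
        """Rewrite the following PDF text as a two-speaker podcast dialogue: {text}"""
    return write_dialogue

# Selecting "N/A" takes the branch that omits reasoning_effort, so models that
# do not support the parameter are never sent it.
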
@@ -781,6 +791,13 @@ with gr.Blocks(title="PDF to Audio", css="""
                 value="o3-mini", #"o4-mini", #"o1-preview-2024-09-12", #"gpt-4o-mini",
                 info="Select the model to generate the dialogue text.",
             )
+            reasoning_effort = gr.Dropdown(
+                label="Reasoning effort (for reasoning models, e.g. o1, o3, o4)",
+                choices=REASONING_EFFORTS,
+                value=None,
+                info="Select reasoning effort used.",
+            )
+
             audio_model = gr.Dropdown(
                 label="Audio Generation Model",
                 choices=STANDARD_AUDIO_MODELS,
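
Note that the new dropdown defaults to value=None rather than "N/A", while conditional_llm only skips the parameter when it sees the exact string "N/A"; an untouched control would therefore forward None as reasoning_effort. A one-line defensive normalization (hypothetical, not in this commit) inside generate_audio would close that gap:

# Hypothetical guard: treat an untouched dropdown (None) like an explicit "N/A".
reasoning_effort = reasoning_effort if reasoning_effort in ("low", "medium", "high") else "N/A"
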
@@ -890,7 +907,7 @@ with gr.Blocks(title="PDF to Audio", css="""
     submit_btn.click(
         fn=validate_and_generate_audio,
         inputs=[
-            files, openai_api_key, text_model, audio_model,
+            files, openai_api_key, text_model, reasoning_effort, audio_model,
             speaker_1_voice, speaker_2_voice, speaker_1_instructions, speaker_2_instructions,
             api_base,
             intro_instructions, text_instructions, scratch_pad_instructions,
@@ -920,7 +937,7 @@ with gr.Blocks(title="PDF to Audio", css="""
         ),
         inputs=[
             use_edited_transcript, edited_transcript,
-            files, openai_api_key, text_model, audio_model,
+            files, openai_api_key, text_model, reasoning_effort, audio_model,
             speaker_1_voice, speaker_2_voice, speaker_1_instructions, speaker_2_instructions,
             api_base,
             intro_instructions, text_instructions, scratch_pad_instructions,
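
Gradio click events pass the listed component values to the handler positionally, so inserting reasoning_effort between text_model and audio_model in both inputs lists only works if validate_and_generate_audio (whose signature is not part of this diff) forwards a value in the same slot, matching the generate_audio signature change above. A minimal, self-contained sketch of that contract (component names and handler are illustrative, not from app.py):

import gradio as gr

# gr.Blocks click events map the inputs list to handler arguments by position.
def handler(text_model, reasoning_effort, audio_model):
    return f"{text_model} / {reasoning_effort} / {audio_model}"

with gr.Blocks() as demo:
    text_model = gr.Dropdown(choices=["o3-mini", "gpt-4o-mini"], value="o3-mini", label="Text model")
    reasoning_effort = gr.Dropdown(choices=["N/A", "low", "medium", "high"], value="N/A", label="Reasoning effort")
    audio_model = gr.Dropdown(choices=["tts-1", "tts-1-hd"], value="tts-1", label="Audio model")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(fn=handler, inputs=[text_model, reasoning_effort, audio_model], outputs=[out])

# demo.launch()  # uncomment to try the wiring locally
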
 