zach committed
Commit fc05e1d · 1 Parent(s): 229217f

Improve Anthropic API error handling

src/app.py CHANGED
@@ -28,6 +28,7 @@ from src.constants import (
     VOTE_FOR_OPTION_TWO
 )
 from src.integrations import (
+    AnthropicError,
     generate_text_with_claude,
     text_to_speech_with_hume,
     text_to_speech_with_elevenlabs
@@ -50,23 +51,44 @@ def validate_and_generate_text(prompt: str) -> tuple[Union[str, gr.update], gr.u
         - The generated text or an error message.
         - The updated state of the "Generate" button.
     """
+    # Local prompt validation
     try:
-        validate_prompt_length(prompt, PROMPT_MAX_LENGTH, PROMPT_MIN_LENGTH)  # Raises error if invalid
+        validate_prompt_length(prompt, PROMPT_MAX_LENGTH, PROMPT_MIN_LENGTH)
     except ValueError as ve:
         logger.warning(f'Validation error: {ve}')
-        return str(ve), gr.update(interactive=True)  # Show error, re-enable button
+        # Show validation error to user, re-enable the "Generate" button
+        return str(ve), gr.update(interactive=True)
 
+    # Call the Anthropic API to generate text
     try:
         generated_text = generate_text_with_claude(prompt)
-        if not generated_text:
-            raise ValueError("Claude API returned an empty response.")
+
+        # Optionally handle empty or unusual responses
+        if not generated_text.strip():
+            logger.warning("Anthropic API returned an empty response.")
+            return "Error: Anthropic API returned an empty response.", gr.update(interactive=True)
 
         logger.info(f'Generated text ({len(generated_text)} characters).')
-        return gr.update(value=generated_text), gr.update(interactive=False)  # Keep button disabled
+        return gr.update(value=generated_text), gr.update(interactive=False)
+
+    # Handle Anthropic-specific errors
+    except AnthropicError as ae:
+        # Log the internal error details
+        logger.error(f'AnthropicError while generating text: {str(ae)}')
+        # Show an Anthropic-specific error message, re-enable the "Generate" button
+        return (
+            "Error: There was an issue communicating with the Anthropic API. Please try again later.",
+            gr.update(interactive=True),
+        )
 
+    # Catch any other unexpected exceptions
     except Exception as e:
-        logger.error(f'Error while generating text with Claude API: {e}')
-        return "Error: Failed to generate text. Please try again.", gr.update(interactive=True)  # Re-enable button
+        logger.error(f'Unexpected error while generating text: {e}')
+        # Return a generic catch-all error message, re-enable the "Generate" button
+        return (
+            "Error: Failed to generate text. Please try again.",
+            gr.update(interactive=True),
+        )
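For context (not part of this commit), a minimal sketch of how the new AnthropicError branch in validate_and_generate_text could be exercised with pytest and unittest.mock. The test name, the patch target, the sample prompt, and the assumption that src.app imports cleanly without live API credentials are all hypothetical.

# Hypothetical test sketch; module paths and names are assumptions, not part of the commit.
from unittest.mock import patch

import gradio as gr

from src.app import validate_and_generate_text
from src.integrations import AnthropicError


def test_anthropic_error_reenables_generate_button():
    # Simulate the integration layer failing with the Anthropic-specific error.
    with patch('src.app.generate_text_with_claude', side_effect=AnthropicError(message='simulated failure')):
        # Assumes this prompt satisfies the local length validation.
        text, button_state = validate_and_generate_text('Write a short story about a lighthouse keeper.')

    # The user sees the Anthropic-specific message and the "Generate" button is re-enabled.
    assert text.startswith('Error: There was an issue communicating with the Anthropic API')
    assert button_state == gr.update(interactive=True)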
 
src/integrations/__init__.py CHANGED
@@ -1,3 +1,3 @@
-from .anthropic_api import generate_text_with_claude
-from .hume_api import text_to_speech_with_hume
-from .elevenlabs_api import text_to_speech_with_elevenlabs
+from .anthropic_api import generate_text_with_claude, AnthropicError
+from .elevenlabs_api import text_to_speech_with_elevenlabs
+from .hume_api import text_to_speech_with_hume
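The package-level re-export is what lets app.py import the error type from src.integrations instead of from the submodule; a one-line sketch mirroring the import added above:

# Mirrors the import added in src/app.py, enabled by the re-export above.
from src.integrations import AnthropicError, generate_text_with_claude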
src/integrations/anthropic_api.py CHANGED
@@ -97,10 +97,11 @@ anthropic_config = AnthropicConfig()
     wait=wait_fixed(2),
     before=before_log(logger, logging.DEBUG),
     after=after_log(logger, logging.DEBUG),
+    reraise=True
 )
 def generate_text_with_claude(prompt: str) -> str:
     """
-    Generates text using Claude via the Anthropic SDK.
+    Generates text using Claude (Anthropic LLM) via the Anthropic SDK.
 
     Args:
         prompt (str): The input prompt for Claude.
@@ -109,18 +110,11 @@ def generate_text_with_claude(prompt: str) -> str:
         str: The generated text.
 
     Raises:
-        ValueError: If the prompt exceeds the maximum allowed length.
         AnthropicError: If there is an error communicating with the Anthropic API.
-
-    Example:
-        >>> generate_text_with_claude("Write a haiku about nature.")
-        "Gentle waves crashing, / Whispering secrets softly, / Infinite blue skies."
-
-        >>> generate_text_with_claude("")
-        "The prompt exceeds the maximum allowed length of 500 characters. Your prompt contains 512 characters."
     """
     logger.debug(f'Generating text with Claude. Prompt length: {len(prompt)} characters.')
 
+    response = None
     try:
         response: Message = anthropic_config.client.messages.create(
             model=anthropic_config.model,
@@ -137,7 +131,6 @@ def generate_text_with_claude(prompt: str) -> str:
 
         # Process response content
         blocks: Union[List[TextBlock], TextBlock, None] = response.content
-
         if isinstance(blocks, list):
             result = '\n\n'.join(block.text for block in blocks if isinstance(block, TextBlock))
             logger.debug(f'Processed response from list: {truncate_text(result)}')
@@ -148,12 +141,12 @@ def generate_text_with_claude(prompt: str) -> str:
 
         logger.warning(f'Unexpected response type: {type(blocks)}')
         return str(blocks or 'No content generated.')
-
+
     except Exception as e:
-        logger.exception(f'Error generating text with Claude: {e}')
+        logger.exception(f'Error generating text with Anthropic: {e}')
         raise AnthropicError(
             message=(
-                f'Error generating text with Claude: {e}. '
+                f'Error generating text with Anthropic: {e}. '
                 f'HTTP Status: {getattr(response, "status", "N/A")}. '
                 f'Prompt (truncated): {truncate_text(prompt)}. '
                 f'Model: {anthropic_config.model}, Max tokens: {anthropic_config.max_tokens}'
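A note on the reraise=True addition: by default tenacity wraps the final failure in RetryError once retries are exhausted, which would slip past the new except AnthropicError branch in app.py; with reraise=True the original exception propagates. A self-contained sketch (independent of this repo, using a stand-in error class) illustrating the difference:

# Standalone illustration of tenacity's reraise flag; AnthropicError here is a stand-in class.
from tenacity import RetryError, retry, stop_after_attempt, wait_fixed


class AnthropicError(Exception):
    def __init__(self, message: str):
        super().__init__(message)
        self.message = message


@retry(stop=stop_after_attempt(2), wait=wait_fixed(0), reraise=True)
def always_fails() -> str:
    raise AnthropicError(message='simulated Anthropic API failure')


try:
    always_fails()
except AnthropicError as e:
    # With reraise=True, the original exception propagates after the last attempt,
    # so callers can branch on the specific error type.
    print(f'Caught AnthropicError: {e.message}')
except RetryError:
    # Without reraise=True, tenacity raises this wrapper instead of the original error.
    print('Caught RetryError')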