baconnier committed on
Commit 7838862 · verified · 1 Parent(s): 6ac93f2

Update app.py

Files changed (1): app.py +59 -78
app.py CHANGED
@@ -1,27 +1,24 @@
 import os
 import json
-import re
 from huggingface_hub import InferenceClient
 import gradio as gr
 from pydantic import BaseModel, Field
-from typing import Optional, Literal
+from typing import Optional, Literal, Dict
 from huggingface_hub.errors import HfHubHTTPError
 
 class PromptInput(BaseModel):
     text: str = Field(..., description="The initial prompt text")
-    meta_prompt_choice: Literal["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"] = Field(..., description="Choice of meta prompt strategy")
+    meta_prompt_choice: Literal["star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "arpe"] = Field(..., description="Choice of meta prompt strategy")
 
-class RefinementOutput(BaseModel):
-    query_analysis: Optional[str] = None
-    initial_prompt_evaluation: Optional[str] = None
-    refined_prompt: Optional[str] = None
-    explanation_of_refinements: Optional[str] = None
-    raw_content: Optional[str] = None
+class LLMResponse(BaseModel):
+    initial_prompt_evaluation: str = Field(default="")
+    refined_prompt: str = Field(default="")
+    explanation_of_refinements: str = Field(default="")
 
 class PromptRefiner:
     def __init__(self, api_token: str):
         self.client = InferenceClient(token=api_token, timeout=300)
-        self.meta_prompts = {
+        self.meta_prompts: Dict[str, str] = {
             "morphosis": original_meta_prompt,
             "verse": new_meta_prompt,
             "physics": metaprompt1,
@@ -34,19 +31,23 @@ class PromptRefiner:
 
     def refine_prompt(self, prompt_input: PromptInput) -> tuple:
         try:
-            # Select meta prompt using dictionary instead of if-elif chain
             selected_meta_prompt = self.meta_prompts.get(
-                prompt_input.meta_prompt_choice,
+                prompt_input.meta_prompt_choice,
                 advanced_meta_prompt
             )
-
+
             messages = [
                 {
-                    "role": "system",
-                    "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more detailed.'
+                    "role": "system",
+                    "content": '''You are an expert at refining prompts. Respond in JSON format with exactly these fields:
+                    {
+                        "initial_prompt_evaluation": "your evaluation of the initial prompt",
+                        "refined_prompt": "your refined version of the prompt",
+                        "explanation_of_refinements": "your explanation of the changes made"
+                    }'''
                 },
                 {
-                    "role": "user",
+                    "role": "user",
                     "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)
                 }
             ]
@@ -55,101 +56,81 @@ class PromptRefiner:
                 model=prompt_refiner_model,
                 messages=messages,
                 max_tokens=2000,
-                temperature=0.8
+                temperature=0.8,
+                response_format={"type": "json_object"}
             )
-
+
+            # Parse response using Pydantic
             response_content = response.choices[0].message.content.strip()
-
-            # Parse the response
-            result = self._parse_response(response_content)
-
+            try:
+                parsed_response = LLMResponse.model_validate_json(response_content)
+                result = parsed_response.model_dump()
+            except Exception as e:
+                # Fallback to basic dict if JSON parsing fails
+                result = {
+                    "initial_prompt_evaluation": "Error parsing model response",
+                    "refined_prompt": response_content,
+                    "explanation_of_refinements": str(e)
+                }
+
             return (
-                result.get('initial_prompt_evaluation', ''),
-                result.get('refined_prompt', ''),
-                result.get('explanation_of_refinements', ''),
+                result["initial_prompt_evaluation"],
+                result["refined_prompt"],
+                result["explanation_of_refinements"],
                 result
             )
 
         except HfHubHTTPError as e:
+            error_response = LLMResponse(
+                initial_prompt_evaluation="Error: Model timeout",
+                refined_prompt="The model is currently experiencing high traffic",
+                explanation_of_refinements="Please try again later"
+            )
             return (
-                "Error: Model timeout. Please try again later.",
-                "The selected model is currently experiencing high traffic.",
-                "The selected model is currently experiencing high traffic.",
-                {}
+                error_response.initial_prompt_evaluation,
+                error_response.refined_prompt,
+                error_response.explanation_of_refinements,
+                error_response.model_dump()
            )
         except Exception as e:
+            error_response = LLMResponse(
+                initial_prompt_evaluation=f"Error: {str(e)}",
+                refined_prompt="",
+                explanation_of_refinements="An unexpected error occurred"
+            )
             return (
-                f"Error: {str(e)}",
-                "",
-                "An unexpected error occurred.",
-                {}
+                error_response.initial_prompt_evaluation,
+                error_response.refined_prompt,
+                error_response.explanation_of_refinements,
+                error_response.model_dump()
             )
 
-    def _parse_response(self, response_content: str) -> dict:
-        try:
-            # Try to find JSON in response
-            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
-            if json_match:
-                json_str = json_match.group(1)
-                json_str = re.sub(r'\n\s*', ' ', json_str)
-                json_str = json_str.replace('"', '\\"')
-                json_output = json.loads(f'"{json_str}"')
-
-                if isinstance(json_output, str):
-                    json_output = json.loads(json_output)
-                output={
-                    key: value.replace('\\"', '"') if isinstance(value, str) else value
-                    for key, value in json_output.items()
-                }
-                output['response_content']=json_output
-                # Clean up JSON values
-                return output
-
-            # Fallback to regex parsing if no JSON found
-            output = {}
-            for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
-                pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
-                match = re.search(pattern, response_content, re.DOTALL)
-                output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"') if match else ""
-            output['response_content']=response_content
-            return output
-
-        except (json.JSONDecodeError, ValueError) as e:
-            print(f"Error parsing response: {e}")
-            print(f"Raw content: {response_content}")
-            return {
-                "initial_prompt_evaluation": "Error parsing response",
-                "refined_prompt": "",
-                "explanation_of_refinements": str(e),
-                'response_content':str(e)
-            }
-
     def apply_prompt(self, prompt: str, model: str) -> str:
         try:
             messages = [
                 {
                     "role": "system",
-                    "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections. Incorporate a variety of lists, headers, and text to make the answer visually appealing"
+                    "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections."
                 },
                 {
                     "role": "user",
                     "content": prompt
                 }
             ]
-
+
             response = self.client.chat_completion(
                 model=model,
                 messages=messages,
                 max_tokens=2000,
                 temperature=0.8
            )
-
-            output = response.choices[0].message.content.strip()
-            return output.replace('\n\n', '\n').strip()
-
+
+            return response.choices[0].message.content.strip().replace('\n\n', '\n')
+
         except Exception as e:
             return f"Error: {str(e)}"
 
+
 class GradioInterface:
     def __init__(self, prompt_refiner: PromptRefiner):
         self.prompt_refiner = prompt_refiner
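
For reference, the Pydantic v2 round trip this commit switches to can be exercised in isolation. A minimal sketch follows; the LLMResponse model is copied from the diff above, and the sample payload strings are invented for illustration:

# Self-contained sketch of the new parsing path (sample strings are illustrative only).
from pydantic import BaseModel, Field

class LLMResponse(BaseModel):
    initial_prompt_evaluation: str = Field(default="")
    refined_prompt: str = Field(default="")
    explanation_of_refinements: str = Field(default="")

# Well-formed JSON, as requested via response_format={"type": "json_object"}:
good = '{"initial_prompt_evaluation": "Too vague.", "refined_prompt": "Summarize X in 3 bullets.", "explanation_of_refinements": "Added scope and format."}'
parsed = LLMResponse.model_validate_json(good)  # strict JSON -> typed model
print(parsed.model_dump())                      # plain dict, as returned by refine_prompt

# Non-JSON chatter raises and would take the fallback branch in refine_prompt:
try:
    LLMResponse.model_validate_json("Sure! Here is your refined prompt...")
except Exception as e:
    print(f"fallback triggered: {e}")

Because every field declares a default, a JSON object with missing keys still validates (to empty strings) rather than raising, so in practice the fallback branch is hit mainly on malformed or non-JSON output.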