infinitymatter committed on
Commit 2cfcc36 · verified · 1 Parent(s): a004994

Update src/models.py

Files changed (1): src/models.py +17 -19
src/models.py CHANGED
@@ -21,33 +21,31 @@ if not anthropic_api_key:
 OPENAI_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
 CLAUDE_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
 
-# Define a helper function to perform the direct API call
-def hf_inference_request(token, model, prompt, max_new_tokens=200):
-    # Ensure the prompt is a string
-    if not isinstance(prompt, str):
-        prompt = str(prompt)
-
-    url = f"https://api-inference.huggingface.co/models/{model}"
-    headers = {"Authorization": f"Bearer {token}"}
-    payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_new_tokens}}
-
-    response = requests.post(url, headers=headers, json=payload)
-    # Raise an HTTPError if the API response was unsuccessful
-    response.raise_for_status()
-    return response.json()
-
 def get_gpt_completion(prompt, system_message):
     try:
-        response = openai.text_generation(prompt=f"{system_message}\n{prompt}", max_new_tokens=200)
-        return response
+        response = openai.chat.completions.create(
+            model=OPENAI_MODEL,
+            messages=[
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": prompt}
+            ],
+            stream=False,
+        )
+        return response.choices[0].message.content
     except Exception as e:
         print(f"GPT error: {e}")
         raise
 
+# Call Anthropic's Claude model with prompt and system message
 def get_claude_completion(prompt, system_message):
     try:
-        response = claude.text_generation(prompt=f"{system_message}\n{prompt}", max_new_tokens=200)
-        return response
+        result = claude.messages.create(
+            model=CLAUDE_MODEL,
+            max_tokens=2000,
+            system=system_message,
+            messages=[{"role": "user", "content": prompt}]
+        )
+        return result.content[0].text
     except Exception as e:
         print(f"Claude error: {e}")
         raise
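
The updated helpers assume two pre-configured clients, `openai` and `claude`, plus the `anthropic_api_key` check visible in the hunk header; none of that setup is part of this commit. Below is a minimal sketch of how such clients could be built with the OpenAI and Anthropic Python SDKs. The environment variable names and the base_url (an OpenAI-compatible endpoint, since OPENAI_MODEL is a Mistral checkpoint) are assumptions for illustration, not the repository's actual configuration.

    # Hypothetical setup for the `openai` and `claude` clients used in src/models.py.
    # The real configuration lives above the hunk shown in this commit and is not
    # part of the diff; names and endpoints below are assumptions.
    import os
    from openai import OpenAI        # OpenAI Python SDK (v1+), used for chat.completions.create
    from anthropic import Anthropic  # Anthropic Python SDK, used for messages.create

    hf_token = os.getenv("HF_TOKEN")                    # assumed env var name
    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")  # check mirrored from the hunk context
    if not anthropic_api_key:
        raise ValueError("ANTHROPIC_API_KEY is not set")

    # Hypothetical: point the OpenAI-style client at an OpenAI-compatible endpoint
    # that can serve mistralai/Mistral-7B-Instruct-v0.3.
    openai = OpenAI(api_key=hf_token, base_url="https://router.huggingface.co/v1")

    claude = Anthropic(api_key=anthropic_api_key)

With clients like these in scope, the new helpers can be exercised directly: get_gpt_completion("Ping?", "You are a helpful assistant.") returns the first chat choice's message content as plain text, and get_claude_completion does the same for the first text block of the Anthropic-style messages response.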