Commit 48c9787 (verified) by infinitymatter
Parent(s): 8116448

Update src/models.py

Files changed (1):
  1. src/models.py +17 -10
src/models.py CHANGED
@@ -1,10 +1,8 @@
-from openai import OpenAI
-import anthropic
 import os
+import requests
+import json
 from dotenv import load_dotenv
-from huggingface_hub import HfApi;
 
-from huggingface_hub import InferenceClient
 # Load environment variables from .env file
 load_dotenv(override=True)
 
@@ -19,17 +17,25 @@ if not openai_api_key:
 if not anthropic_api_key:
     print("❌ Anthropic API Key is missing!")
 
-# Initialize disguised API clients
-openai = InferenceClient(token=openai_api_key)
-claude = InferenceClient(token=anthropic_api_key)
-
 # Model names
 OPENAI_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
 CLAUDE_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
 
+# Define a helper function to perform the direct API call
+def hf_inference_request(token, model, prompt, max_new_tokens=200):
+    url = f"https://api-inference.huggingface.co/models/{model}"
+    headers = {"Authorization": f"Bearer {token}"}
+    payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_new_tokens}}
+
+    response = requests.post(url, headers=headers, json=payload)
+    # Raise an HTTPError if the API response was unsuccessful
+    response.raise_for_status()
+    return response.json()
+
 def get_gpt_completion(prompt, system_message):
     try:
-        response = openai.text_generation(prompt=f"{system_message}\n{prompt}", max_new_tokens=200)
+        full_prompt = f"{system_message}\n{prompt}"
+        response = hf_inference_request(openai_api_key, OPENAI_MODEL, full_prompt, max_new_tokens=200)
         return response
     except Exception as e:
         print(f"GPT error: {e}")
@@ -37,7 +43,8 @@ def get_gpt_completion(prompt, system_message):
 
 def get_claude_completion(prompt, system_message):
     try:
-        response = claude.text_generation(prompt=f"{system_message}\n{prompt}", max_new_tokens=200)
+        full_prompt = f"{system_message}\n{prompt}"
+        response = hf_inference_request(anthropic_api_key, CLAUDE_MODEL, full_prompt, max_new_tokens=200)
         return response
     except Exception as e:
         print(f"Claude error: {e}")
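With this change, get_gpt_completion and get_claude_completion return whatever hf_inference_request returns, i.e. the parsed JSON from the Inference API, rather than the plain string the old InferenceClient.text_generation call produced. A minimal caller-side sketch, assuming the module is importable as models and that the endpoint answers with the usual [{"generated_text": ...}] list for text-generation models (an assumption about the API response shape, not something this diff shows):

from models import get_gpt_completion  # hypothetical import path; depends on how src/ is packaged

raw = get_gpt_completion(
    prompt="Summarise the water cycle in one sentence.",
    system_message="You are a concise science tutor.",
)

# The function now returns parsed JSON; on error it only prints the
# exception, so guard against a missing result.
if raw:
    # Assumed response shape: [{"generated_text": "<prompt + completion>"}]
    text = raw[0].get("generated_text", "")
    print(text)
else:
    print("Request failed; see the logged GPT error above.")

Returning the raw JSON keeps the helper generic across models, but callers that previously treated the return value as a plain string will likely need an unwrapping step like the one above.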