import os
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
# Load environment variables from .env file
load_dotenv(override=True)
# Retrieve API keys from environment
openai_api_key = os.getenv("OPENAI_API_KEY")
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
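# A minimal .env sketch for this script (placeholder values, not real keys;
# the variable names are the ones read above):
#
#   OPENAI_API_KEY=...
#   ANTHROPIC_API_KEY=...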
# Warn if any API key is missing
if not openai_api_key:
    print("❌ OpenAI API Key is missing!")
if not anthropic_api_key:
    print("❌ Anthropic API Key is missing!")
# Initialize the "disguised" clients: both names point at Hugging Face
# InferenceClient instances rather than the OpenAI/Anthropic SDKs. Note that
# InferenceClient authenticates with a Hugging Face access token, so the values
# stored under OPENAI_API_KEY / ANTHROPIC_API_KEY must be valid HF tokens for
# these calls to succeed.
openai = InferenceClient(token=openai_api_key)
claude = InferenceClient(token=anthropic_api_key)
# Model names: both the "GPT" and "Claude" paths are routed to the same
# Hugging Face-hosted model.
OPENAI_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
CLAUDE_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
# "GPT" completion: despite the name, this sends the system message and prompt
# to the Hugging Face text-generation endpoint, targeting OPENAI_MODEL.
def get_gpt_completion(prompt, system_message):
    try:
        response = openai.text_generation(prompt=f"{system_message}\n{prompt}", model=OPENAI_MODEL, max_new_tokens=200)
        return response
    except Exception as e:
        print(f"GPT error: {e}")
        raise
# "Claude" completion: identical routing through the Hugging Face endpoint,
# targeting CLAUDE_MODEL.
def get_claude_completion(prompt, system_message):
    try:
        response = claude.text_generation(prompt=f"{system_message}\n{prompt}", model=CLAUDE_MODEL, max_new_tokens=200)
        return response
    except Exception as e:
        print(f"Claude error: {e}")
        raise
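# Minimal usage sketch. The system message and prompt below are illustrative
# placeholders, not part of the original Space.
if __name__ == "__main__":
    system_message = "You are a helpful assistant."
    prompt = "Summarize what a Hugging Face Space is in one sentence."
    print(get_gpt_completion(prompt, system_message))
    print(get_claude_completion(prompt, system_message))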