import os
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
# Load environment variables from .env file
load_dotenv(override=True)

# Retrieve API keys from environment
openai_api_key = os.getenv("OPENAI_API_KEY")
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
# Warn if any API key is missing
if not openai_api_key:
    print("❌ OpenAI API Key is missing!")
if not anthropic_api_key:
    print("❌ Anthropic API Key is missing!")
# Initialize "disguised" API clients: both are Hugging Face InferenceClient instances,
# so the rest of the code can keep using the familiar openai/claude names.
# NOTE: InferenceClient authenticates against the Hugging Face Inference API, so the
# tokens passed here must be valid Hugging Face tokens, not OpenAI/Anthropic keys.
openai = InferenceClient(token=openai_api_key)
claude = InferenceClient(token=anthropic_api_key)

# Model names (both currently point at the same Hugging Face-hosted Mistral checkpoint)
OPENAI_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
CLAUDE_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
# Call the "GPT" client (a Hugging Face Inference endpoint) with a prompt and system message
def get_gpt_completion(prompt, system_message):
    try:
        response = openai.text_generation(
            prompt=f"{system_message}\n{prompt}",
            model=OPENAI_MODEL,
            max_new_tokens=200,
        )
        return response
    except Exception as e:
        print(f"GPT error: {e}")
        raise

# Call the "Claude" client (also a Hugging Face Inference endpoint) with a prompt and system message
def get_claude_completion(prompt, system_message):
    try:
        response = claude.text_generation(
            prompt=f"{system_message}\n{prompt}",
            model=CLAUDE_MODEL,
            max_new_tokens=200,
        )
        return response
    except Exception as e:
        print(f"Claude error: {e}")
        raise
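# --- Usage sketch (illustrative; assumes the clients above hold valid tokens) ---
# Sends the same prompt through both helper functions and prints the replies.
if __name__ == "__main__":
    system_message = "You are a helpful assistant."
    prompt = "Summarize what a context manager does in Python, in one sentence."
    print("GPT client reply:", get_gpt_completion(prompt, system_message))
    print("Claude client reply:", get_claude_completion(prompt, system_message))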