File size: 1,055 Bytes
1b67eb7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
from __future__ import annotations
from ..requests import Session, get_session_from_browser
from ..typing import Messages
from .base_provider import AsyncProvider
class GptChatly(AsyncProvider):
    """Provider backed by gptchatly.com.

    Routes the request to the site's GPT-4 endpoint when the model name
    starts with ``gpt-4``, otherwise to its default endpoint, and returns
    the assistant reply extracted from the JSON response.
    """
    url = "https://gptchatly.com"
    working = True
    supports_message_history = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        session: Session = None,
        **kwargs
    ) -> str:
        """Send *messages* to gptchatly.com and return the reply text.

        Args:
            model: Model identifier; a ``gpt-4`` prefix selects the GPT-4 route.
            messages: Conversation history forwarded as ``past_conversations``.
            proxy: Optional proxy URL used when creating a fresh session.
            timeout: Request timeout (seconds) for a freshly created session.
            session: Reusable HTTP session; one is created if not provided.

        Returns:
            The assistant's response string from the ``chatGPTResponse`` field.

        Raises:
            Propagates the session's HTTP error on a non-success status.
        """
        if not session:
            # No caller-supplied session: build a browser-impersonating one.
            session = get_session_from_browser(cls.url, proxy=proxy, timeout=timeout)
        # GPT-4 models use a dedicated route; note "felch-response" is the
        # site's actual path for the default route, not a typo to fix here.
        endpoint = "/fetch-gpt4-response" if model.startswith("gpt-4") else "/felch-response"
        payload = {"past_conversations": messages}
        result = session.post(f"{cls.url}{endpoint}", json=payload)
        result.raise_for_status()
        return result.json()["chatGPTResponse"]