# src/llms/openai_llm.py
import openai
from typing import List, Optional

from .base_llm import BaseLLM


class OpenAILanguageModel(BaseLLM):
    """OpenAI-backed implementation of the BaseLLM interface.

    NOTE(review): this uses the legacy module-level ``openai.ChatCompletion``
    API, which was removed in ``openai>=1.0``. Pin ``openai<1.0`` or migrate
    to the ``openai.OpenAI`` client — confirm against the project's pinned
    dependency version.
    """

    def __init__(self, api_key: str, model: str = 'gpt-3.5-turbo'):
        """
        Initialize OpenAI Language Model

        Args:
            api_key (str): OpenAI API key
            model (str): Name of the OpenAI model to use
        """
        # NOTE: this assigns the key to the openai module globally, so it is
        # shared by every instance (and any other code importing openai in
        # this process). Last constructed instance wins.
        openai.api_key = api_key
        self.model = model

    def generate(
        self,
        prompt: str,
        max_tokens: Optional[int] = 150,
        temperature: float = 0.7,
        **kwargs,
    ) -> str:
        """
        Generate a response for *prompt* using the OpenAI chat API.

        Args:
            prompt (str): Input prompt, sent as a single user message
            max_tokens (Optional[int]): Maximum tokens to generate
            temperature (float): Sampling temperature
            **kwargs: Extra parameters forwarded verbatim to the API call

        Returns:
            str: Generated response, stripped of surrounding whitespace
                 (empty string if the API returned no text content)
        """
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            **kwargs,
        )
        # Fix: message.content can be None in the legacy API (e.g. when the
        # model returns a function call instead of text); calling .strip()
        # directly would raise AttributeError.
        content = response.choices[0].message.content
        return (content or "").strip()

    def tokenize(self, text: str) -> List[str]:
        """
        Approximate tokenization by whitespace splitting.

        Note: this is a placeholder, NOT the real OpenAI (BPE) tokenizer —
        accurate offline tokenization would require the ``tiktoken`` package.
        Counts derived from this will generally underestimate actual API
        token usage.

        Args:
            text (str): Input text to tokenize

        Returns:
            List[str]: Whitespace-delimited tokens (empty list for empty
                or whitespace-only text)
        """
        return text.split()

    def count_tokens(self, text: str) -> int:
        """
        Approximate the number of tokens in the text.

        Delegates to :meth:`tokenize`, so this is a whitespace-split word
        count, not an exact BPE token count.

        Args:
            text (str): Input text to count tokens

        Returns:
            int: Approximate token count
        """
        return len(self.tokenize(text))