# maya-persistence/src/llm/togetherLLM.py
import os
import json
from typing import Any, Dict, List, Optional

import together
from pydantic import Extra  # , model_validator (needed if the validator below is re-enabled)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env  # used by the commented-out validator

class TogetherLLM(LLM):
    """Together large language models."""

    model_name: str = "togethercomputer/llama-2-70b-chat"
    """Model endpoint to use."""

    # Read the key at class-definition time; fall back to "" rather than
    # raising KeyError when the variable is unset.
    together_api_key: str = os.environ.get("TOGETHER_API_KEY", "")
    """Together API key."""

    temperature: float = 0.7
    """What sampling temperature to use."""

    max_tokens: int = 512
    """The maximum number of tokens to generate in the completion."""

    class Config:
        extra = Extra.forbid
    # @model_validator(mode="after")
    # def validate_environment(cls, values: Dict) -> Dict:
    #     """Validate that the API key is set."""
    #     api_key = get_from_dict_or_env(
    #         values, "together_api_key", "TOGETHER_API_KEY"
    #     )
    #     values["together_api_key"] = api_key
    #     return values
    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
        return "together"
    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call to Together endpoint."""
        together.api_key = self.together_api_key
        output = together.Complete.create(
            prompt,
            model=self.model_name,
            max_tokens=self.max_tokens,
            temperature=self.temperature,
        )
        text = output["output"]["choices"][0]["text"]
        if stop is not None:
            # Truncate the completion at the first occurrence of a stop token.
            text = enforce_stop_tokens(text, stop)
        return text
    def extractJson(self, val: str) -> Any:
        """Helper function to extract JSON from this LLM's output."""
        # This assumes the JSON is the first fenced block (```...```)
        # in the response.
        v2 = val.replace("```json", "```").split("```")[1]
        # Strip newlines so json.loads sees a single-line payload.
        v3 = v2.replace("\n", "").replace("\r", "")
        return json.loads(v3)
    def extractPython(self, val: str) -> Any:
        """Helper function to extract Python code from this LLM's output."""
        # This assumes the code is the first fenced block (```...```)
        # in the response.
        return val.replace("```python", "```").split("```")[1]
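

# A minimal usage sketch, not part of the module itself: it assumes
# TOGETHER_API_KEY is set in the environment and that the account has
# access to the llama-2-70b-chat endpoint configured above.
if __name__ == "__main__":
    llm = TogetherLLM(temperature=0.2, max_tokens=256)

    # extractJson works on any fenced response, so it can be exercised
    # without hitting the API:
    sample = 'Here you go:\n```json\n{"name": "maya", "version": 1}\n```'
    print(llm.extractJson(sample))  # -> {'name': 'maya', 'version': 1}

    # The network call itself, guarded so the sketch stays runnable offline:
    if os.environ.get("TOGETHER_API_KEY"):
        print(llm("List three uses of a paperclip."))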