import json
import time
from typing import Generator

from PIL import Image
from pydantic import ValidationError
from streamlit.runtime.uploaded_file_manager import UploadedFile
from termcolor import colored

from app.app_config import InputConfig
from core.llms.base_llm import BaseLLM
from core.llms.litellm_llm import LLM
from core.llms.utils import user_message_with_images
from core.prompts.think_mark_think import FINAL_ANSWER_PROMPT, REVIEW_PROMPT
from core.types import ThoughtSteps
from core.utils import parse_with_fallback

def generate_answer(
    messages: list[dict],
    max_steps: int = 20,
    llm: BaseLLM | None = None,
    sleeptime: float = 0.0,
    force_max_steps: bool = False,
    **kwargs,
) -> Generator[ThoughtSteps, None, None]:
    """Run an iterative think-review loop, yielding each ThoughtSteps and finally the consolidated answer."""
    for i in range(max_steps):
        raw_response = llm.chat(messages, **kwargs)
        response = raw_response.choices[0].message.content
        thought = response_parser(response)
        print(colored(f"{i+1} - {response}", 'yellow'))
        messages.append({"role": "assistant", "content": thought.model_dump_json()})
        yield thought
        # Stop early once the model marks its answer final, unless forced to use all steps.
        if thought.is_final_answer and not thought.next_step and not force_max_steps:
            break
        # Otherwise ask the model to review its own reasoning and continue.
        messages.append({"role": "user", "content": REVIEW_PROMPT})
        time.sleep(sleeptime)

    # After the loop, request a single consolidated final answer.
    messages.append({"role": "user", "content": FINAL_ANSWER_PROMPT})
    raw_final_answer = llm.chat(messages=messages, **kwargs)
    final_answer = raw_final_answer.choices[0].message.content
    print(colored(f"final answer - {final_answer}", 'green'))
    yield response_parser(final_answer)
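
# Usage sketch (illustrative only; `config` and the question are hypothetical):
#
#     llm = load_llm(config)
#     messages = process_user_input("What is the capital of France?")
#     for thought in generate_answer(messages, max_steps=10, llm=llm):
#         print(thought.model_dump_json(indent=2))
#
# The last item yielded is the consolidated final answer.
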
def response_parser(response: str | dict) -> ThoughtSteps:
    """Parse a raw model response into a ThoughtSteps, with a lenient fallback for malformed JSON."""
    if isinstance(response, str):
        try:
            thought = ThoughtSteps(**json.loads(response))
        except (json.JSONDecodeError, ValidationError):
            # The model did not return clean JSON; recover what we can.
            thought = parse_with_fallback(response, ThoughtSteps)
    elif isinstance(response, dict):
        thought = ThoughtSteps(**response)
    else:
        raise TypeError(f"Unsupported response type: {type(response)!r}")
    return thought
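
# Example (a sketch: `is_final_answer` and `next_step` are the fields the loop
# above relies on; any other ThoughtSteps field shown here is an assumption):
#
#     response_parser('{"thought": "2 + 2 = 4", "is_final_answer": true, "next_step": ""}')
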
def dict_to_markdown(d: dict) -> str:
    '''Render each key as a level-3 markdown header with its value as the body.'''
    md = ""
    for key, value in d.items():
        md += f"### {key}\n"
        md += f"{value}\n"
    return md
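
# For example:
#
#     dict_to_markdown({"Reasoning": "Paris is France's capital.", "Answer": "Paris"})
#     # -> "### Reasoning\nParis is France's capital.\n### Answer\nParis\n"
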
def load_llm(config: InputConfig, tools=None) -> BaseLLM:
    return LLM(api_key=config.model_api_key, model=config.model_name, tools=tools)

def image_buffer_to_pillow_image(image_buffer: UploadedFile) -> Image.Image:
    return Image.open(image_buffer)

def process_user_input(user_input: str, image: Image.Image | None = None) -> list[dict]:
    if image:
        message = [user_message_with_images(user_msg_str=user_input, images=[image])]
    else:
        message = [{"role": "user", "content": user_input}]
    return message
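
# Sketch of the image path (assumes `uploaded` is a buffer from Streamlit's st.file_uploader):
#
#     img = image_buffer_to_pillow_image(uploaded)
#     messages = process_user_input("Describe this image", image=img)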