# NOTE: extraction residue from the original page scrape, kept as a comment:
# Spaces: Sleeping | File size: 2,531 Bytes | commit 8f78c8f
import json
import random
from typing import List, Optional
import pandas as pd
def get_current_df(dfs: List[pd.DataFrame], current: int) -> pd.DataFrame:
    """Return the currently selected DataFrame.

    Args:
        dfs: The list of available DataFrames.
        current: Index of the DataFrame to return.

    Returns:
        ``dfs[current]``, or a new empty DataFrame when ``dfs`` is empty.
    """
    # Idiomatic truthiness check instead of len(dfs) == 0; guard clause
    # avoids the redundant else-after-return.
    if not dfs:
        return pd.DataFrame()
    return dfs[current]
def query_llm_mock(
    messages,
    history: List,
    df: pd.DataFrame,
    llm_type: str,
    api_key: str,
    system_prompt: str,
):
    """Mock chat backend that streams a single canned response.

    Mirrors the interface of the real LLM chat function so the caller can
    be exercised without network access; every argument is accepted but
    ignored by the mock.

    Args:
        messages: User input message(s); ignored.
        history: Conversation history; ignored.
        df: The data already obtained; ignored.
        llm_type: Which LLM backend would be used; ignored.
        api_key: API key for the backend; ignored.
        system_prompt: The system prompt; ignored.

    Yields:
        str: One canned assistant reply containing a ```json``` code block.
        Roughly half the time the block holds a fake medications table; the
        other half it is empty, simulating a reply with no extractable data.
    """
    medication_names = (
        "Tamsulosin",
        "Metoprolol",
        "Bromocriptine",
        "Reserpine",
        "Rasagiline",
    )
    # Build the fake table; each row carries a random marker so repeated
    # calls produce distinguishable payloads.
    mock_json = json.dumps(
        {
            "Medications": [
                {
                    "Medication name": name,
                    "Passes_RBB": "Yes",
                    "Random": random.random(),
                }
                for name in medication_names
            ]
        }
    )
    # Coin flip: ~50% of replies ship an empty code block so downstream
    # extraction code is exercised on the "no table" path too.
    table_text = mock_json if random.random() > 0.5 else ""
    yield (
        "Good question!\n"
        "Here's the data frame in JSON format:\n"
        "```json\n"
        f"{table_text}\n"
        "```\n\n"
        "Hope this is useful."
    )
def llm_extract_table_mock(chat_output, llm_type, api_key) -> str:
    """Mock table extraction: return a canned two-row medications table.

    Stands in for the real LLM-backed extraction step; every argument is
    accepted but ignored.

    Args:
        chat_output: The chat text to extract a table from; ignored.
        llm_type: Which LLM backend would be used; ignored.
        api_key: API key for the backend; ignored.

    Returns:
        str: A JSON document with a single "Medications" list holding two
        placeholder rows, each carrying a random marker value.
    """
    rows = []
    for _ in range(2):
        rows.append(
            {
                "Name": "Medication Name",
                "key1": "value1",
                "key2": "value2",
                "Random": random.random(),
            }
        )
    return json.dumps({"Medications": rows})