MilanM committed
Commit 26969cf · verified · 1 Parent(s): 1695c47

Upload 3 files

Files changed (3)
  1. anton_ego_jimmy.py +43 -0
  2. requirements.txt +5 -0
  3. secretsload.py +25 -0
anton_ego_jimmy.py ADDED
@@ -0,0 +1,43 @@
+ import streamlit as st
+ # Model Configuration
+ #[model]
+ TYPE = "chat"  # "chat" so that the app uses the chat history
+ SELECTED_MODEL = "meta-llama/llama-3-1-70b-instruct"  # Pick a model_id; the full list is at https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-api-model-ids.html?context=wx&audience=wdp
+ #SELECTED_MODEL = "meta-llama/llama-3-405b-instruct"
+ #SELECTED_MODEL = "mistralai/mistral-large"
+ #SELECTED_MODEL = "mistralai/mixtral-8x7b-instruct-v01"
+ VERIFY = False
+
+ # Prompt Configuration
+ #[prompt]
+ SYSTEM_PROMPT = st.secrets["sys_prompt"]
+ PROMPT_TEMPLATE = "llama3-instruct (llama-3 & 3.1) - system"
+ #PROMPT_TEMPLATE = "mistral & mixtral v2 tokenizer - system"  # Pick a prompt template from model_family_syntax below, e.g. "llama3-instruct (llama-3 & 3.1) - user" if you don't use a system prompt.
+ BAKE_IN_PROMPT_SYNTAX = True
+
+ # Generation Parameters
+ DECODING_METHOD = "greedy"  # "greedy" or "sample"
+ MAX_NEW_TOKENS = 250
+ MIN_NEW_TOKENS = 1
+ REPETITION_PENALTY = 1.0
+ # LENGTH_PENALTY = {'decay_factor': 1.25, 'start_index': 150}
+ STOP_SEQUENCES = ["<|end_of_text|>", "</s>"]  # Set up for llama models; for mistral, "</s>" is the preferred stop sequence
+
+ # Additional Parameters - only active if DECODING_METHOD is "sample"
+ TEMPERATURE = 0.75
+ TOP_P = 1.0
+ TOP_K = 50
+
+ DISPLAY_CHAT_HISTORY = 1  # 0 to display the chat history, 1 to hide it
+
+ # model_family_syntax = {
+ #     "llama3-instruct (llama-3 & 3.1) - system": """\n<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
+ #     "llama3-instruct (llama-3 & 3.1) - user": """\n<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
+ #     "granite-13b-chat & instruct - system": """\n<|system|>\n{system_prompt}\n<|user|>\n{prompt}\n<|assistant|>\n\n""",
+ #     "granite-13b-chat & instruct - user": """\n<|user|>\n{prompt}\n<|assistant|>\n\n""",
+ #     "llama2-chat - system": """\n[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt} [/INST] """,
+ #     "llama2-chat - user": """\n[INST] {prompt} [/INST] """,
+ #     "mistral & mixtral v2 tokenizer - system": """\n<s>[INST]System Prompt:[{system_prompt}]\n\n{prompt} [/INST]\n""",
+ #     "mistral & mixtral v2 tokenizer - system segmented": """\n<s>[INST]System Prompt:{system_prompt}[/INST][INST]{prompt} [/INST]\n""",
+ #     "mistral & mixtral v2 tokenizer - user": """\n<s>[INST]{prompt} [/INST]\n"""
+ # }
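
For orientation, the sketch below shows how these constants might be consumed downstream. It is illustrative only: format_prompt and generation_params are assumed names not defined in this commit, only one template from the commented-out model_family_syntax dict is reproduced, and the parameter key names follow the watsonx.ai text-generation conventions as an assumption.

    # Illustrative sketch - assumes the constants from anton_ego_jimmy.py are in scope.
    model_family_syntax = {
        "llama3-instruct (llama-3 & 3.1) - system": (
            "\n<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
            "{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n"
            "{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
        ),
    }

    def format_prompt(prompt: str, system_prompt: str) -> str:
        # Bake the model family's chat syntax into the prompt when enabled.
        if BAKE_IN_PROMPT_SYNTAX:
            return model_family_syntax[PROMPT_TEMPLATE].format(
                system_prompt=system_prompt, prompt=prompt
            )
        return prompt

    # Generation parameters collected into one dict, shaped the way a
    # watsonx.ai text-generation call would take them (key names assumed).
    generation_params = {
        "decoding_method": DECODING_METHOD,
        "max_new_tokens": MAX_NEW_TOKENS,
        "min_new_tokens": MIN_NEW_TOKENS,
        "repetition_penalty": REPETITION_PENALTY,
        "stop_sequences": STOP_SEQUENCES,
        "temperature": TEMPERATURE,
        "top_p": TOP_P,
        "top_k": TOP_K,
    }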
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ streamlit
+ ibm-watsonx-ai
+ langchain
+ langchain-ibm
+ requests
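
These dependencies install in the usual way:

    pip install -r requirements.txt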
secretsload.py ADDED
@@ -0,0 +1,25 @@
+ import os
+ import toml
+ from typing import Dict
+
+ def load_stsecrets(config_path: str = '.streamlit/secrets.toml') -> Dict:
+     """
+     Load configuration from a Streamlit secrets TOML file and replace placeholders with environment variables.
+
+     :param config_path: Path to the Streamlit secrets TOML file
+     :return: Dictionary containing the credentials
+     """
+     # Read the TOML file
+     with open(config_path, 'r') as f:
+         config = toml.load(f)
+
+     # Replace ${VAR} placeholders with environment variables
+     credentials = config.get('credentials', {})
+     for key, value in credentials.items():
+         if isinstance(value, str) and value.startswith('${') and value.endswith('}'):
+             env_var = value[2:-1]  # Strip the ${ and } delimiters
+             credentials[key] = os.environ.get(env_var)
+             if credentials[key] is None:
+                 raise ValueError(f"Environment variable {env_var} is not set")
+
+     return credentials
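
A quick usage sketch, assuming a secrets file shaped like the commented example; the api_key name and the WATSONX_API_KEY variable are placeholders for illustration, not keys this commit defines:

    # .streamlit/secrets.toml (illustrative)
    # sys_prompt = "You are a food critic."
    # [credentials]
    # api_key = "${WATSONX_API_KEY}"

    from secretsload import load_stsecrets

    credentials = load_stsecrets()  # reads .streamlit/secrets.toml by default
    # "${WATSONX_API_KEY}" is swapped for the environment variable's value;
    # a ValueError is raised if WATSONX_API_KEY is unset.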