DHEIVER committed on
Commit
7c8c122
·
verified ·
1 Parent(s): 9030fbe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -11,8 +11,8 @@ from langchain.memory import ConversationBufferMemory
11
  import torch
12
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
13
 
14
- # List of local models (no HF_TOKEN required after download)
15
- list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ"]
16
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
17
 
18
  # Load and split PDF document
@@ -147,7 +147,7 @@ def demo():
147
  with gr.Row():
148
  slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5, step=0.1, label="Temperature", info="Controls randomness in token generation", interactive=True)
149
  with gr.Row():
150
- slider_maxtokens = gr.Slider(minimum=128, maximum=4096, value=1024, step=128, label="Max New Tokens", info="Maximum number of tokens to be generated", interactive=True)
151
  with gr.Row():
152
  slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k", info="Number of tokens to select the next token from", interactive=True)
153
  with gr.Row():
 
11
  import torch
12
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
13
 
14
+ # List of local models (publicly accessible, no token required)
15
+ list_llm = ["facebook/opt-350m", "distilbert/distilgpt2"]
16
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
17
 
18
  # Load and split PDF document
 
147
  with gr.Row():
148
  slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5, step=0.1, label="Temperature", info="Controls randomness in token generation", interactive=True)
149
  with gr.Row():
150
+ slider_maxtokens = gr.Slider(minimum=128, maximum=2048, value=512, step=128, label="Max New Tokens", info="Maximum number of tokens to be generated", interactive=True)
151
  with gr.Row():
152
  slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k", info="Number of tokens to select the next token from", interactive=True)
153
  with gr.Row():