GreenTeaLatte committed on
Commit
51a0881
·
1 Parent(s): 916c520

username and password

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -7,6 +7,9 @@ import gradio as gr
7
  from huggingface_hub import Repository, InferenceClient
8
 
9
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
 
 
10
  API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat"
11
  BOT_NAME = "Falcon"
12
 
@@ -126,7 +129,7 @@ with gr.Blocks() as demo:
126
 
127
  ✨ This demo is powered by [Falcon-180B](https://huggingface.co/tiiuae/falcon-180B) and finetuned on a mixture of [Ultrachat](https://huggingface.co/datasets/stingning/ultrachat), [Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) and [Airoboros](https://huggingface.co/datasets/jondurbin/airoboros-2.1). [Falcon-180B](https://huggingface.co/tiiuae/falcon-180b) is a state-of-the-art large language model built by the [Technology Innovation Institute](https://www.tii.ae) in Abu Dhabi. It is trained on 3.5 trillion tokens (including [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)) and available under the [Falcon-180B TII License](https://huggingface.co/spaces/tiiuae/falcon-180b-license/blob/main/LICENSE.txt). It currently holds the 🥇 1st place on the [🤗 Open LLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for a pretrained model.
128
 
129
- 🧪 This is only a **first experimental preview**: we intend to provide increasingly capable versions of Falcon in the future, based on improved datasets and RLHF/RLAIF.
130
 
131
  👀 **Learn more about Falcon LLM:** [falconllm.tii.ae](https://falconllm.tii.ae/)
132
 
@@ -142,4 +145,4 @@ with gr.Blocks() as demo:
142
  additional_inputs=additional_inputs,
143
  )
144
 
145
- demo.queue(concurrency_count=100, api_open=True).launch(show_api=True)
 
7
  from huggingface_hub import Repository, InferenceClient
8
 
9
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
10
+ USER_NAME = os.environ.get("USER_NAME", None)
11
+ APP_PASSWORD = os.environ.get("APP_PASSWORD", None)
12
+
13
  API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat"
14
  BOT_NAME = "Falcon"
15
 
 
129
 
130
  ✨ This demo is powered by [Falcon-180B](https://huggingface.co/tiiuae/falcon-180B) and finetuned on a mixture of [Ultrachat](https://huggingface.co/datasets/stingning/ultrachat), [Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) and [Airoboros](https://huggingface.co/datasets/jondurbin/airoboros-2.1). [Falcon-180B](https://huggingface.co/tiiuae/falcon-180b) is a state-of-the-art large language model built by the [Technology Innovation Institute](https://www.tii.ae) in Abu Dhabi. It is trained on 3.5 trillion tokens (including [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)) and available under the [Falcon-180B TII License](https://huggingface.co/spaces/tiiuae/falcon-180b-license/blob/main/LICENSE.txt). It currently holds the 🥇 1st place on the [🤗 Open LLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for a pretrained model.
131
 
132
+ 🧪 Falcon may be improved in the future, based on improved datasets.
133
 
134
  👀 **Learn more about Falcon LLM:** [falconllm.tii.ae](https://falconllm.tii.ae/)
135
 
 
145
  additional_inputs=additional_inputs,
146
  )
147
 
148
+ demo.queue(concurrency_count=100, api_open=True).launch(show_api=True, auth=(USER_NAME, APP_PASSWORD))