Update app.py
app.py
CHANGED
@@ -10,23 +10,26 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
 USER_NAME = os.environ.get("USER_NAME", None)
 APP_PASSWORD = os.environ.get("APP_PASSWORD", None)

-API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat"
-BOT_NAME = "Falcon"
+def get_API(API_URL):
+    client = InferenceClient(
+        API_URL,
+        headers={"Authorization": f"Bearer {HF_TOKEN}"},
+    )
+    return client
+
+def get_BOT(BOT_NAME):
+    return BOT_NAME

 STOP_SEQUENCES = ["\nUser:", "<|endoftext|>", " User:", "###"]

 EXAMPLES = [
-    ["
-    ["
-    ["What is the history of
-    ["Can you tell me more about
-    ["Can you write a short tweet about the release of our latest AI model, Falcon LLM?"]
+    ["Please explain me about machine learning"],
+    ["Do you know about python programming? Please create simple application for me."],
+    ["What is the history of AI?"],
+    ["Can you tell me more about Data Science?"],
+    ["Can you write a short tweet about the release of our latest AI model, Falcon 180B LLM?"]
 ]

-client = InferenceClient(
-    API_URL,
-    headers={"Authorization": f"Bearer {HF_TOKEN}"},
-)

 def format_prompt(message, history, system_prompt):
     prompt = ""
@@ -34,9 +37,9 @@ def format_prompt(message, history, system_prompt):
         prompt += f"System: {system_prompt}\n"
     for user_prompt, bot_response in history:
         prompt += f"User: {user_prompt}\n"
-        prompt += f"Falcon: {bot_response}\n"  # Response already contains "Falcon: "
+        prompt += f"GuruAI: {bot_response}\n"  # Response already contains "GuruAI: "
     prompt += f"""User: {message}
-Falcon:"""
+GuruAI:"""
     return prompt

 seed = 42
@@ -120,28 +123,21 @@ additional_inputs=[
 with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column(scale=0.4):
-            gr.Image("
+            gr.Image("https://padek.jawapos.com/wp-content/uploads/2022/10/861213472.jpg", elem_id="banner-image", show_label=False)
         with gr.Column():
             gr.Markdown(
                 """
-                #
-
-
-                **Chat with [Falcon-180B-Chat](https://huggingface.co/tiiuae/falcon-180b-chat), brainstorm ideas, discuss your holiday plans, and more!**
-
-                ✨ This is powered by [Falcon-180B](https://huggingface.co/tiiuae/falcon-180B) and finetuned on a mixture of [Ultrachat](https://huggingface.co/datasets/stingning/ultrachat), [Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) and [Airoboros](https://huggingface.co/datasets/jondurbin/airoboros-2.1). [Falcon-180B](https://huggingface.co/tiiuae/falcon-180b) is a state-of-the-art large language model built by the [Technology Innovation Institute](https://www.tii.ae) in Abu Dhabi.
-
-                💁It is trained on 3.5 trillion tokens (including [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)) and available under the [Falcon-180B TII License](https://huggingface.co/spaces/tiiuae/falcon-180b-license/blob/main/LICENSE.txt).
-
-                🧪 Falcon in the futuremay improved datasets.
-
-                👀 **Learn more about Falcon LLM:** [falconllm.tii.ae](https://falconllm.tii.ae/)
-
-                ➡️️ **Intended Use**: this demo is intended to showcase an early finetuning of [Falcon-180B](https://huggingface.co/tiiuae/falcon-180b), to illustrate the impact (and limitations) of finetuning on a dataset of conversations and instructions. They encourage the community to further build upon the base model, and to create even better instruct/chat versions!
-
+                # GuruAI
+                This is AI as Teacher, It will teach you about anything.
                 ⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words.
+
+                Example:
+                API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat" BOT_NAME = "Falcon"
                 """
             )
+
+            input_API = gr.Interface(fn=get_API, inputs="text", outputs="text")
+            input_BOT = gr.Interface(fn=get_BOT, inputs="text", outputs="text")

     gr.ChatInterface(
         generate,
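
For reference, this is the prompt string the updated format_prompt builds on a first turn. The system prompt and user message below are illustrative values, not part of the commit; the message is borrowed from the new EXAMPLES list.

# Illustrative only -- not code from this commit. Shows the string returned by the
# new format_prompt for an empty chat history and a made-up system prompt.
example_prompt = format_prompt(
    "Please explain me about machine learning",           # borrowed from EXAMPLES
    history=[],
    system_prompt="You are GuruAI, a patient teacher.",   # made-up value
)
print(example_prompt)
# System: You are GuruAI, a patient teacher.
# User: Please explain me about machine learning
# GuruAI: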
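The diff itself never calls get_API; presumably the Space's generate function (outside these hunks) consumes the returned client. As a rough sketch of how the helper, STOP_SEQUENCES, and format_prompt could be wired to huggingface_hub's InferenceClient.text_generation — endpoint taken from the Markdown example above, sampling values purely illustrative:

# Rough sketch, not code from this commit: exercising get_API against the endpoint
# shown in the Markdown example. Requires HF_TOKEN in the environment; the
# sampling parameters are illustrative, not values used by the Space.
client = get_API("https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat")
prompt = format_prompt("What is the history of AI?", history=[], system_prompt="")
output = client.text_generation(
    prompt,
    max_new_tokens=256,
    stop_sequences=STOP_SEQUENCES,  # stop at "\nUser:", "<|endoftext|>", etc.
    temperature=0.8,
    do_sample=True,
    seed=seed,                      # seed = 42, defined in app.py
)
print(output)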