Bhaskar2611 committed
Commit 9848ff2 · verified · 1 Parent(s): 26c3bae

Update app.py

Files changed (1)
  1. app.py +1 -94
app.py CHANGED
@@ -1,96 +1,3 @@
-# from huggingface_hub import InferenceClient
-# import gradio as gr
-
-# client = InferenceClient(
-#     "mistralai/Mistral-7B-Instruct-v0.3"
-# )
-
-
-# def format_prompt(message, history):
-#     prompt = "<s>"
-#     for user_prompt, bot_response in history:
-#         prompt += f"[INST] {user_prompt} [/INST]"
-#         prompt += f" {bot_response}</s> "
-#     prompt += f"[INST] {message} [/INST]"
-#     return prompt
-
-# def generate(
-#     prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
-# ):
-#     temperature = float(temperature)
-#     if temperature < 1e-2:
-#         temperature = 1e-2
-#     top_p = float(top_p)
-
-#     generate_kwargs = dict(
-#         temperature=temperature,
-#         max_new_tokens=max_new_tokens,
-#         top_p=top_p,
-#         repetition_penalty=repetition_penalty,
-#         do_sample=True,
-#         seed=42,
-#     )
-
-#     formatted_prompt = format_prompt(prompt, history)
-
-#     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-#     output = ""
-
-#     for response in stream:
-#         output += response.token.text
-#         yield output
-#     return output
-
-
-# additional_inputs=[
-#     gr.Slider(
-#         label="Temperature",
-#         value=0.9,
-#         minimum=0.0,
-#         maximum=1.0,
-#         step=0.05,
-#         interactive=True,
-#         info="Higher values produce more diverse outputs",
-#     ),
-#     gr.Slider(
-#         label="Max new tokens",
-#         value=256,
-#         minimum=0,
-#         maximum=1048,
-#         step=64,
-#         interactive=True,
-#         info="The maximum numbers of new tokens",
-#     ),
-#     gr.Slider(
-#         label="Top-p (nucleus sampling)",
-#         value=0.90,
-#         minimum=0.0,
-#         maximum=1,
-#         step=0.05,
-#         interactive=True,
-#         info="Higher values sample more low-probability tokens",
-#     ),
-#     gr.Slider(
-#         label="Repetition penalty",
-#         value=1.2,
-#         minimum=1.0,
-#         maximum=2.0,
-#         step=0.05,
-#         interactive=True,
-#         info="Penalize repeated tokens",
-#     )
-# ]
-
-
-# gr.ChatInterface(
-#     fn=generate,
-#     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
-#     additional_inputs=additional_inputs,
-#     title="""AI Dermatologist"""
-# ).launch(show_api=False)
-
-
-# gr.load("models/Bhaskar2611/Capstone").launch()
 import gradio as gr
 from huggingface_hub import InferenceClient
 
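Both the deleted block above and the surviving format_prompt (shown in the next hunk) assemble prompts in the Mistral-style instruction format: the conversation opens with <s>, each user turn is wrapped in [INST] ... [/INST], and each assistant reply is terminated with </s>. A standalone sketch of what that produces, using the system message this commit adds and a hypothetical one-turn history:

# Standalone illustration of the prompt format used by app.py's format_prompt.
# The chat turns below are made up for the example.
def format_prompt(message, history):
    prompt = "<s>"
    # System message added in this commit
    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care by providing text.[/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("What helps with dry skin?", "A fragrance-free moisturizer applied right after bathing.")]
print(format_prompt("And for oily skin?", history))
# -> <s>[INST] You are an AI Dermatologist ...[/INST][INST] What helps with dry skin? [/INST] A fragrance-free moisturizer applied right after bathing.</s> [INST] And for oily skin? [/INST]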
@@ -101,7 +8,7 @@ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 def format_prompt(message, history):
     prompt = "<s>"
     # Start the conversation with a system message
-    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care.[/INST]"
+    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care by providing text.[/INST]"
     for user_prompt, bot_response in history:
         prompt += f"[INST] {user_prompt} [/INST]"
         prompt += f" {bot_response}</s> "
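Taken together, the two hunks imply a much shorter post-commit app.py. Only the imports, the HuggingFaceH4/zephyr-7b-beta client, and format_prompt are confirmed by the context lines here; the streaming generate() handler and the ChatInterface launch in the sketch below are assumptions modeled on the deleted (commented-out) Mistral version, not code shown in this diff:

# Hedged reconstruction of app.py after this commit.
# Confirmed by the diff: the imports, the zephyr-7b-beta InferenceClient,
# and format_prompt. Assumed (modeled on the deleted Mistral version):
# the streaming generate() handler and the ChatInterface launch.
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def format_prompt(message, history):
    prompt = "<s>"
    # Start the conversation with a system message
    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care by providing text.[/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(message, history):
    # Stream tokens from the hosted model, yielding the growing reply
    # so Gradio renders it incrementally.
    stream = client.text_generation(
        format_prompt(message, history),
        max_new_tokens=256,
        temperature=0.9,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output

gr.ChatInterface(fn=generate, title="AI Dermatologist").launch(show_api=False)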