porkz committed
Commit dc9e3c8 · verified · 1 parent: e445d31

Update app.py

Files changed (1)
  1. app.py +100 -19
app.py CHANGED
@@ -1,27 +1,108 @@
  import gradio as gr
- from gradio_pdf import PDF
- from pdf2image import convert_from_path
- from transformers import pipeline
- from pathlib import Path
-
- dir_ = Path(__file__).parent
-
- p = pipeline(
-     "document-question-answering",
-     model="impira/layoutlm-document-qa",
- )
-
- def qa(doc: str, question: str) -> str:
-     img = convert_from_path(doc)[0]
-     output = p(img, question)
-     return sorted(output, key=lambda x: x["score"], reverse=True)[0]['answer']
-
- demo = gr.Interface(
-     qa,
-     [PDF(label="Document"), gr.Textbox()],
-     gr.Textbox(),
-     examples=[[str(dir_ / "invoice_2.pdf"), "What is the total gross worth?"]]
- )
-
- demo.launch()
+ from huggingface_hub import InferenceClient
+
+ """
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ """
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     messages = [{"role": "system", "content": system_message}]
+
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     for message in client.chat_completion(
+         messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = message.choices[0].delta.content
+
+         response += token
+         yield response
+
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
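The new `respond` generator streams its reply through the Inference API, so it can be exercised outside the Space by driving the generator directly from Python. The sketch below is a minimal local smoke test and not part of the commit; the prompt text, token limit, and the assumption that an HF token and network access to the hosted zephyr-7b-beta endpoint are available are all illustrative.

# Local smoke test for the streaming respond() generator committed above.
# Assumptions (not part of the commit): huggingface_hub and gradio are
# installed, an HF token is available to InferenceClient (e.g. HF_TOKEN in
# the environment), and the hosted zephyr-7b-beta endpoint is reachable.
from app import respond  # importing app.py builds `demo` but does not launch it

history = []  # no previous (user, assistant) turns
system_message = "You are a friendly Chatbot."

last = ""
for partial in respond(
    "What does nucleus sampling do?",  # illustrative user message
    history,
    system_message,
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
):
    # respond() yields the accumulated reply so far, not individual tokens,
    # which is the streaming contract gr.ChatInterface expects.
    last = partial

print(last)

One caveat worth noting: the streamed `delta.content` can arrive as `None` on some chunks, in which case the unguarded `response += token` in the committed code would raise a `TypeError`; a more defensive variant could skip empty deltas before concatenating.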