File size: 2,765 Bytes
2a20c6b
 
cd2ca57
 
2a20c6b
 
 
 
cd2ca57
2a20c6b
 
0d944ef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01cb892
2a20c6b
 
 
 
 
 
 
e1dd907
0d944ef
188ef10
1a8ed74
 
 
 
 
188ef10
2a20c6b
 
 
1a8ed74
2a20c6b
 
 
 
 
 
 
 
1a8ed74
 
2a20c6b
 
 
 
 
 
 
 
01cb892
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2a20c6b
6982b56
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import gradio as gr
from huggingface_hub import InferenceClient
import pymupdf
from duckduckgo_search import DDGS

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


# PDF Parsing
def extract_text_from_pdf(pdf_file):
    """Return the plain text of every page of a PDF, pages joined by spaces.

    Parameters
    ----------
    pdf_file : str | file-like
        Path or binary stream accepted by ``pymupdf.open``.

    Returns
    -------
    str
        Concatenated page text; empty string for a document with no pages.
    """
    # Context manager closes the document deterministically — the original
    # left the handle open (resource leak on repeated uploads).
    with pymupdf.open(pdf_file) as doc:
        # `page.get_text()` is the documented high-level text extractor and
        # yields the same plain-text output as get_textpage().extractTEXT().
        return " ".join(page.get_text() for page in doc)


# Web search fallback
def search_web(query):
    """Return the snippet ("body") of the top DuckDuckGo result for *query*.

    Degrades to a fixed "no results" message when nothing is found or the
    search fails, so a network error never crashes a chat turn.
    """
    try:
        with DDGS() as ddgs:
            # ddgs.text() returns a generator in several duckduckgo_search
            # versions, so the original `results[0]` could raise TypeError.
            # Materialize only the single result we need.
            results = list(ddgs.text(query, max_results=1))
            if results:
                return results[0]["body"]
    except Exception:
        # Best effort: treat search/network failures the same as no results.
        pass
    return "No relevant results found on the web."


# Default system prompt; `respond` sends it as the first chat message.
SYSTEM_PROMPT = """
You are an intelligent and friendly AI assistant. 

Your goals:
- Answer user questions clearly and concisely.
- If a PDF document is provided, use its content to give informed answers.
- For questions about recent or live topics (e.g., news, prices, events), you may perform a web search and summarize the result.
- If no document or web context is available, still try to help using general knowledge.
- Be honest if you don’t know something.
- Always be polite, helpful, and respectful.
"""


def respond(
    message,
    history: list[tuple[str, str]],
    system_message=None,
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
    pdf_file=None,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    The extra parameters match, in order, the ``additional_inputs`` of the
    ChatInterface below (System message textbox, Max new tokens, Temperature,
    Top-p, PDF upload) — the original signature only accepted three of the
    five values the UI passes, which raised TypeError on every chat turn.

    Parameters
    ----------
    message : str
        The user's current message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; empty strings are skipped.
    system_message : str | None
        UI-supplied system prompt; falls back to ``SYSTEM_PROMPT`` if empty.
    max_tokens, temperature, top_p
        Sampling settings forwarded to ``client.chat_completion``.
    pdf_file
        Optional uploaded PDF whose text is added as system context.

    Yields
    ------
    str
        The accumulated assistant reply after each streamed token.
    """
    # Prefer the UI-supplied system message; fall back to the default prompt.
    system_content = system_message or SYSTEM_PROMPT

    # If a PDF was uploaded, inject its text so answers can draw on it.
    if pdf_file is not None:
        try:
            # gr.File may hand back a tempfile-like object with a .name path.
            path = pdf_file.name if hasattr(pdf_file, "name") else pdf_file
            system_content += "\n\nContext from the uploaded PDF:\n" + extract_text_from_pdf(path)
        except Exception:
            # Best effort: an unreadable PDF should not kill the chat turn.
            pass

    messages = [{"role": "system", "content": system_content}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Distinct loop name — the original shadowed the `message` parameter here.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on some stream chunks; the original
        # crashed with `response += None` in that case.
        if token:
            response += token
            yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""


with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Smart AI Chatbot (PDF + Web + General QA)")

    # Upload widget; also forwarded to the chat handler as an extra input.
    pdf_file = gr.File(label="📄 Upload a PDF", file_types=[".pdf"])

    chat = gr.ChatInterface(
        respond,
        # NOTE(review): these five components are passed to `respond`
        # positionally, in this order, after (message, history) — verify the
        # function's signature accepts all five, otherwise every chat turn
        # raises TypeError.
        additional_inputs=[
            gr.Textbox(value="You are a helpful assistant.", label="System message"),
            gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
            pdf_file,  # last extra input passed to `respond`
        ],
    )

if __name__ == "__main__":
    demo.launch()