jhansi1 committed on
Commit f95dea8 · verified · 1 Parent(s): ca2a44c

Update app.py: add a Gradio chat interface alongside the Streamlit app

Files changed (1)
app.py +87 -17
app.py CHANGED
@@ -1,31 +1,101 @@
+import gradio as gr
+from huggingface_hub import InferenceClient
 import streamlit as st
 from transformers import pipeline
 from datasets import load_dataset
 
-# Initialize text-generation pipeline with the model
+# Initialize the Hugging Face InferenceClient
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# Initialize text-generation pipeline with the model for Streamlit
 model_name = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
 pipe = pipeline("text-generation", model=model_name)
 
 # Load the dataset
 ds = load_dataset("refugee-law-lab/canadian-legal-data", "default", split="train")
 
-# Streamlit interface
-st.title("Canadian Legal Text Generator")
-st.write("Enter a prompt related to Canadian legal data and generate text using Llama-3.1.")
-
-# Show dataset sample
-st.subheader("Sample Data from Canadian Legal Dataset:")
-st.write(ds[:5])  # Displaying the first 5 rows of the dataset
-
-# Prompt input
-prompt = st.text_area("Enter your prompt:", placeholder="Type something...")
-
-if st.button("Generate Response"):
-    if prompt:
-        # Generate text based on the prompt
-        with st.spinner("Generating response..."):
-            generated_text = pipe(prompt, max_length=100, do_sample=True, temperature=0.7)[0]["generated_text"]
-            st.write("**Generated Text:**")
-            st.write(generated_text)
+
+# Gradio function: rebuild the chat history and stream a completion
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    # Replay prior turns in the chat-completion message format
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    # Stream the completion, yielding the growing partial response
+    for chunk in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = chunk.choices[0].delta.content
+        if token:  # some stream chunks carry no content
+            response += token
+        yield response
+
+
+# Gradio interface setup
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
+    ],
+)
+
+
+# Streamlit interface setup
+def streamlit_interface():
+    st.title("Canadian Legal Text Generator")
+    st.write("Enter a prompt related to Canadian legal data and generate text using Llama-3.1.")
+
+    # Show dataset sample
+    st.subheader("Sample Data from Canadian Legal Dataset:")
+    st.write(ds[:5])  # Display the first 5 rows of the dataset
+
+    # Prompt input
+    prompt = st.text_area("Enter your prompt:", placeholder="Type something...")
+
+    if st.button("Generate Response"):
+        if prompt:
+            # Generate text based on the prompt
+            with st.spinner("Generating response..."):
+                generated_text = pipe(prompt, max_length=100, do_sample=True, temperature=0.7)[0]["generated_text"]
+                st.write("**Generated Text:**")
+                st.write(generated_text)
+        else:
+            st.write("Please enter a prompt to generate a response.")
+
+
+# Run one of the two interfaces, selected from the Streamlit sidebar
+if __name__ == "__main__":
+    st.sidebar.title("Choose an Interface")
+    interface = st.sidebar.radio("Select", ("Streamlit", "Gradio"))
+
+    if interface == "Streamlit":
+        streamlit_interface()
     else:
-        st.write("Please enter a prompt to generate a response.")
+        demo.launch()
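
For anyone reviewing the new streaming path without launching either UI, the chat-completion pattern that `respond()` relies on can be exercised standalone; a minimal sketch, assuming network access and a valid Hugging Face token in the environment (the prompt is illustrative, not part of the app):

```python
# Standalone sketch of the streaming pattern used by respond() above.
# Assumes HF_TOKEN is set so InferenceClient can reach the hosted model;
# the user prompt below is illustrative only.
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
messages = [
    {"role": "system", "content": "You are a friendly Chatbot."},
    {"role": "user", "content": "What does Canada's Immigration and Refugee Board do?"},
]

response = ""
for chunk in client.chat_completion(messages, max_tokens=128, stream=True):
    token = chunk.choices[0].delta.content
    if token:  # skip chunks that carry no content
        response += token

print(response)
```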