Create app.py
app.py ADDED
@@ -0,0 +1,256 @@
import streamlit as st

# Initialize the slide groups in session state on first run.
if "slide_groups" not in st.session_state:
    st.session_state.slide_groups = [
        {
            "group": "Slide 1: Introduction",
            "content": r"""
**Title:** AI Toolbox: 20 Papers in 5 Minutes
**Goal:** Show how these topics (Torch, Ollama, Deepseek, SFT, knowledge distillation, crowdsourcing, etc.) tie together into an end-to-end AI pipeline.
**Media:** Quick intro audio & a short video clip highlighting AI breakthroughs.
"""
        },
        {
            "group": "Slides 2–3: Torch (PyTorch Foundations)",
            "content": r"""
**Paper 1**
*Reference:* Paszke, A. et al. “PyTorch: An Imperative Style, High-Performance Deep Learning Library.” arXiv:1912.01703 (2019)
*Key Points:*
- Dynamic computation graphs for rapid prototyping.
- Strong GPU acceleration and broad community support.
*Presentation Element:* Brief code snippet in Python + a Mermaid flowchart showing how forward/backprop flows in PyTorch.

**Paper 2**
*Reference:* Paszke, A. et al. “Automatic Differentiation in PyTorch.” arXiv:1707.?? (Hypothetical reference)
*Key Points:*
- Core mechanism behind autograd.
- How tensor operations are tracked and reversed for gradients.
*Presentation Element:* Minimal slides highlighting computational graph merges with HPC concepts.
"""
        },
        {
            "group": "Slides 4–5: Ollama & LLaMA-Based Models",
            "content": r"""
**Paper 3**
*Reference:* Touvron, H. et al. “LLaMA: Open and Efficient Foundation Language Models.” arXiv:2302.13971 (2023)
*Key Points:*
- Architecture, training efficiency, and open-source benefits.
- Relevance to Ollama (lightweight local LLaMA inference).
*Presentation Element:* Short video demo of an Ollama prompt or model reply.

**Paper 4**
*Reference:* Zhang, M. et al. “Exploring LLaMA Derivatives for Local Inference.” arXiv:2303.???? (Hypothetical)
*Key Points:*
- Techniques for running large models on consumer-grade hardware.
- Model quantization, CPU/GPU scheduling.
*Presentation Element:* Mermaid sequence diagram comparing server-based vs. local inference pipelines.
"""
        },
        {
            "group": "Slides 6–7: Deepseek MoE + Chain of Thought (CoT)",
            "content": r"""
**Paper 5**
*Reference:* Fedus, W., Zoph, B., Shazeer, N. “Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity.” arXiv:2101.03961 (2021)
*Key Points:*
- Mixture-of-Experts (MoE) approach to scale large models.
- Efficiency gains via sparse routing.
*Presentation Element:* Visual MoE block diagram with color-coded experts.

**Paper 6**
*Reference:* Wei, J. et al. “Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.” arXiv:2201.11903 (2022)
*Key Points:*
- Step-by-step reasoning prompts improve logical consistency.
- Potential synergy with MoE for specialized “reasoning experts.”
*Presentation Element:* Mermaid mind map illustrating short CoT vs. detailed CoT.
"""
        },
        {
            "group": "Slides 8–9: Hugging Face SFT Trainer",
            "content": r"""
**Paper 7**
*Reference:* Wolf, T. et al. “Transformers: State-of-the-Art Natural Language Processing.” arXiv:1910.03771 (2020)
*Key Points:*
- Core library behind Hugging Face’s ecosystem.
- Transformer architecture fundamentals.
*Presentation Element:* Show how SFTTrainer (hypothetical name) builds on Trainer for supervised finetuning.

**Paper 8**
*Reference:* Houlsby, N. et al. “Parameter-Efficient Transfer Learning for NLP.” arXiv:1902.00751 (2019)
*Key Points:*
- Techniques like adapters, LoRA, or selective layer freezing.
- Impact on training efficiency and model size.
*Presentation Element:* A side-by-side bar chart showing reduction in GPU hours with parameter-efficient methods.
"""
        },
        {
            "group": "Slides 10–11: Knowledge Distillation & Mermaid Graphs",
            "content": r"""
**Paper 9**
*Reference:* Hinton, G., Vinyals, O., Dean, J. “Distilling the Knowledge in a Neural Network.” arXiv:1503.02531 (2015)
*Key Points:*
- Transfer knowledge from large “teacher” models to small “student” models.
- Temperature scaling and teacher-student training.
*Presentation Element:* Mermaid flowchart detailing teacher–student relationships.

**Paper 10**
*Reference:* Chen, X. et al. “Graph-Based Knowledge Distillation for Neural Networks.” arXiv:2105.???? (Hypothetical)
*Key Points:*
- Represent model layers and hidden states as nodes & edges.
- Synergy with SFT and domain adaptation.
*Presentation Element:* Mermaid graph diagram linking teacher network nodes to student network nodes.
"""
        },
        {
            "group": "Slides 12–13: Crowdsourcing & Agents for Evaluation",
            "content": r"""
**Paper 11**
*Reference:* Callison-Burch, C. “Fast, Cheap, and Creative: Evaluating Translation Quality Using Amazon’s Mechanical Turk.” arXiv:0907.5225 (2009)
*Key Points:*
- Crowdsourcing pipeline for large-scale text evaluation.
- Reliability strategies: gold standards, inter-annotator agreement.
*Presentation Element:* Timeline comparing tasks for crowdworkers vs. automated agents.

**Paper 12**
*Reference:* Nie, Y. et al. “Adversarial NLI: A New Benchmark for Natural Language Understanding.” arXiv:1910.14599 (2019)
*Key Points:*
- Human-and-model-in-the-loop adversarial examples.
- Incremental data curation to improve robustness.
*Presentation Element:* Short audio explanation of adversarial example refinement.
"""
        },
        {
            "group": "Slides 14–15: Python + Gradio/Streamlit",
            "content": r"""
**Paper 13**
*Reference:* Abid, A. et al. “Gradio: A User Interface for Interactive Machine Learning.” arXiv:2101.???? (Hypothetical)
*Key Points:*
- Build quick demos and capture user feedback.
- Invaluable for crowdsourced data collection and real-time model updates.
*Presentation Element:* 10-second video demo of a Gradio UI (e.g. a chatbot or image classifier).

**Paper 14**
*Reference:* [Streamlit Team], “Streamlit: Democratizing Data App Creation.” arXiv:2004.???? (Hypothetical)
*Key Points:*
- Turning Python scripts into web apps effortlessly.
- Useful for HPC dashboards and debugging distributed training.
*Presentation Element:* Animated slides showing how to add interactive widgets with minimal code.
"""
        },
        {
            "group": "Slides 16–17: HPC for Python-Based AI",
            "content": r"""
**Paper 15**
*Reference:* Shoeybi, M. et al. “Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism.” arXiv:1909.08053 (2019)
*Key Points:*
- Scaling large models via model parallelism on HPC clusters.
- Integration with NVIDIA libraries (e.g. NCCL).
*Presentation Element:* Mermaid architecture diagram illustrating parallel pipelines.

**Paper 16**
*Reference:* Huang, Y. et al. “GPipe: Efficient Training of Giant Neural Networks using Pipeline Parallelism.” arXiv:1811.06965 (2019)
*Key Points:*
- Overlap of communication and computation for HPC efficiency.
- Synergy with MoE or large LLaMA models.
*Presentation Element:* Throughput vs. latency charts and an HPC cluster image.
"""
        },
        {
            "group": "Slides 18–19: Semantic & Episodic Memory + RLHF",
            "content": r"""
**Paper 17**
*Reference:* Ouyang, X. et al. “Integrating Episodic and Semantic Memory for Task-Oriented Dialogue.” arXiv:2105.???? (Hypothetical)
*Key Points:*
- Differentiate short-term episodic from long-term semantic context.
- Improves consistency and factual correctness in dialogue.
*Presentation Element:* Mermaid diagram contrasting ephemeral vs. persistent memory flows.

**Paper 18**
*Reference:* Ouyang, L. et al. “Training Language Models to Follow Instructions with Human Feedback.” arXiv:2203.02155 (2022)
*Key Points:*
- Reinforcement Learning from Human Feedback (RLHF).
- Align model outputs with user preferences and ethical guidelines.
*Presentation Element:* RLHF pseudo-code snippet and a timeline of preference collection.
"""
        },
        {
            "group": "Slides 20–21: Transfer Learning & “Learning for Good”",
            "content": r"""
**Paper 19**
*Reference:* Ruder, S. “A Survey on Transfer Learning for NLP.” arXiv:1910.?? (2019)
*Key Points:*
- Overview of transfer learning strategies (fine-tuning, adapters, multitask learning).
- Quickly customize large pre-trained models.
*Presentation Element:* Graph of performance gains vs. training time.

**Paper 20**
*Reference:* Zhang, Y., Yang, Q. “A Survey on Multi-Task Learning.” arXiv:1707.08114 (2017)
*Key Points:*
- Train one model on multiple tasks to share representations.
- Synergy with “Learning for Good” scenarios (e.g., medical, climate).
*Presentation Element:* Mermaid multi-task diagram showing convergence in shared layers.
"""
        },
        {
            "group": "Slide 22: Closing & Next Steps",
            "content": r"""
**Key Takeaways:**
- **Integration:** Every paper contributes to an end-to-end AI pipeline—from HPC scaling to crowdsourced evaluation.
- **Modular Approach:** Combining PyTorch, Hugging Face SFT, and knowledge distillation leads to efficient model development.
- **Interactive Demonstrations:** Leveraging Gradio/Streamlit and RLHF creates user-friendly, human-centric AI experiences.
- **Future Work:** Explore deeper synergies among MoE, HPC, and memory-based architectures.

**Media:**
- Concluding audio clip.
- (Optionally) a final Mermaid diagram linking all stages: data ingestion → HPC training → crowdsourcing → RLHF → model deployment.
"""
        }
    ]
    st.session_state.current_index = 0  # Initialize the current slide index
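# Note: st.session_state persists across reruns within a browser session, so the
# outline above is seeded only once per session; groups added at runtime through
# the sidebar form below survive reruns but are not written back to this file.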

# Set up the page configuration
st.set_page_config(page_title="AI Presentation Outline", layout="wide")
st.title("AI Toolbox Presentation Outline")

# Sidebar: Navigation and slide group addition
st.sidebar.header("Navigation")

# --- Option to add a new slide group ---
with st.sidebar.expander("Add New Slide Group"):
    with st.form("new_slide_form"):
        new_group = st.text_input("Slide Group Title")
        new_content = st.text_area("Slide Group Content (Markdown)", height=200)
        submitted = st.form_submit_button("Add Slide Group")
        if submitted:
            if new_group.strip() and new_content.strip():
                st.session_state.slide_groups.append({
                    "group": new_group.strip(),
                    "content": new_content.strip()
                })
                st.success(f"Added slide group: {new_group}")
            else:
                st.error("Please provide both a title and content.")

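# The selector below rebuilds its option list from st.session_state.slide_groups
# on every rerun, so a group submitted through the form above becomes selectable
# immediately.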
# --- Slide group selector ---
slide_titles = [slide["group"] for slide in st.session_state.slide_groups]
# Use a selectbox whose index is synced with session_state.current_index
selected_index = st.sidebar.selectbox(
    "Select Slide Group",
    range(len(slide_titles)),
    index=st.session_state.current_index,
    format_func=lambda i: slide_titles[i]
)
st.session_state.current_index = selected_index

# --- Navigation buttons ---
cols = st.sidebar.columns(2)
if cols[0].button("⟨ Previous"):
    st.session_state.current_index = max(st.session_state.current_index - 1, 0)
if cols[1].button("Next ⟩"):
    st.session_state.current_index = min(st.session_state.current_index + 1, len(slide_titles) - 1)

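# Note: the buttons above change current_index after the selectbox has already
# been drawn for this run, so the main panel below switches slides immediately
# while the sidebar selectbox may only reflect the change on the next rerun.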
# Main: Display the selected slide group's details
current_slide = st.session_state.slide_groups[st.session_state.current_index]
st.header(current_slide["group"])
st.markdown(current_slide["content"], unsafe_allow_html=True)
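# To preview the outline locally (assuming Streamlit is installed, e.g. via
# `pip install streamlit`), run:
#   streamlit run app.py
# On a Hugging Face Space using the Streamlit SDK, app.py is typically picked up
# as the entry point automatically.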