import streamlit as st
import time
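# Auto-advancing slideshow: each slide shows two "pages" of paper links side by
# side, counts down for 15 seconds, then moves to the next slide and reruns.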
# Initialize the slide index in session state (if not already set)
if "slide_idx" not in st.session_state:
st.session_state.slide_idx = 0
# Define a list of 10 slides. Each slide has a left and a right page.
# Each paper entry contains the paper number, title, arXiv link, and code link.
slides = [
    {
        "left": """
**#1. Neural Module Networks for Reasoning**
[Arxiv](https://arxiv.org/abs/1234.5678) | [Code](https://github.com/example/nnm)

**#2. Neuro-Symbolic AI for Reasoning**
[Arxiv](https://arxiv.org/abs/2345.6789) | [Code](https://github.com/example/nsa)
""",
        "right": """
**#3. Transformer Models for Multi-step Reasoning**
[Arxiv](https://arxiv.org/abs/3456.7890) | [Code](https://github.com/example/transformer)

**#4. Graph Neural Networks in AI**
[Arxiv](https://arxiv.org/abs/4567.8901) | [Code](https://github.com/example/gnn)
""",
    },
    {
        "left": """
**#5. Memory-Augmented Networks for Episodic Recall**
[Arxiv](https://arxiv.org/abs/5678.9012) | [Code](https://github.com/example/memory)

**#6. Self-Supervised Learning for AI**
[Arxiv](https://arxiv.org/abs/6789.0123) | [Code](https://github.com/example/selfsup)
""",
        "right": """
**#7. Reinforcement Learning from Human Feedback**
[Arxiv](https://arxiv.org/abs/7890.1234) | [Code](https://github.com/example/rlhf)

**#8. Transfer Learning in AI Systems**
[Arxiv](https://arxiv.org/abs/8901.2345) | [Code](https://github.com/example/transfer)
""",
    },
    {
        "left": """
**#9. Deep Learning for Medical Imaging**
[Arxiv](https://arxiv.org/abs/9012.3456) | [Code](https://github.com/example/medimg)

**#10. Computer Vision in Telemedicine**
[Arxiv](https://arxiv.org/abs/0123.4567) | [Code](https://github.com/example/cvtele)
""",
        "right": """
**#11. Automated Clinical Documentation via NLP**
[Arxiv](https://arxiv.org/abs/1234.5679) | [Code](https://github.com/example/clinicalnlp)

**#12. Real-Time Transcription and Analysis**
[Arxiv](https://arxiv.org/abs/2345.6780) | [Code](https://github.com/example/realtime)
""",
    },
    {
        "left": """
**#13. Personalized Treatment Recommendation**
[Arxiv](https://arxiv.org/abs/3456.7891) | [Code](https://github.com/example/treatment)

**#14. Integration of Genomic Data in AI**
[Arxiv](https://arxiv.org/abs/4567.8902) | [Code](https://github.com/example/genomics)
""",
        "right": """
**#15. Crowdsourcing in AI Evaluation**
[Arxiv](https://arxiv.org/abs/5678.9013) | [Code](https://github.com/example/crowd)

**#16. Evaluating AI with Human Feedback**
[Arxiv](https://arxiv.org/abs/6789.0124) | [Code](https://github.com/example/evaluation)
""",
    },
    {
        "left": """
**#17. Gradio and Streamlit for Rapid Prototyping**
[Arxiv](https://arxiv.org/abs/7890.1235) | [Code](https://github.com/example/gradio)

**#18. Interactive Demos in Python**
[Arxiv](https://arxiv.org/abs/8901.2346) | [Code](https://github.com/example/interactive)
""",
        "right": """
**#19. HPC for Scaling AI Models**
[Arxiv](https://arxiv.org/abs/9012.3457) | [Code](https://github.com/example/hpc)

**#20. Model Parallelism and Pipeline Techniques**
[Arxiv](https://arxiv.org/abs/0123.4568) | [Code](https://github.com/example/parallel)
""",
    },
    {
        "left": """
**#21. Imitation Learning for Behavior Cloning**
[Arxiv](https://arxiv.org/abs/1234.5680) | [Code](https://github.com/example/imitate)

**#22. GANs for Mirroring Human Actions**
[Arxiv](https://arxiv.org/abs/2345.6781) | [Code](https://github.com/example/ganmirror)
""",
        "right": """
**#23. Empathic AI for Shared World Modeling**
[Arxiv](https://arxiv.org/abs/3456.7892) | [Code](https://github.com/example/empathic)

**#24. Deep Reinforcement Learning in Clinical Support**
[Arxiv](https://arxiv.org/abs/4567.8903) | [Code](https://github.com/example/deeprl)
""",
    },
    {
        "left": """
**#25. Mixture of Experts for AI Systems**
[Arxiv](https://arxiv.org/abs/5678.9014) | [Code](https://github.com/example/moe)

**#26. Conditional Computation and Routing Strategies**
[Arxiv](https://arxiv.org/abs/6789.0125) | [Code](https://github.com/example/routing)
""",
        "right": """
**#27. Ensemble Learning in AI**
[Arxiv](https://arxiv.org/abs/7890.1236) | [Code](https://github.com/example/ensemble)

**#28. Knowledge Distillation Across Models**
[Arxiv](https://arxiv.org/abs/8901.2347) | [Code](https://github.com/example/distill)
""",
    },
    {
        "left": """
**#29. Neural Networks for Adversarial Attacks**
[Arxiv](https://arxiv.org/abs/9012.3458) | [Code](https://github.com/example/adversary)

**#30. Robust Training with Natural Transformations**
[Arxiv](https://arxiv.org/abs/0123.4569) | [Code](https://github.com/example/robust)
""",
        "right": """
**#31. Text-to-Image Translation with GANs**
[Arxiv](https://arxiv.org/abs/1234.5681) | [Code](https://github.com/example/t2i)

**#32. Controlled Caption Generation via Adversarial Attacks**
[Arxiv](https://arxiv.org/abs/2345.6782) | [Code](https://github.com/example/caption)
""",
    },
    {
        "left": """
**#33. Multi-Modal Autoencoders for Medical Data**
[Arxiv](https://arxiv.org/abs/3456.7893) | [Code](https://github.com/example/multimodal)

**#34. Integration of Vision and Language in Healthcare**
[Arxiv](https://arxiv.org/abs/4567.8904) | [Code](https://github.com/example/visionlang)
""",
        "right": """
**#35. Reinforcement Learning for Medical QA Systems**
[Arxiv](https://arxiv.org/abs/5678.9015) | [Code](https://github.com/example/medicalqa)

**#36. Large-Scale Clinical Language Models**
[Arxiv](https://arxiv.org/abs/6789.0126) | [Code](https://github.com/example/clinicalllm)
""",
    },
    {
        "left": """
**#37. Efficient Transformers for Clinical NLP**
[Arxiv](https://arxiv.org/abs/7890.1237) | [Code](https://github.com/example/lightllm)

**#38. Continual Learning for Medical AI**
[Arxiv](https://arxiv.org/abs/8901.2348) | [Code](https://github.com/example/continual)
""",
        "right": """
**#39. Active Learning for AI Annotation**
[Arxiv](https://arxiv.org/abs/9012.3459) | [Code](https://github.com/example/active)

**#40. Automated Model Selection and Routing**
[Arxiv](https://arxiv.org/abs/0123.4570) | [Code](https://github.com/example/modelselect)
""",
    },
]
num_slides = len(slides)
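# session_state persists across reruns, so the index set above survives st.rerun()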
current_slide = slides[st.session_state.slide_idx]
# Display slide header (e.g. "Slide 1 of 10")
st.markdown(f"## Slide {st.session_state.slide_idx + 1} of {num_slides}")
# Display left and right pages side by side
col_left, col_right = st.columns(2)
with col_left:
    st.markdown("### Left Page")
    st.markdown(current_slide["left"], unsafe_allow_html=True)
with col_right:
    st.markdown("### Right Page")
    st.markdown(current_slide["right"], unsafe_allow_html=True)
# Countdown timer (15 seconds) for auto-advancement; update a single placeholder
# in place so the countdown does not stack one message per second
countdown = st.empty()
for remaining in range(15, 0, -1):
    countdown.markdown(f"**Advancing in {remaining} seconds...**")
    time.sleep(1)

# Advance to the next slide (wrap around if at the end)
st.session_state.slide_idx = (st.session_state.slide_idx + 1) % num_slides

# Rerun the app to display the next slide
st.rerun()