Owos committed on
Commit
1f81609
·
1 Parent(s): 5710aec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -23
app.py CHANGED
@@ -213,29 +213,29 @@ def set_seed():
213
 
214
 
215
 
216
- with st.sidebar:
217
-
218
- st.image("Koya_Presentation-removebg-preview.png")
219
- st.subheader("Abstract")
220
- st.markdown(
221
- """
222
- <div style="text-align: justify">
223
- <h6> Pretrained large language models (LLMs) are widely used for various downstream tasks in different languages. However, selecting the best
224
- LLM (from a large set of potential LLMs) for a given downstream task and language is a challenging and computationally expensive task, making
225
- the efficient use of LLMs difficult for low-compute communities. To address this challenge, we present Koya, a recommender system built to assist
226
- researchers and practitioners in choosing the right LLM for their task and language, without ever having to finetune the LLMs. Koya is built with
227
- the Koya Pseudo-Perplexity (KPPPL), our adaptation of the pseudo perplexity, and ranks LLMs in order of compatibility with the language of interest,
228
- making it easier and cheaper to choose the most compatible LLM. By evaluating Koya using five pretrained LLMs and three African languages
229
- (Yoruba, Kinyarwanda, and Amharic), we show an average recommender accuracy of 95%, demonstrating its effectiveness. Koya aims to offer
230
- an easy to use (through a simple web interface accessible at https://huggingface.co/spaces/koya-recommender/system), cost-effective, fast and
231
- efficient tool to assist researchers and practitioners with low or limited compute access.</h6>
232
- </div>
233
-
234
- """,
235
- unsafe_allow_html=True
236
- )
237
- url = "https://drive.google.com/file/d/1eWat34ot3j8onIeKDnJscKalp2oYnn8O/view"
238
- st.write("check out the paper [here](%s)" % url)
239
  with st.sidebar:
240
  footer()
241
 
 
213
 
214
 
215
 
216
+ # with st.sidebar:
217
+
218
+ # st.image("Koya_Presentation-removebg-preview.png")
219
+ # st.subheader("Abstract")
220
+ # st.markdown(
221
+ # """
222
+ # <div style="text-align: justify">
223
+ # <h6> Pretrained large language models (LLMs) are widely used for various downstream tasks in different languages. However, selecting the best
224
+ # LLM (from a large set of potential LLMs) for a given downstream task and language is a challenging and computationally expensive task, making
225
+ # the efficient use of LLMs difficult for low-compute communities. To address this challenge, we present Koya, a recommender system built to assist
226
+ # researchers and practitioners in choosing the right LLM for their task and language, without ever having to finetune the LLMs. Koya is built with
227
+ # the Koya Pseudo-Perplexity (KPPPL), our adaptation of the pseudo perplexity, and ranks LLMs in order of compatibility with the language of interest,
228
+ # making it easier and cheaper to choose the most compatible LLM. By evaluating Koya using five pretrained LLMs and three African languages
229
+ # (Yoruba, Kinyarwanda, and Amharic), we show an average recommender accuracy of 95%, demonstrating its effectiveness. Koya aims to offer
230
+ # an easy to use (through a simple web interface accessible at https://huggingface.co/spaces/koya-recommender/system), cost-effective, fast and
231
+ # efficient tool to assist researchers and practitioners with low or limited compute access.</h6>
232
+ # </div>
233
+
234
+ # """,
235
+ # unsafe_allow_html=True
236
+ # )
237
+ # url = "https://drive.google.com/file/d/1eWat34ot3j8onIeKDnJscKalp2oYnn8O/view"
238
+ # st.write("check out the paper [here](%s)" % url)
239
  with st.sidebar:
240
  footer()
241