cecilemacaire committed on
Commit
e04e4a9
·
verified ·
1 Parent(s): 569b928

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -3,12 +3,12 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
  import pandas as pd
4
 
5
  # Charger le modèle et le tokenizer
6
- checkpoint = "Propicto/t2p-t5-large-orfeo"
7
  tokenizer = AutoTokenizer.from_pretrained(checkpoint)
8
  model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
9
 
10
  # Lire le lexique
11
- @st.cache
12
  def read_lexicon(lexicon):
13
  df = pd.read_csv(lexicon, sep='\t')
14
  df['keyword_no_cat'] = df['lemma'].str.split(' #').str[0].str.strip().str.replace(' ', '_')
@@ -52,4 +52,4 @@ if sentence:
52
  pictogram_ids = [get_id_picto_from_predicted_lemma(lexicon, lemma) for lemma in sentence_to_map]
53
 
54
  html = generate_html(pictogram_ids)
55
- st.components.v1.html(html, height=600, scrolling=True)
 
3
  import pandas as pd
4
 
5
  # Charger le modèle et le tokenizer
6
+ checkpoint = "your-model-checkpoint"
7
  tokenizer = AutoTokenizer.from_pretrained(checkpoint)
8
  model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
9
 
10
  # Lire le lexique
11
+ @st.cache_data
12
  def read_lexicon(lexicon):
13
  df = pd.read_csv(lexicon, sep='\t')
14
  df['keyword_no_cat'] = df['lemma'].str.split(' #').str[0].str.strip().str.replace(' ', '_')
 
52
  pictogram_ids = [get_id_picto_from_predicted_lemma(lexicon, lemma) for lemma in sentence_to_map]
53
 
54
  html = generate_html(pictogram_ids)
55
+ st.components.v1.html(html, height=600, scrolling=True)