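# Streamlit word-guessing game: the player tries to find a secret word,
# with each guess ranked by semantic similarity from a sentence-transformers model.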
import streamlit as st
import pandas as pd
from sentence_transformers import SentenceTransformer, util
from datasets import load_dataset
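# Cache the embedding model as a resource so it is loaded only once and reused across reruns.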
@st.cache_resource
def load_model():
    return SentenceTransformer('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2')
model = load_model()
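# The word the player has to guess; "нос" is Russian for "nose" (the model is multilingual).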
secret_word = "нос"
secret_embedding = model.encode(secret_word)
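# Keep all guesses as (word, similarity) pairs across Streamlit reruns.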
if 'words' not in st.session_state:
    st.session_state['words'] = []
st.write('Try to guess the secret word by semantic similarity')
word = st.text_input("Enter a word")
used_words = [w for w, s in st.session_state['words']]
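# When a new guess is submitted, embed it and compute cosine similarity to the secret word.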
if st.button("Guess") or word:
    if word and word not in used_words:
        word_embedding = model.encode(word)
        similarity = util.pytorch_cos_sim(secret_embedding, word_embedding).cpu().numpy()[0][0]
        st.session_state['words'].append((word, similarity))
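# Show all guesses so far, sorted by similarity (best match first).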
words_df = pd.DataFrame(
    st.session_state['words'],
    columns=["word", "similarity"]
).sort_values(by=["similarity"], ascending=False)
st.dataframe(words_df)
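# Load the vocabulary from the WordNet definitions dataset and cache it.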
@st.cache_data
def load_words_dataset():
    dataset = load_dataset("marksverdhei/wordnet-definitions-en-2021", split="train")
    return dataset["Word"]
all_words = load_words_dataset()
st.write(all_words)