zhixiusue committed
Commit ba2392f · verified · 1 Parent(s): 6ce3538

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +109 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,111 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
+from transformers import AutoTokenizer, AutoModelForTokenClassification
+import torch
 
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
-"""
-
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
+id_to_label = {
+    0: 'O',
+    1: 'B-TOPIC',
+    2: 'I-TOPIC',
+    3: 'B-STYLE',
+    4: 'I-STYLE',
+    5: 'B-LENGTH',
+    6: 'I-LENGTH',
+    7: 'B-LANGUAGE',
+    8: 'I-LANGUAGE'
+}
+
+@st.cache_resource
+def load_model():
+    # Load the tokenizer and token-classification model from the repo root.
+    tokenizer = AutoTokenizer.from_pretrained(".")
+    model = AutoModelForTokenClassification.from_pretrained(".")
+    return tokenizer, model
+
+tokenizer, model = load_model()
+
+def predict(text, model, tokenizer, id_to_label):
+    # Tag each character of the input sentence with a BIO label.
+    tokens = list(text)
+    inputs = tokenizer(tokens, is_split_into_words=True, return_tensors="pt", truncation=True, max_length=128)
+    model.eval()
+    with torch.no_grad():
+        outputs = model(**inputs)
+    logits = outputs.logits
+    predictions = torch.argmax(logits, dim=-1)
+
+    # Map subword positions back to the original characters, skipping special tokens.
+    word_ids = inputs.word_ids(batch_index=0)
+    pred_labels = []
+    tokens_out = []
+
+    for idx, word_idx in enumerate(word_ids):
+        if word_idx is None:
+            continue
+        token = tokens[word_idx]
+        label = id_to_label[predictions[0][idx].item()]
+        tokens_out.append(token)
+        pred_labels.append(label)
+
+    return tokens_out, pred_labels
+
+def post_process(tokens, labels):
+    # Merge WordPiece pieces ("##...") back into whole words; a word keeps its first piece's label.
+    words, word_labels = [], []
+    current_word = ""
+    current_label = None
+    for token, label in zip(tokens, labels):
+        if token in ["[CLS]", "[SEP]", "[PAD]"]:
+            continue
+        if token.startswith("##"):
+            current_word += token[2:]
+        else:
+            if current_word:
+                words.append(current_word)
+                word_labels.append(current_label)
+            current_word = token
+            current_label = label
+    if current_word:
+        words.append(current_word)
+        word_labels.append(current_label)
+    return words, word_labels
+
+def align_words_labels(words, labels):
+    return list(zip(words, labels))
+
+def extract_entities(aligned_result):
+    # Decode BIO tags into {entity, text} spans.
+    entities, current_entity, current_text = [], None, ""
+    for word, label in aligned_result:
+        if label == "O":
+            if current_entity:
+                entities.append({"entity": current_entity, "text": current_text})
+            current_entity, current_text = None, ""
+            continue
+        prefix, entity_type = label.split("-", 1)
+        if prefix == "B":
+            if current_entity:
+                entities.append({"entity": current_entity, "text": current_text})
+            current_entity, current_text = entity_type, word
+        elif prefix == "I" and current_entity == entity_type:
+            current_text += word
+        else:
+            # An I- tag that does not continue the open entity starts a new span.
+            if current_entity:
+                entities.append({"entity": current_entity, "text": current_text})
+            current_entity, current_text = entity_type, word
+    if current_entity:
+        entities.append({"entity": current_entity, "text": current_text})
+    return entities
+
+# Streamlit UI
+st.title("🎯 Learning Condition Extractor")
+st.write("사용자의 학습 목표 문장에서 조건(TOPIC, STYLE, LENGTH, LANGUAGE)을 추출합니다.")  # "Extracts the conditions (TOPIC, STYLE, LENGTH, LANGUAGE) from the user's learning-goal sentence."
+
+user_input = st.text_input("학습 목표를 입력하세요:", value="딥러닝을 실습 위주로 30분 이내에 배우고 싶어요")  # "Enter your learning goal:" / default: "I want to learn deep learning hands-on within 30 minutes"
+
+if st.button("추론 시작"):  # "Run inference"
+    tokens, pred_labels = predict(user_input, model, tokenizer, id_to_label)
+    words, word_labels = post_process(tokens, pred_labels)
+    aligned = align_words_labels(words, word_labels)
+    entities = extract_entities(aligned)
+
+    result_dict = {'TOPIC': None, 'STYLE': None, 'LENGTH': None, 'LANGUAGE': None}
+    for ent in entities:
+        result_dict[ent['entity']] = ent['text']
+
+    st.subheader("📌 추출된 조건")  # "Extracted conditions"
+    st.json(result_dict)
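
To try this commit locally, run `streamlit run src/streamlit_app.py` from the repository root; note that `from_pretrained(".")` expects the tokenizer and model files (config, weights, vocab) to sit in the working directory. For a quick sanity check of the BIO decoding without loading the model, here is a minimal standalone sketch: `merge_bio` is a hypothetical restatement of the merge logic in `extract_entities` above, and the toy (word, label) pairs are invented for illustration.

# Standalone restatement of the BIO merge used in extract_entities;
# the input pairs below are toy data, not model output.
def merge_bio(aligned):
    entities, cur_type, cur_text = [], None, ""
    for word, label in aligned:
        if label == "O":
            if cur_type:
                entities.append({"entity": cur_type, "text": cur_text})
            cur_type, cur_text = None, ""
            continue
        prefix, etype = label.split("-", 1)
        if prefix == "I" and cur_type == etype:
            cur_text += word          # continue the open entity
        else:
            if cur_type:              # B- tag, or I- with a type mismatch
                entities.append({"entity": cur_type, "text": cur_text})
            cur_type, cur_text = etype, word
    if cur_type:
        entities.append({"entity": cur_type, "text": cur_text})
    return entities

pairs = [("딥", "B-TOPIC"), ("러", "I-TOPIC"), ("닝", "I-TOPIC"),
         ("을", "O"), ("30", "B-LENGTH"), ("분", "I-LENGTH")]
print(merge_bio(pairs))
# [{'entity': 'TOPIC', 'text': '딥러닝'}, {'entity': 'LENGTH', 'text': '30분'}]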