# Environment variable setup: TF_ENABLE_ONEDNN_OPTS must be set before
# TensorFlow is imported for it to take effect.
import os
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'  # disable oneDNN optimizations

import streamlit as st
import tensorflow as tf
from absl import logging
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the Hugging Face model
tokenizer = AutoTokenizer.from_pretrained("snunlp/KR-FinBert-SC")
model = AutoModelForSequenceClassification.from_pretrained("snunlp/KR-FinBert-SC")

# Initialize logging
logging.set_verbosity(logging.INFO)
logging.use_absl_handler()
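# Optional: Streamlit re-runs this script on every interaction, which reloads
# the model each time. A minimal sketch of caching the load, assuming
# Streamlit's st.cache_resource decorator is available (Streamlit >= 1.18);
# load_finbert is a hypothetical helper name:
#
# @st.cache_resource
# def load_finbert(name="snunlp/KR-FinBert-SC"):
#     # Download once, then reuse the same objects across reruns
#     return (AutoTokenizer.from_pretrained(name),
#             AutoModelForSequenceClassification.from_pretrained(name))
#
# tokenizer, model = load_finbert()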
# GPU configuration: allow TensorFlow to grow GPU memory on demand
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print("GPU memory growth enabled")
    except RuntimeError as e:
        print(f"GPU configuration error: {e}")

# Print TensorFlow info
print("TensorFlow version:", tf.__version__)
print("Available devices:", tf.config.list_physical_devices())
# Streamlit app interface
st.title("Hello, Streamlit!")
st.write("This is a sample Streamlit app.")

# Add an input field
input_text = st.text_input("Enter some text:")
if st.button("Analyze"):
    # Tokenize the input and run it through the classifier (PyTorch tensors)
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model(**inputs)
    st.write("Model Output:", outputs.logits.tolist())