Update pages/Deeplearning for NLP.py
pages/Deeplearning for NLP.py CHANGED
@@ -80,14 +80,14 @@ Attention mechanisms allow models to focus on key parts of an input sequence, im
 # Transformer Example
 st.subheader('🛠️ Transformer Example (Simplified):')
 if st.button('🖥️ Show Transformer Architecture'):
-
-
-
-
-
-
-
-    st.write("**Transformer Architecture (
+    input_layer = keras.Input(shape=(None, 512))
+    attention_output = layers.MultiHeadAttention(num_heads=8, key_dim=512)(input_layer, input_layer)
+    pooled_output = layers.GlobalAveragePooling1D()(attention_output)
+    dense1 = layers.Dense(256, activation="relu")(pooled_output)
+    output_layer = layers.Dense(1)(dense1)
+
+    transformer_model = keras.Model(inputs=input_layer, outputs=output_layer)
+    st.write("**Transformer Architecture (Fixed Version):**")
     st.write(transformer_model)
 
 # Section 4: Key Attention Components
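For anyone trying the patched snippet outside the page, a minimal self-contained sketch follows. Only the model-building lines come from the commit; the imports (streamlit, keras, keras.layers) and the summary(print_fn=...) display call are assumptions added here for illustration, since the diff does not show how the page imports or renders the model.

# Assumed imports -- not part of the diff; the page presumably defines these near the top.
import streamlit as st
from tensorflow import keras
from tensorflow.keras import layers

# Self-attention block from the patch: each 512-dim token vector attends to
# every other position in the (variable-length) sequence using 8 heads.
input_layer = keras.Input(shape=(None, 512))
attention_output = layers.MultiHeadAttention(num_heads=8, key_dim=512)(input_layer, input_layer)

# Collapse the sequence to a single vector, then apply a small prediction head.
pooled_output = layers.GlobalAveragePooling1D()(attention_output)
dense1 = layers.Dense(256, activation="relu")(pooled_output)
output_layer = layers.Dense(1)(dense1)

transformer_model = keras.Model(inputs=input_layer, outputs=output_layer)

# st.write(transformer_model) only prints the object's repr; streaming the layer
# table into the app is one readable alternative (an assumption, not the commit's code).
transformer_model.summary(print_fn=st.text)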