Gopi9177 committed
Commit 7253e8a · verified · 1 Parent(s): c54bba6

Update pages/Deeplearning for NLP.py

Files changed (1)
  1. pages/Deeplearning for NLP.py +8 -8
pages/Deeplearning for NLP.py CHANGED
@@ -80,14 +80,14 @@ Attention mechanisms allow models to focus on key parts of an input sequence, im
 # Transformer Example
 st.subheader('🛠️ Transformer Example (Simplified):')
 if st.button('🖥️ Show Transformer Architecture'):
-    transformer_model = keras.Sequential([
-        layers.InputLayer(input_shape=(None, 512)),
-        layers.MultiHeadAttention(num_heads=8, key_dim=512),
-        layers.GlobalAveragePooling1D(),
-        layers.Dense(256, activation="relu"),
-        layers.Dense(1)
-    ])
-    st.write("**Transformer Architecture (Simplified):**")
+    input_layer = keras.Input(shape=(None, 512))
+    attention_output = layers.MultiHeadAttention(num_heads=8, key_dim=512)(input_layer, input_layer)
+    pooled_output = layers.GlobalAveragePooling1D()(attention_output)
+    dense1 = layers.Dense(256, activation="relu")(pooled_output)
+    output_layer = layers.Dense(1)(dense1)
+
+    transformer_model = keras.Model(inputs=input_layer, outputs=output_layer)
+    st.write("**Transformer Architecture (Fixed Version):**")
     st.write(transformer_model)

 # Section 4: Key Attention Components
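
Note on the change: keras.Sequential feeds each layer a single input tensor, but layers.MultiHeadAttention must be called with separate query and value tensors, so the removed block could not be built as written. The added block switches to the Keras functional API and passes the same tensor as both query and value (self-attention). A minimal, self-contained sketch of the fixed model, assuming TensorFlow's bundled Keras and the same shapes used in the diff:

from tensorflow import keras
from tensorflow.keras import layers

# Variable-length sequence of 512-dimensional token embeddings.
inputs = keras.Input(shape=(None, 512))
# Self-attention: the same tensor serves as both query and value.
attn = layers.MultiHeadAttention(num_heads=8, key_dim=512)(inputs, inputs)
# Collapse the sequence dimension before the dense head.
pooled = layers.GlobalAveragePooling1D()(attn)
hidden = layers.Dense(256, activation="relu")(pooled)
outputs = layers.Dense(1)(hidden)

model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()  # prints the layer stack and parameter counts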