import streamlit as st
import torch
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import plotly.express as px
from sklearn.decomposition import PCA
from transformers import AutoModel, AutoTokenizer, pipeline, AutoModelForCausalLM
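
# To run this app locally (assuming this file is saved as app.py):
#   pip install streamlit torch transformers scikit-learn plotly seaborn pandas matplotlib
#   streamlit run app.py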

# App Title
st.title("Transformer Model Explorer")
st.markdown("""
Explore Transformer models: their architectures, tokenization, and attention mechanisms.
""")

# Select a Transformer model
# Note: bigscience/bloom is the full 176B-parameter checkpoint and needs
# hundreds of GB of memory to load; a smaller variant such as
# bigscience/bloom-560m is more practical for local experimentation.
model_name = st.selectbox(
    "Choose a Transformer model:",
    ["bigscience/bloom", "openai/whisper-base", "facebook/wav2vec2-base-960h"]
)

# Load Tokenizer & Model (cached so Streamlit reruns don't reload the weights)
@st.cache_resource
def load_model_and_tokenizer(name):
    tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True)
    model = AutoModel.from_pretrained(name)
    return tokenizer, model

st.write(f"Loading model: `{model_name}`...")
tokenizer, model = load_model_and_tokenizer(model_name)

# Display Model Details
st.subheader("🛠 Model Details")
st.write(f"Model Type: `{model.config.model_type}`")
st.write(f"Number of Layers: `{getattr(model.config, 'num_hidden_layers', 'N/A')}`")
st.write(f"Number of Attention Heads: `{getattr(model.config, 'num_attention_heads', 'N/A')}`")
st.write(f"Total Parameters: `{sum(p.numel() for p in model.parameters())/1e6:.2f}M`")
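# Optional sketch (an addition, not in the original app): hidden size is another
# commonly reported dimension; fall back to 'N/A' where a config lacks it.
st.write(f"Hidden Size: `{getattr(model.config, 'hidden_size', 'N/A')}`")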

# Model Size Comparison (approximate published parameter counts)
st.subheader("📊 Model Size Comparison")
model_sizes = {
    "bigscience/bloom": 176000,  # BLOOM is a 176B-parameter model
    "openai/whisper-base": 74,
    "facebook/wav2vec2-base-960h": 95,  # the base model; wav2vec2-large is ~317M
}
df_size = pd.DataFrame(model_sizes.items(), columns=["Model", "Size (Million Parameters)"])
# Log scale keeps the two smaller models visible next to BLOOM.
fig = px.bar(df_size, x="Model", y="Size (Million Parameters)",
             title="Model Size Comparison", log_y=True)
st.plotly_chart(fig)

# Tokenization Section
st.subheader("📝 Tokenization Visualization")
input_text = st.text_input("Enter Text:", "Hello, how are you?")

if "whisper" in model_name:
    st.write("Note: Whisper is an audio model; it processes raw audio waveforms rather than plain text input.")
    tokens = []
else:
    tokens = tokenizer.tokenize(input_text)
    st.write("Tokenized Output:", tokens)
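    # Optional sketch (an addition, not in the original app): also show the
    # integer IDs the model actually consumes, alongside the token strings.
    st.write("Token IDs:", tokenizer.convert_tokens_to_ids(tokens))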

# Token Embeddings Visualization (PCA projection)
st.subheader("🧩 Token Embeddings Visualization")
with torch.no_grad():
    if "whisper" in model_name or "wav2vec2" in model_name:
        st.write("Note: audio models embed waveform features rather than text tokens, so token embeddings cannot be visualized as with text models.")
    else:
        inputs = tokenizer(input_text, return_tensors="pt")
        outputs = model(**inputs)
        if hasattr(outputs, "last_hidden_state"):
            embeddings = outputs.last_hidden_state.squeeze(0).numpy()
            # Label each embedding row with the token that produced it, including
            # any special tokens the tokenizer adds, so rows and labels line up.
            tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
            pca = PCA(n_components=2)
            reduced_embeddings = pca.fit_transform(embeddings)
            df_embeddings = pd.DataFrame(reduced_embeddings, columns=["PCA1", "PCA2"])
            df_embeddings["Token"] = tokens
            fig = px.scatter(df_embeddings, x="PCA1", y="PCA2", text="Token",
                             title="Token Embeddings (PCA Projection)")
            st.plotly_chart(fig)
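            # Optional sketch (an addition, not in the original app): how much
            # variance the 2-D projection keeps, as a rough faithfulness check.
            st.caption(f"Variance kept by the 2-D projection: {pca.explained_variance_ratio_.sum():.1%}")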

# Attention Visualization (for BLOOM)
if "bloom" in model_name:
    st.subheader("🔍 Attention Map")
    with torch.no_grad():
        outputs = model(**inputs, output_attentions=True)
        # Last layer's attention; shape (num_heads, seq_len, seq_len) after squeeze.
        attention = outputs.attentions[-1].squeeze().numpy()
        fig, ax = plt.subplots(figsize=(10, 5))
        # Heatmap of the first attention head, labeled with the input tokens.
        sns.heatmap(attention[0], cmap="viridis", xticklabels=tokens, yticklabels=tokens, ax=ax)
        st.pyplot(fig)
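    # Optional sketch (an addition, not in the original app): attention averaged
    # over all heads of the last layer, to complement the single-head map above.
    fig_mean, ax_mean = plt.subplots(figsize=(10, 5))
    sns.heatmap(attention.mean(axis=0), cmap="viridis", xticklabels=tokens, yticklabels=tokens, ax=ax_mean)
    ax_mean.set_title("Attention averaged over heads (last layer)")
    st.pyplot(fig_mean)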

# Text Generation Demo (for BLOOM)
if "bloom" in model_name:
    st.subheader("✍️ Text Generation & Token Probabilities")
    # return_full_text=False returns only the newly generated continuation.
    generator = pipeline("text-generation", model=model_name, return_full_text=False)
    generated_output = generator(input_text, max_length=50)
    st.write("Generated Output:", generated_output[0]["generated_text"])
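    # Note (an observation, not from the original app): building the pipeline from
    # model_name loads the checkpoint a second time; passing already-instantiated
    # model and tokenizer objects to pipeline() would avoid the duplicate load.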

    # Token Probability Visualization
    model_gen = AutoModelForCausalLM.from_pretrained(model_name)
    with torch.no_grad():
        inputs = tokenizer(input_text, return_tensors="pt")
        # Logits for the token that would follow the prompt.
        logits = model_gen(**inputs).logits[:, -1, :]
        probs = torch.nn.functional.softmax(logits, dim=-1).squeeze().numpy()
        top_tokens = np.argsort(probs)[-10:][::-1]  # indices of the 10 most likely next tokens
        token_probs = {tokenizer.decode([idx]): probs[idx] for idx in top_tokens}
        df_probs = pd.DataFrame(token_probs.items(), columns=["Token", "Probability"])
        fig_prob = px.bar(df_probs, x="Token", y="Probability", title="Top Token Predictions")
        st.plotly_chart(fig_prob)

st.markdown("💡 *Explore more about Transformer models!*")