Delta-Vector committed on
Commit f3e6cda · verified · 1 Parent(s): b2c285e

Upload modeling_mistral.py

Files changed (1)
  1. modeling_mistral.py +1265 -0
modeling_mistral.py ADDED
@@ -0,0 +1,1265 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/mistral/modular_mistral.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_mistral.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ from functools import partial
8
+ from typing import Callable, List, Optional, Tuple, Union
9
+
10
+ import torch
11
+ from torch import nn
12
+
13
+ from ...activations import ACT2FN
14
+ from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
15
+ from ...generation import GenerationMixin
16
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
17
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
18
+ from ...modeling_outputs import (
19
+ BaseModelOutputWithPast,
20
+ CausalLMOutputWithPast,
21
+ QuestionAnsweringModelOutput,
22
+ SequenceClassifierOutputWithPast,
23
+ TokenClassifierOutput,
24
+ )
25
+ from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
26
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
27
+ from ...processing_utils import Unpack
28
+ from ...utils import (
29
+ LossKwargs,
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ can_return_tuple,
34
+ logging,
35
+ replace_return_docstrings,
36
+ )
37
+ from ...utils.deprecation import deprecate_kwarg
38
+ from .configuration_mistral import MistralConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "mistralai/Mistral-7B-v0.1"
44
+ _CONFIG_FOR_DOC = "MistralConfig"
45
+
46
+
47
+ class MistralMLP(nn.Module):
48
+ def __init__(self, config):
49
+ super().__init__()
50
+ self.config = config
51
+ self.hidden_size = config.hidden_size
52
+ self.intermediate_size = config.intermediate_size
53
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
54
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
55
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
56
+ self.act_fn = ACT2FN[config.hidden_act]
57
+
58
+ def forward(self, x):
59
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
60
+ return down_proj
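+ # Note on the block above: this is the standard gated MLP used by Mistral/Llama-style models,
+ # i.e. down_proj(act_fn(gate_proj(x)) * up_proj(x)) with no bias on any projection
+ # (SwiGLU-style when `hidden_act` is "silu").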
61
+
62
+
63
+ def rotate_half(x):
64
+ """Rotates half the hidden dims of the input."""
65
+ x1 = x[..., : x.shape[-1] // 2]
66
+ x2 = x[..., x.shape[-1] // 2 :]
67
+ return torch.cat((-x2, x1), dim=-1)
68
+
69
+
70
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
71
+ """Applies Rotary Position Embedding to the query and key tensors.
72
+
73
+ Args:
74
+ q (`torch.Tensor`): The query tensor.
75
+ k (`torch.Tensor`): The key tensor.
76
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
77
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
78
+ position_ids (`torch.Tensor`, *optional*):
79
+ Deprecated and unused.
80
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
81
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
82
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
83
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
84
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
85
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
86
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
87
+ Returns:
88
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
89
+ """
90
+ cos = cos.unsqueeze(unsqueeze_dim)
91
+ sin = sin.unsqueeze(unsqueeze_dim)
92
+ q_embed = (q * cos) + (rotate_half(q) * sin)
93
+ k_embed = (k * cos) + (rotate_half(k) * sin)
94
+ return q_embed, k_embed
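+ # Illustrative shape sketch for the function above (hypothetical sizes, doctest-style):
+ # >>> q = torch.randn(1, 8, 5, 64)        # (batch, num_attention_heads, seq_len, head_dim)
+ # >>> k = torch.randn(1, 2, 5, 64)        # (batch, num_key_value_heads, seq_len, head_dim)
+ # >>> cos = sin = torch.randn(1, 5, 64)   # (batch, seq_len, head_dim), as produced by MistralRotaryEmbedding
+ # >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # default unsqueeze_dim=1 broadcasts over heads
+ # >>> q_embed.shape, k_embed.shape        # shapes are unchanged
+ # (torch.Size([1, 8, 5, 64]), torch.Size([1, 2, 5, 64]))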
95
+
96
+
97
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
98
+ """
99
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
100
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
101
+ """
102
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
103
+ if n_rep == 1:
104
+ return hidden_states
105
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
106
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
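+ # Illustrative shape sketch (hypothetical sizes, doctest-style):
+ # >>> kv = torch.randn(1, 2, 5, 64)       # (batch, num_key_value_heads, seq_len, head_dim)
+ # >>> repeat_kv(kv, n_rep=4).shape        # each KV head is repeated n_rep times along dim 1
+ # torch.Size([1, 8, 5, 64])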
107
+
108
+
109
+ def eager_attention_forward(
110
+ module: nn.Module,
111
+ query: torch.Tensor,
112
+ key: torch.Tensor,
113
+ value: torch.Tensor,
114
+ attention_mask: Optional[torch.Tensor],
115
+ scaling: float,
116
+ dropout: float = 0.0,
117
+ **kwargs,
118
+ ):
119
+ key_states = repeat_kv(key, module.num_key_value_groups)
120
+ value_states = repeat_kv(value, module.num_key_value_groups)
121
+
122
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
123
+ if attention_mask is not None:
124
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
125
+ attn_weights = attn_weights + causal_mask
126
+
127
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
128
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
129
+ attn_output = torch.matmul(attn_weights, value_states)
130
+ attn_output = attn_output.transpose(1, 2).contiguous()
131
+
132
+ return attn_output, attn_weights
133
+
134
+
135
+ class MistralAttention(nn.Module):
136
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
137
+
138
+ def __init__(self, config: MistralConfig, layer_idx: int):
139
+ super().__init__()
140
+ self.config = config
141
+ self.layer_idx = layer_idx
142
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
143
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
144
+ self.scaling = self.head_dim**-0.5
145
+ self.attention_dropout = config.attention_dropout
146
+ self.is_causal = True
147
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
148
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
149
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
150
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
151
+
152
+ def forward(
153
+ self,
154
+ hidden_states: torch.Tensor,
155
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
156
+ attention_mask: Optional[torch.Tensor],
157
+ past_key_value: Optional[Cache] = None,
158
+ cache_position: Optional[torch.LongTensor] = None,
159
+ **kwargs: Unpack[FlashAttentionKwargs],
160
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
161
+ input_shape = hidden_states.shape[:-1]
162
+ hidden_shape = (*input_shape, -1, self.head_dim)
163
+
164
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
165
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
166
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
167
+
168
+ cos, sin = position_embeddings
169
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
170
+
171
+ if past_key_value is not None:
172
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
173
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
174
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
175
+
176
+ attention_interface: Callable = eager_attention_forward
177
+ if self.config._attn_implementation != "eager":
178
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
179
+ logger.warning_once(
180
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
181
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
182
+ )
183
+ else:
184
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
185
+
186
+ attn_output, attn_weights = attention_interface(
187
+ self,
188
+ query_states,
189
+ key_states,
190
+ value_states,
191
+ attention_mask,
192
+ dropout=0.0 if not self.training else self.attention_dropout,
193
+ scaling=self.scaling,
194
+ sliding_window=getattr(self.config, "sliding_window", None), # main diff with Llama
195
+ **kwargs,
196
+ )
197
+
198
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
199
+ attn_output = self.o_proj(attn_output)
200
+ return attn_output, attn_weights
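+ # Shape notes for the forward above: query_states is (batch, num_attention_heads, seq_len, head_dim),
+ # key_states/value_states are (batch, num_key_value_heads, seq_len, head_dim) before any cache update,
+ # and attn_output is reshaped back to (batch, seq_len, num_attention_heads * head_dim) before o_proj.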
201
+
202
+
203
+ class MistralRMSNorm(nn.Module):
204
+ def __init__(self, hidden_size, eps=1e-6):
205
+ """
206
+ MistralRMSNorm is equivalent to T5LayerNorm
207
+ """
208
+ super().__init__()
209
+ self.weight = nn.Parameter(torch.ones(hidden_size))
210
+ self.variance_epsilon = eps
211
+
212
+ def forward(self, hidden_states):
213
+ input_dtype = hidden_states.dtype
214
+ hidden_states = hidden_states.to(torch.float32)
215
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
216
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
217
+ return self.weight * hidden_states.to(input_dtype)
218
+
219
+ def extra_repr(self):
220
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
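+ # Descriptive note: the forward above computes y = weight * x / sqrt(mean(x**2, dim=-1) + eps),
+ # with the statistics taken in float32 and the result cast back to the input dtype.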
221
+
222
+
223
+ class MistralDecoderLayer(nn.Module):
224
+ def __init__(self, config: MistralConfig, layer_idx: int):
225
+ super().__init__()
226
+ self.hidden_size = config.hidden_size
227
+ self.self_attn = MistralAttention(config=config, layer_idx=layer_idx)
228
+ self.mlp = MistralMLP(config)
229
+ self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
230
+ self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
231
+
232
+ def forward(
233
+ self,
234
+ hidden_states: torch.Tensor,
235
+ attention_mask: Optional[torch.Tensor] = None,
236
+ position_ids: Optional[torch.LongTensor] = None,
237
+ past_key_value: Optional[Cache] = None,
238
+ output_attentions: Optional[bool] = False,
239
+ use_cache: Optional[bool] = False,
240
+ cache_position: Optional[torch.LongTensor] = None,
241
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
242
+ **kwargs: Unpack[FlashAttentionKwargs],
243
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
244
+ residual = hidden_states
245
+
246
+ hidden_states = self.input_layernorm(hidden_states)
247
+
248
+ # Self Attention
249
+ hidden_states, self_attn_weights = self.self_attn(
250
+ hidden_states=hidden_states,
251
+ attention_mask=attention_mask,
252
+ position_ids=position_ids,
253
+ past_key_value=past_key_value,
254
+ output_attentions=output_attentions,
255
+ use_cache=use_cache,
256
+ cache_position=cache_position,
257
+ position_embeddings=position_embeddings,
258
+ **kwargs,
259
+ )
260
+ hidden_states = residual + hidden_states
261
+
262
+ # Fully Connected
263
+ residual = hidden_states
264
+ hidden_states = self.post_attention_layernorm(hidden_states)
265
+ hidden_states = self.mlp(hidden_states)
266
+ hidden_states = residual + hidden_states
267
+
268
+ outputs = (hidden_states,)
269
+ if output_attentions:
270
+ outputs += (self_attn_weights,)
271
+
272
+ return outputs
273
+
274
+
275
+ class MistralRotaryEmbedding(nn.Module):
276
+ def __init__(self, config: MistralConfig, device=None):
277
+ super().__init__()
278
+ # BC: "rope_type" was originally "type"
279
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
280
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
281
+ else:
282
+ self.rope_type = "default"
283
+ self.max_seq_len_cached = config.max_position_embeddings
284
+ self.original_max_seq_len = config.max_position_embeddings
285
+
286
+ self.config = config
287
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
288
+
289
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
290
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
291
+ self.original_inv_freq = self.inv_freq
292
+
293
+ @torch.no_grad()
294
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
295
+ def forward(self, x, position_ids):
296
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
297
+ position_ids_expanded = position_ids[:, None, :].float()
298
+
299
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
300
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
301
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
302
+ emb = torch.cat((freqs, freqs), dim=-1)
303
+ cos = emb.cos() * self.attention_scaling
304
+ sin = emb.sin() * self.attention_scaling
305
+
306
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
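+ # Descriptive note: cos/sin are returned with shape (batch, seq_len, head_dim), i.e. `freqs` of shape
+ # (batch, seq_len, head_dim // 2) concatenated with itself, and are consumed by apply_rotary_pos_emb,
+ # which unsqueezes them over the head dimension.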
307
+
308
+
309
+ MISTRAL_START_DOCSTRING = r"""
310
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
311
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
312
+ etc.)
313
+
314
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
315
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
316
+ and behavior.
317
+
318
+ Parameters:
319
+ config ([`MistralConfig`]):
320
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
321
+ load the weights associated with the model, only the configuration. Check out the
322
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
323
+ """
324
+
325
+
326
+ @add_start_docstrings(
327
+ "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
328
+ MISTRAL_START_DOCSTRING,
329
+ )
330
+ class MistralPreTrainedModel(PreTrainedModel):
331
+ config_class = MistralConfig
332
+ base_model_prefix = "model"
333
+ supports_gradient_checkpointing = True
334
+ _no_split_modules = ["MistralDecoderLayer"]
335
+ _skip_keys_device_placement = ["past_key_values"]
336
+ _supports_flash_attn_2 = True
337
+ _supports_sdpa = True
338
+ _supports_flex_attn = True
339
+ _supports_cache_class = True
340
+ _supports_quantized_cache = True
341
+ _supports_static_cache = True
342
+ _supports_attention_backend = True
343
+
344
+ def _init_weights(self, module):
345
+ std = self.config.initializer_range
346
+ if isinstance(module, nn.Linear):
347
+ module.weight.data.normal_(mean=0.0, std=std)
348
+ if module.bias is not None:
349
+ module.bias.data.zero_()
350
+ elif isinstance(module, nn.Embedding):
351
+ module.weight.data.normal_(mean=0.0, std=std)
352
+ if module.padding_idx is not None:
353
+ module.weight.data[module.padding_idx].zero_()
354
+
355
+
356
+ MISTRAL_INPUTS_DOCSTRING = r"""
357
+ Args:
358
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
359
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
360
+ it.
361
+
362
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
363
+ [`PreTrainedTokenizer.__call__`] for details.
364
+
365
+ [What are input IDs?](../glossary#input-ids)
366
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
367
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
368
+
369
+ - 1 for tokens that are **not masked**,
370
+ - 0 for tokens that are **masked**.
371
+
372
+ [What are attention masks?](../glossary#attention-mask)
373
+
374
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
375
+ [`PreTrainedTokenizer.__call__`] for details.
376
+
377
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
378
+ `past_key_values`).
379
+
380
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
381
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
382
+ information on the default strategy.
383
+
384
+ - 1 indicates the head is **not masked**,
385
+ - 0 indicates the head is **masked**.
386
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
387
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
388
+ config.n_positions - 1]`.
389
+
390
+ [What are position IDs?](../glossary#position-ids)
391
+ past_key_values (`Cache`, *optional*):
392
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
393
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
394
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
395
+
396
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
397
+
398
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
399
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
400
+ of shape `(batch_size, sequence_length)`.
401
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
402
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
403
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
404
+ model's internal embedding lookup matrix.
405
+ use_cache (`bool`, *optional*):
406
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
407
+ `past_key_values`).
408
+ output_attentions (`bool`, *optional*):
409
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
410
+ tensors for more detail.
411
+ output_hidden_states (`bool`, *optional*):
412
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
413
+ more detail.
414
+ return_dict (`bool`, *optional*):
415
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
416
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
417
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
418
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
419
+ the complete sequence length.
420
+ """
421
+
422
+
423
+ @add_start_docstrings(
424
+ "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
425
+ MISTRAL_START_DOCSTRING,
426
+ )
427
+ class MistralModel(MistralPreTrainedModel):
428
+ """
429
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]
430
+
431
+ Args:
432
+ config: MistralConfig
433
+ """
434
+
435
+ def __init__(self, config: MistralConfig):
436
+ super().__init__(config)
437
+ self.padding_idx = config.pad_token_id
438
+ self.vocab_size = config.vocab_size
439
+
440
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
441
+ self.layers = nn.ModuleList(
442
+ [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
443
+ )
444
+ self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
445
+ self.rotary_emb = MistralRotaryEmbedding(config=config)
446
+ self.gradient_checkpointing = False
447
+
448
+ # Initialize weights and apply final processing
449
+ self.post_init()
450
+
451
+ def get_input_embeddings(self):
452
+ return self.embed_tokens
453
+
454
+ def set_input_embeddings(self, value):
455
+ self.embed_tokens = value
456
+
457
+ @can_return_tuple
458
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
459
+ def forward(
460
+ self,
461
+ input_ids: Optional[torch.LongTensor] = None,
462
+ attention_mask: Optional[torch.Tensor] = None,
463
+ position_ids: Optional[torch.LongTensor] = None,
464
+ past_key_values: Optional[Cache] = None,
465
+ inputs_embeds: Optional[torch.FloatTensor] = None,
466
+ use_cache: Optional[bool] = None,
467
+ output_attentions: Optional[bool] = None,
468
+ output_hidden_states: Optional[bool] = None,
469
+ cache_position: Optional[torch.LongTensor] = None,
470
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
471
+ ) -> BaseModelOutputWithPast:
472
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
473
+ output_hidden_states = (
474
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
475
+ )
476
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
477
+
478
+ if (input_ids is None) ^ (inputs_embeds is not None):
479
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
480
+
481
+ if self.gradient_checkpointing and self.training and use_cache:
482
+ logger.warning_once(
483
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
484
+ )
485
+ use_cache = False
486
+
487
+ # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
488
+ if not isinstance(past_key_values, (type(None), Cache)):
489
+ raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
490
+
491
+ if inputs_embeds is None:
492
+ inputs_embeds = self.embed_tokens(input_ids)
493
+
494
+ if use_cache and past_key_values is None:
495
+ past_key_values = DynamicCache()
496
+
497
+ if cache_position is None:
498
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
499
+ cache_position = torch.arange(
500
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
501
+ )
502
+
503
+ if position_ids is None:
504
+ position_ids = cache_position.unsqueeze(0)
505
+
506
+ causal_mask = self._update_causal_mask(
507
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
508
+ )
509
+
510
+ hidden_states = inputs_embeds
511
+
512
+ # create position embeddings to be shared across the decoder layers
513
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
514
+
515
+ # decoder layers
516
+ all_hidden_states = () if output_hidden_states else None
517
+ all_self_attns = () if output_attentions else None
518
+
519
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
520
+ if output_hidden_states:
521
+ all_hidden_states += (hidden_states,)
522
+
523
+ if self.gradient_checkpointing and self.training:
524
+ layer_outputs = self._gradient_checkpointing_func(
525
+ partial(decoder_layer.__call__, **flash_attn_kwargs),
526
+ hidden_states,
527
+ causal_mask,
528
+ position_ids,
529
+ past_key_values,
530
+ output_attentions,
531
+ use_cache,
532
+ cache_position,
533
+ position_embeddings,
534
+ )
535
+ else:
536
+ layer_outputs = decoder_layer(
537
+ hidden_states,
538
+ attention_mask=causal_mask,
539
+ position_ids=position_ids,
540
+ past_key_value=past_key_values,
541
+ output_attentions=output_attentions,
542
+ use_cache=use_cache,
543
+ cache_position=cache_position,
544
+ position_embeddings=position_embeddings,
545
+ **flash_attn_kwargs,
546
+ )
547
+
548
+ hidden_states = layer_outputs[0]
549
+
550
+ if output_attentions:
551
+ all_self_attns += (layer_outputs[1],)
552
+
553
+ hidden_states = self.norm(hidden_states)
554
+
555
+ # add hidden states from the last decoder layer
556
+ if output_hidden_states:
557
+ all_hidden_states += (hidden_states,)
558
+
559
+ return BaseModelOutputWithPast(
560
+ last_hidden_state=hidden_states,
561
+ past_key_values=past_key_values if use_cache else None,
562
+ hidden_states=all_hidden_states,
563
+ attentions=all_self_attns,
564
+ )
565
+
566
+ def _update_causal_mask(
567
+ self,
568
+ attention_mask: torch.Tensor,
569
+ input_tensor: torch.Tensor,
570
+ cache_position: torch.Tensor,
571
+ past_key_values: Cache,
572
+ output_attentions: bool = False,
573
+ ):
574
+ if self.config._attn_implementation == "flash_attention_2":
575
+ if attention_mask is not None and past_key_values is not None:
576
+ is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
577
+ if is_padding_right:
578
+ raise ValueError(
579
+ "You are attempting to perform batched generation with padding_side='right'"
580
+ " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to "
581
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
582
+ )
583
+ if attention_mask is not None and 0.0 in attention_mask:
584
+ return attention_mask
585
+ return None
586
+
587
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
588
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
589
+ # to infer the attention mask.
590
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
591
+ using_static_cache = isinstance(past_key_values, StaticCache)
592
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
593
+
594
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
595
+ if (
596
+ self.config._attn_implementation == "sdpa"
597
+ and not (using_static_cache or using_sliding_window_cache)
598
+ and not output_attentions
599
+ ):
600
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
601
+ attention_mask,
602
+ inputs_embeds=input_tensor,
603
+ past_key_values_length=past_seen_tokens,
604
+ sliding_window=self.config.sliding_window,
605
+ is_training=self.training,
606
+ ):
607
+ return None
608
+
609
+ dtype, device = input_tensor.dtype, input_tensor.device
610
+ min_dtype = torch.finfo(dtype).min
611
+ sequence_length = input_tensor.shape[1]
612
+ # SlidingWindowCache or StaticCache
613
+ if using_sliding_window_cache or using_static_cache:
614
+ target_length = past_key_values.get_max_cache_shape()
615
+ # DynamicCache or no cache
616
+ else:
617
+ target_length = (
618
+ attention_mask.shape[-1]
619
+ if isinstance(attention_mask, torch.Tensor)
620
+ else past_seen_tokens + sequence_length + 1
621
+ )
622
+
623
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
624
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
625
+ attention_mask,
626
+ sequence_length=sequence_length,
627
+ target_length=target_length,
628
+ dtype=dtype,
629
+ device=device,
630
+ cache_position=cache_position,
631
+ batch_size=input_tensor.shape[0],
632
+ config=self.config,
633
+ past_key_values=past_key_values,
634
+ )
635
+
636
+ if (
637
+ self.config._attn_implementation == "sdpa"
638
+ and attention_mask is not None
639
+ and attention_mask.device.type in ["cuda", "xpu"]
640
+ and not output_attentions
641
+ ):
642
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
643
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
644
+ # Details: https://github.com/pytorch/pytorch/issues/110213
645
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
646
+
647
+ return causal_mask
648
+
649
+ @staticmethod
650
+ def _prepare_4d_causal_attention_mask_with_cache_position(
651
+ attention_mask: torch.Tensor,
652
+ sequence_length: int,
653
+ target_length: int,
654
+ dtype: torch.dtype,
655
+ device: torch.device,
656
+ cache_position: torch.Tensor,
657
+ batch_size: int,
658
+ config: MistralConfig,
659
+ past_key_values: Cache,
660
+ ):
661
+ """
662
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
663
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
664
+
665
+ Args:
666
+ attention_mask (`torch.Tensor`):
667
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
668
+ sequence_length (`int`):
669
+ The sequence length being processed.
670
+ target_length (`int`):
671
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
672
+ dtype (`torch.dtype`):
673
+ The dtype to use for the 4D attention mask.
674
+ device (`torch.device`):
675
+ The device to place the 4D attention mask on.
676
+ cache_position (`torch.Tensor`):
677
+ Indices depicting the position of the input sequence tokens in the sequence.
678
+ batch_size (`int`):
679
+ Batch size.
680
+ config (`MistralConfig`):
681
+ The model's configuration class
682
+ past_key_values (`Cache`):
683
+ The cache class that is being used currently to generate
684
+ """
685
+ if attention_mask is not None and attention_mask.dim() == 4:
686
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
687
+ causal_mask = attention_mask
688
+ else:
689
+ min_dtype = torch.finfo(dtype).min
690
+ causal_mask = torch.full(
691
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
692
+ )
693
+ diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
694
+ if config.sliding_window is not None:
695
+ # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
696
+ # the check is needed to verify if the current checkpoint was trained with a sliding window or not
697
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
698
+ sliding_attend_mask = torch.arange(target_length, device=device) <= (
699
+ cache_position.reshape(-1, 1) - config.sliding_window
700
+ )
701
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
702
+ causal_mask *= diagonal_attend_mask
703
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
704
+ if attention_mask is not None:
705
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
706
+ if attention_mask.shape[-1] > target_length:
707
+ attention_mask = attention_mask[:, :target_length]
708
+ mask_length = attention_mask.shape[-1]
709
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
710
+ causal_mask.device
711
+ )
712
+ padding_mask = padding_mask == 0
713
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
714
+ padding_mask, min_dtype
715
+ )
716
+ return causal_mask
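+ # Illustrative sketch of the mask above (hypothetical sizes, no sliding window): with sequence_length=3,
+ # target_length=5 and cache_position=[2, 3, 4], entry (q, k) is 0 where k <= cache_position[q] and
+ # min_dtype where k > cache_position[q], broadcast to (batch_size, 1, 3, 5). A 2D attention_mask then
+ # additionally fills the padded key columns with min_dtype.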
717
+
718
+
719
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
720
+
721
+
722
+ class MistralForCausalLM(MistralPreTrainedModel, GenerationMixin):
723
+ _tied_weights_keys = ["lm_head.weight"]
724
+ _tp_plan = {"lm_head": "colwise_rep"}
725
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
726
+
727
+ def __init__(self, config):
728
+ super().__init__(config)
729
+ self.model = MistralModel(config)
730
+ self.vocab_size = config.vocab_size
731
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
732
+
733
+ # Initialize weights and apply final processing
734
+ self.post_init()
735
+
736
+ def get_input_embeddings(self):
737
+ return self.model.embed_tokens
738
+
739
+ def set_input_embeddings(self, value):
740
+ self.model.embed_tokens = value
741
+
742
+ def get_output_embeddings(self):
743
+ return self.lm_head
744
+
745
+ def set_output_embeddings(self, new_embeddings):
746
+ self.lm_head = new_embeddings
747
+
748
+ def set_decoder(self, decoder):
749
+ self.model = decoder
750
+
751
+ def get_decoder(self):
752
+ return self.model
753
+
754
+ @can_return_tuple
755
+ @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
756
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
757
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
758
+ def forward(
759
+ self,
760
+ input_ids: Optional[torch.LongTensor] = None,
761
+ attention_mask: Optional[torch.Tensor] = None,
762
+ position_ids: Optional[torch.LongTensor] = None,
763
+ past_key_values: Optional[Cache] = None,
764
+ inputs_embeds: Optional[torch.FloatTensor] = None,
765
+ labels: Optional[torch.LongTensor] = None,
766
+ use_cache: Optional[bool] = None,
767
+ output_attentions: Optional[bool] = None,
768
+ output_hidden_states: Optional[bool] = None,
769
+ cache_position: Optional[torch.LongTensor] = None,
770
+ logits_to_keep: Union[int, torch.Tensor] = 0,
771
+ **kwargs: Unpack[KwargsForCausalLM],
772
+ ) -> CausalLMOutputWithPast:
773
+ r"""
774
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
775
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
776
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
777
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
778
+
779
+ logits_to_keep (`int` or `torch.Tensor`, *optional*):
780
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
781
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
782
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
783
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
784
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
785
+
786
+ Returns:
787
+
788
+ Example:
789
+
790
+ ```python
791
+ >>> from transformers import AutoTokenizer, MistralForCausalLM
792
+
793
+ >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
794
+ >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
795
+
796
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
797
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
798
+
799
+ >>> # Generate
800
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
801
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
802
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
803
+ ```"""
804
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
805
+ output_hidden_states = (
806
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
807
+ )
808
+
809
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
810
+ outputs: BaseModelOutputWithPast = self.model(
811
+ input_ids=input_ids,
812
+ attention_mask=attention_mask,
813
+ position_ids=position_ids,
814
+ past_key_values=past_key_values,
815
+ inputs_embeds=inputs_embeds,
816
+ use_cache=use_cache,
817
+ output_attentions=output_attentions,
818
+ output_hidden_states=output_hidden_states,
819
+ cache_position=cache_position,
820
+ **kwargs,
821
+ )
822
+
823
+ if torch.cuda.is_available():
824
+ torch.cuda.empty_cache()
825
+
826
+ hidden_states = outputs.last_hidden_state
827
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
828
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
829
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
830
+
831
+ loss = None
832
+ if labels is not None:
833
+ # First, calculate the standard loss (for comparison)
834
+ standard_loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
835
+
836
+ # Now implement IntermediateSequenceAverageLoss with proper pack boundary handling
837
+ # based on position IDs and cache positions for Axolotl-style packing
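+ # In short: token-level cross-entropy is computed without reduction; for each packed sub-sequence
+ # (boundaries detected from position_ids resets, cache_position offsets, attention_mask transitions,
+ # or label transitions as a fallback) the running average of the losses over every prefix is taken,
+ # those prefix averages are averaged per pack, then averaged over packs and over the batch.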
838
+ ignore_index = -100 # Standard ignore index
839
+
840
+ # Get proper shapes
841
+ batch_size, seq_len, vocab_size = logits.shape
842
+
843
+ # Apply proper shifting for causal language modeling
844
+ shift_logits = logits[..., :-1, :].contiguous()
845
+ shift_labels = labels[..., 1:].contiguous()
846
+
847
+ # Get new sequence length after shifting
848
+ shifted_seq_len = shift_logits.size(1)
849
+
850
+ # Calculate token-wise loss (without reduction)
851
+ token_losses = torch.nn.functional.cross_entropy(
852
+ shift_logits.view(-1, vocab_size),
853
+ shift_labels.view(-1),
854
+ ignore_index=ignore_index,
855
+ reduction='none'
856
+ )
857
+
858
+ # Reshape back to [batch_size, shifted_seq_len]
859
+ token_losses = token_losses.view(batch_size, shifted_seq_len)
860
+
861
+ # Calculate the cumulative average loss for each prefix length while respecting pack boundaries
862
+ batch_losses = []
863
+
864
+ for i in range(batch_size):
865
+ seq_losses = token_losses[i]
866
+ label_seq = shift_labels[i]
867
+
868
+ # Create a mask for valid tokens (not ignore_index)
869
+ valid_mask = (label_seq != ignore_index).float()
870
+
871
+ # Use position_ids or cache_position to identify pack boundaries if available
872
+ # This is how Axolotl identifies boundaries - through position resets
873
+ pack_starts = []
874
+
875
+ if position_ids is not None:
876
+ # Use position_ids to detect pack boundaries - a reset in position_ids
877
+ # indicates the start of a new sequence in the pack
878
+ pos_ids = position_ids[i, :shifted_seq_len+1] # +1 because we need to check the positions before shifting
879
+
880
+ # A position reset (position going back to 0 or decreasing) indicates a new sequence
881
+ # First token is always a pack start if it's valid
882
+ if valid_mask[0] > 0:
883
+ pack_starts.append(0)
884
+
885
+ # For the rest, check if position decreases or resets
886
+ for j in range(1, shifted_seq_len):
887
+ # Only check valid tokens
888
+ if valid_mask[j] > 0:
889
+ # If position decreased or reset to 0, this is a pack boundary
890
+ if pos_ids[j] < pos_ids[j-1] or pos_ids[j] == 0:
891
+ pack_starts.append(j)
892
+
893
+ elif cache_position is not None:
894
+ # Use cache_position (cu_seqlens in Axolotl) to identify pack boundaries
895
+ # cache_position gives the start positions of each sequence in the pack
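+ # Note: indexing `cache_position[i]` below assumes a per-sample (2D, cu_seqlens-style) layout of
+ # start offsets; the base model documents `cache_position` as a 1D tensor of shape (sequence_length),
+ # so this branch is only meaningful when such a packed variant is passed in.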
896
+ cache_pos = cache_position[i]
897
+ if cache_pos is not None and len(cache_pos) > 0:
898
+ # Convert absolute positions to relative positions in the shifted sequence
899
+ for pos in cache_pos:
900
+ if 0 <= pos-1 < shifted_seq_len and valid_mask[pos-1] > 0:
901
+ pack_starts.append(pos-1) # -1 due to the shift
902
+
903
+ else:
904
+ # Fall back to attention_mask if available - sudden transitions from 0 to 1
905
+ # indicate potential pack boundaries
906
+ if attention_mask is not None:
907
+ attn_mask = attention_mask[i, :shifted_seq_len+1]
908
+
909
+ # First token is a pack start if it's valid
910
+ if valid_mask[0] > 0 and attn_mask[0] > 0:
911
+ pack_starts.append(0)
912
+
913
+ # Check for transitions from padding (0) to valid (1) in attention mask
914
+ for j in range(1, shifted_seq_len):
915
+ if valid_mask[j] > 0 and attn_mask[j] > 0 and attn_mask[j-1] == 0:
916
+ pack_starts.append(j)
917
+
918
+ # If all else fails, use label transitions as a last resort
919
+ # This is a fallback but not as accurate as position_ids or cache_position
920
+ if not pack_starts:
921
+ if valid_mask[0] > 0:
922
+ pack_starts.append(0)
923
+
924
+ for j in range(1, shifted_seq_len):
925
+ if valid_mask[j] > 0 and label_seq[j-1] == ignore_index:
926
+ pack_starts.append(j)
927
+
928
+ # Ensure pack_starts is not empty and sorted
929
+ if not pack_starts:
930
+ # If no boundaries detected but there are valid tokens, consider all valid tokens as one sequence
931
+ if valid_mask.sum() > 0:
932
+ # Find the first valid token position
933
+ first_valid = torch.nonzero(valid_mask > 0)[0].item()
934
+ pack_starts = [first_valid]
935
+
936
+ # Sort pack_starts to ensure they're in increasing order
937
+ pack_starts = sorted(pack_starts)
938
+
939
+ if len(pack_starts) == 0:
940
+ # No valid tokens in this sequence
941
+ batch_losses.append(torch.tensor(0.0, device=logits.device))
942
+ continue
943
+
944
+ # Process each packed sequence separately
945
+ seq_losses_list = []
946
+
947
+ for j in range(len(pack_starts)):
948
+ start_idx = pack_starts[j]
949
+ # End index is either the next pack start or the end of sequence
950
+ end_idx = pack_starts[j+1] if j+1 < len(pack_starts) else shifted_seq_len
951
+
952
+ # Extract this packed sequence
953
+ pack_losses = seq_losses[start_idx:end_idx]
954
+ pack_mask = valid_mask[start_idx:end_idx]
955
+
956
+ # If no valid tokens in this pack, skip it
957
+ if pack_mask.sum() == 0:
958
+ continue
959
+
960
+ # Calculate cumulative sums for this pack only
961
+ cum_losses = torch.cumsum(pack_losses * pack_mask, dim=0)
962
+ cum_valid_tokens = torch.cumsum(pack_mask, dim=0)
963
+
964
+ # Calculate average loss for each prefix length (avoiding division by zero)
965
+ prefix_averages = cum_losses / (cum_valid_tokens + 1e-10)
966
+
967
+ # Only consider positions with valid tokens
968
+ valid_indices = pack_mask.bool()
969
+ valid_averages = prefix_averages[valid_indices]
970
+
971
+ if len(valid_averages) > 0:
972
+ # Average of all prefix averages for this pack
973
+ seq_losses_list.append(valid_averages.mean())
974
+
975
+ if len(seq_losses_list) > 0:
976
+ # Average across all packs in this batch element
977
+ batch_losses.append(torch.stack(seq_losses_list).mean())
978
+ else:
979
+ # Fall back to zero loss if no valid packs
980
+ batch_losses.append(torch.tensor(0.0, device=logits.device))
981
+
982
+ # Average across the batch
983
+ custom_loss = torch.stack(batch_losses).mean()
984
+
985
+ if self.training:
986
+ custom_loss = custom_loss / 2
987
+
988
+ # Print comparison between standard and custom loss
989
+ print(f"Standard Loss: {standard_loss.item():.4f} | Custom Subsequence Loss: {custom_loss.item():.4f}")
990
+
991
+ # Use the custom loss for training
992
+ loss = custom_loss
993
+
994
+ if torch.cuda.is_available():
995
+ torch.cuda.empty_cache()
996
+
997
+ return CausalLMOutputWithPast(
998
+ loss=loss,
999
+ logits=logits,
1000
+ past_key_values=outputs.past_key_values,
1001
+ hidden_states=outputs.hidden_states,
1002
+ attentions=outputs.attentions,
1003
+ )
1004
+
1005
+ @add_start_docstrings(
1006
+ """
1007
+ The Mistral Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1008
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1009
+ """,
1010
+ MISTRAL_START_DOCSTRING,
1011
+ )
1012
+ class MistralForTokenClassification(MistralPreTrainedModel):
1013
+ def __init__(self, config):
1014
+ super().__init__(config)
1015
+ self.num_labels = config.num_labels
1016
+ self.model = MistralModel(config)
1017
+ if getattr(config, "classifier_dropout", None) is not None:
1018
+ classifier_dropout = config.classifier_dropout
1019
+ elif getattr(config, "hidden_dropout", None) is not None:
1020
+ classifier_dropout = config.hidden_dropout
1021
+ else:
1022
+ classifier_dropout = 0.1
1023
+ self.dropout = nn.Dropout(classifier_dropout)
1024
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1025
+
1026
+ # Initialize weights and apply final processing
1027
+ self.post_init()
1028
+
1029
+ def get_input_embeddings(self):
1030
+ return self.model.embed_tokens
1031
+
1032
+ def set_input_embeddings(self, value):
1033
+ self.model.embed_tokens = value
1034
+
1035
+ @can_return_tuple
1036
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
1037
+ @add_code_sample_docstrings(
1038
+ checkpoint=_CHECKPOINT_FOR_DOC,
1039
+ output_type=TokenClassifierOutput,
1040
+ config_class=_CONFIG_FOR_DOC,
1041
+ )
1042
+ def forward(
1043
+ self,
1044
+ input_ids: Optional[torch.LongTensor] = None,
1045
+ attention_mask: Optional[torch.Tensor] = None,
1046
+ position_ids: Optional[torch.LongTensor] = None,
1047
+ past_key_values: Optional[Cache] = None,
1048
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1049
+ labels: Optional[torch.LongTensor] = None,
1050
+ use_cache: Optional[bool] = None,
1051
+ output_attentions: Optional[bool] = None,
1052
+ output_hidden_states: Optional[bool] = None,
1053
+ ) -> TokenClassifierOutput:
1054
+ r"""
1055
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1056
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1057
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1058
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1059
+ """
1060
+
1061
+ outputs: BaseModelOutputWithPast = self.model(
1062
+ input_ids,
1063
+ attention_mask=attention_mask,
1064
+ position_ids=position_ids,
1065
+ past_key_values=past_key_values,
1066
+ inputs_embeds=inputs_embeds,
1067
+ use_cache=use_cache,
1068
+ output_attentions=output_attentions,
1069
+ output_hidden_states=output_hidden_states,
1070
+ )
1071
+ sequence_output = outputs.last_hidden_state
1072
+ sequence_output = self.dropout(sequence_output)
1073
+ logits = self.score(sequence_output)
1074
+
1075
+ loss = None
1076
+ if labels is not None:
1077
+ loss = self.loss_function(logits, labels, self.config)
1078
+
1079
+ return TokenClassifierOutput(
1080
+ loss=loss,
1081
+ logits=logits,
1082
+ hidden_states=outputs.hidden_states,
1083
+ attentions=outputs.attentions,
1084
+ )
1085
+
1086
+
1087
+ @add_start_docstrings(
1088
+ """
1089
+ The Mistral Model transformer with a sequence classification head on top (linear layer).
1090
+
1091
+ [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1092
+ (e.g. GPT-2) do.
1093
+
1094
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1095
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1096
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1097
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1098
+ each row of the batch).
1099
+ """,
1100
+ MISTRAL_START_DOCSTRING,
1101
+ )
1102
+ class MistralForSequenceClassification(MistralPreTrainedModel):
1103
+ def __init__(self, config):
1104
+ super().__init__(config)
1105
+ self.num_labels = config.num_labels
1106
+ self.model = MistralModel(config)
1107
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1108
+
1109
+ # Initialize weights and apply final processing
1110
+ self.post_init()
1111
+
1112
+ def get_input_embeddings(self):
1113
+ return self.model.embed_tokens
1114
+
1115
+ def set_input_embeddings(self, value):
1116
+ self.model.embed_tokens = value
1117
+
1118
+ @can_return_tuple
1119
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
1120
+ def forward(
1121
+ self,
1122
+ input_ids: Optional[torch.LongTensor] = None,
1123
+ attention_mask: Optional[torch.Tensor] = None,
1124
+ position_ids: Optional[torch.LongTensor] = None,
1125
+ past_key_values: Optional[Cache] = None,
1126
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1127
+ labels: Optional[torch.LongTensor] = None,
1128
+ use_cache: Optional[bool] = None,
1129
+ output_attentions: Optional[bool] = None,
1130
+ output_hidden_states: Optional[bool] = None,
1131
+ ) -> SequenceClassifierOutputWithPast:
1132
+ r"""
1133
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1134
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1135
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1136
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1137
+ """
1138
+
1139
+ transformer_outputs: BaseModelOutputWithPast = self.model(
1140
+ input_ids,
1141
+ attention_mask=attention_mask,
1142
+ position_ids=position_ids,
1143
+ past_key_values=past_key_values,
1144
+ inputs_embeds=inputs_embeds,
1145
+ use_cache=use_cache,
1146
+ output_attentions=output_attentions,
1147
+ output_hidden_states=output_hidden_states,
1148
+ )
1149
+ hidden_states = transformer_outputs.last_hidden_state
1150
+ logits = self.score(hidden_states)
1151
+
1152
+ if input_ids is not None:
1153
+ batch_size = input_ids.shape[0]
1154
+ else:
1155
+ batch_size = inputs_embeds.shape[0]
1156
+
1157
+ if self.config.pad_token_id is None and batch_size != 1:
1158
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1159
+ if self.config.pad_token_id is None:
1160
+ last_non_pad_token = -1
1161
+ elif input_ids is not None:
1162
+ # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
1163
+ non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
1164
+ token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
1165
+ last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
1166
+ else:
1167
+ last_non_pad_token = -1
1168
+ logger.warning_once(
1169
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1170
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1171
+ )
1172
+
1173
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
1174
+
1175
+ loss = None
1176
+ if labels is not None:
1177
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1178
+
1179
+ return SequenceClassifierOutputWithPast(
1180
+ loss=loss,
1181
+ logits=pooled_logits,
1182
+ past_key_values=transformer_outputs.past_key_values,
1183
+ hidden_states=transformer_outputs.hidden_states,
1184
+ attentions=transformer_outputs.attentions,
1185
+ )
1186
+
1187
+
1188
+ @add_start_docstrings(
1189
+ """
1190
+ The Mistral Model transformer with a span classification head on top for extractive question-answering tasks like
1191
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1192
+ """,
1193
+ MISTRAL_START_DOCSTRING,
1194
+ )
1195
+ class MistralForQuestionAnswering(MistralPreTrainedModel):
1196
+ base_model_prefix = "model"
1197
+
1198
+ def __init__(self, config):
1199
+ super().__init__(config)
1200
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1201
+ self.model = MistralModel(config) # diff with Llama: transformer->model
1202
+
1203
+ # Initialize weights and apply final processing
1204
+ self.post_init()
1205
+
1206
+ def get_input_embeddings(self):
1207
+ return self.model.embed_tokens
1208
+
1209
+ def set_input_embeddings(self, value):
1210
+ self.model.embed_tokens = value
1211
+
1212
+ @can_return_tuple
1213
+ @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
1214
+ def forward(
1215
+ self,
1216
+ input_ids: Optional[torch.LongTensor] = None,
1217
+ attention_mask: Optional[torch.FloatTensor] = None,
1218
+ position_ids: Optional[torch.LongTensor] = None,
1219
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1220
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1221
+ start_positions: Optional[torch.LongTensor] = None,
1222
+ end_positions: Optional[torch.LongTensor] = None,
1223
+ output_attentions: Optional[bool] = None,
1224
+ output_hidden_states: Optional[bool] = None,
1225
+ **kwargs,
1226
+ ) -> QuestionAnsweringModelOutput:
1227
+ r"""
1228
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1229
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1230
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1231
+ are not taken into account for computing the loss.
1232
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1233
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1234
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1235
+ are not taken into account for computing the loss.
1236
+ """
1237
+
1238
+ outputs: BaseModelOutputWithPast = self.model(
1239
+ input_ids,
1240
+ attention_mask=attention_mask,
1241
+ position_ids=position_ids,
1242
+ past_key_values=past_key_values,
1243
+ inputs_embeds=inputs_embeds,
1244
+ output_attentions=output_attentions,
1245
+ output_hidden_states=output_hidden_states,
1246
+ )
1247
+
1248
+ sequence_output = outputs.last_hidden_state
1249
+
1250
+ logits = self.qa_outputs(sequence_output)
1251
+ start_logits, end_logits = logits.split(1, dim=-1)
1252
+ start_logits = start_logits.squeeze(-1).contiguous()
1253
+ end_logits = end_logits.squeeze(-1).contiguous()
1254
+
1255
+ loss = None
1256
+ if start_positions is not None and end_positions is not None:
1257
+ loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)
1258
+
1259
+ return QuestionAnsweringModelOutput(
1260
+ loss=loss,
1261
+ start_logits=start_logits,
1262
+ end_logits=end_logits,
1263
+ hidden_states=outputs.hidden_states,
1264
+ attentions=outputs.attentions,
1265
+ )