Add print statements

File changed: modeling_cogvlm.py (+4 -1)
```diff
@@ -325,10 +325,13 @@ class CogVLMDecoderLayer(nn.Module):
     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
         residual = hidden_states

+        if print_values:
+            print("Hidden states before RMS norm:", hidden_states[0, :3, :3])
+
         hidden_states = self.input_layernorm(hidden_states)

         if print_values:
-            print("Hidden states before self attention:", hidden_states[0,:3,:3])
+            print("Hidden states after RMS norm, before self attention:", hidden_states[0,:3,:3])

         # Self Attention
         hidden_states, self_attn_weights, present_key_value = self.self_attn(
```