import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

from transformers import PreTrainedModel

from typing import List

from .config import LidirlLSTMConfig


def torch_max_no_pads(model_out, lengths):
    """
    Max-pools over the sequence dimension while ignoring padded positions.
    """
    indices = torch.arange(model_out.size(1)).to(model_out.device)
    mask = (indices < lengths.view(-1, 1)).unsqueeze(-1).expand(model_out.size())
    model_out = torch.where(mask, model_out, torch.tensor(-1e9, device=model_out.device))
    max_pool = torch.max(model_out, 1)[0]
    return max_pool

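# Illustration (not part of the module): how torch_max_no_pads treats padding.
# With model_out of shape (batch, seq_len, dim) and lengths of shape (batch,),
# positions with index >= length are set to -1e9 before the max, so pads never
# win the pooling. The shapes here are assumptions consistent with the usage in
# LidirlLSTM.forward below.
#
#   out = torch_max_no_pads(torch.randn(2, 5, 8), torch.tensor([5, 3]))
#   out.shape  # torch.Size([2, 8])
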
class ProjectionLayer(nn.Module):
    """
    Noise-aware labels layer or traditional linear projection
    """

    def __init__(self,
                 hidden_dim : int,
                 label_size : int,
                 montecarlo_layer : bool = False):
        super().__init__()
        self.montecarlo_layer = montecarlo_layer
        if montecarlo_layer:
            # MCSoftmaxDenseFA (Monte Carlo noise-aware output layer) is expected to
            # be provided elsewhere in the package; it is not defined or imported here.
            self.proj = MCSoftmaxDenseFA(hidden_dim, label_size, 1, logits_only=True)
        else:
            self.projs = [
                nn.Linear(hidden_dim, label_size)
            ]
            self.proj = nn.Sequential(*self.projs)
        self.init_layer()

    def forward(self, x):
        return self.proj(x)

    def init_layer(self, pi : float = 0.01):
        """
        Initialize the final classification layer so all predictions are close to 0
        """
        if self.montecarlo_layer:
            self.proj.init_layer(pi)
            return

        for layer in self.proj.modules():
            if isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight, mean=0.0, std=0.01)
                if layer.bias is not None:
                    # set the bias so that sigmoid(bias) == pi (prior-probability init)
                    bias_value = -math.log((1 - pi) / pi)
                    nn.init.constant_(layer.bias, bias_value)

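# Worked example of the prior initialization above (illustration only): with the
# default pi = 0.01, bias = -log((1 - 0.01) / 0.01) = -log(99) ≈ -4.595, so
# sigmoid(bias) ≈ 0.01 and every label starts out predicted as (nearly) absent.
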
class MinLSTMCell(nn.Module):
    """
    https://arxiv.org/pdf/2410.01201
    https://github.com/YecanLee/min-LSTM-torch/blob/main/minLSTMcell.py
    bidirectional and parallel
    hold layer depth and sweep out the other dimensions
    """
    def __init__(self,
                 embed_dim,
                 hidden_dim):
        super(MinLSTMCell, self).__init__()
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.output_dim = embed_dim

        self.linear_f = nn.Linear(embed_dim, hidden_dim)   # forget gate
        self.linear_i = nn.Linear(embed_dim, hidden_dim)   # input gate
        self.linear_h = nn.Linear(embed_dim, hidden_dim)   # candidate hidden state

    def parallel_scan_log(self, log_coeffs, log_values):
        # Evaluates h_t = coeff_t * h_{t-1} + value_t for every position at once,
        # working in log space for numerical stability.
        a_star = F.pad(torch.cumsum(log_coeffs, dim=1), (0, 0, 1, 0))
        log_h0_plus_b_star = torch.logcumsumexp(log_values - a_star, dim=1)
        log_h = a_star + log_h0_plus_b_star
        return torch.exp(log_h)[:, 1:]

    def g(self, x):
        return torch.where(x >= 0, x + 0.5, torch.sigmoid(x))

    def log_g(self, x):
        return torch.where(x >= 0, (F.relu(x) + 0.5).log(), -F.softplus(-x))

    def forward(self, inputs):
        h_init = torch.zeros(inputs.size(0), 1, self.hidden_dim, device=inputs.device)

        diff = F.softplus(-self.linear_f(inputs)) - F.softplus(-self.linear_i(inputs))

        log_f = -F.softplus(diff)
        log_i = -F.softplus(-diff)
        # log(0) = -inf, so the zero initial state drops out of the scan
        log_h_0 = torch.log(h_init)

        log_tilde_h = self.log_g(self.linear_h(inputs))

        h = self.parallel_scan_log(log_f, torch.cat([log_h_0, log_i + log_tilde_h], dim=1))
        return h

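# For reference (a sketch, not part of the model): the log-space scan in
# MinLSTMCell.forward evaluates the minLSTM recurrence from the paper linked in
# the class docstring,
#
#   f_t  = sigmoid(linear_f(x_t)),   i_t  = sigmoid(linear_i(x_t))
#   f'_t = f_t / (f_t + i_t),        i'_t = i_t / (f_t + i_t)      # normalized gates
#   h_t  = f'_t * h_{t-1} + i'_t * g(linear_h(x_t))
#
# log_f and log_i above are log(f'_t) and log(i'_t), obtained through the
# identity log(sigmoid(x)) = -softplus(-x).
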
class LSTMBlock(nn.Module):
    def __init__(self,
                 embed_dim : int = 512,
                 hidden_dim : int = 2048,
                 num_layers : int = 6,
                 dropout : float = 0.1,
                 bidirectional : bool = False
                 ):
        super(LSTMBlock, self).__init__()

        self.layers = []
        last_dim = embed_dim
        for _ in range(num_layers):
            self.layers.append(MinLSTMCell(last_dim, hidden_dim))
            self.layers.append(nn.LayerNorm(hidden_dim, elementwise_affine=True))
            self.layers.append(nn.GELU())
            self.layers.append(nn.Dropout(dropout))
            last_dim = hidden_dim
        # nn.Sequential registers the stacked layers as submodules
        self.model = nn.Sequential(*self.layers)
        self.bidirectionality_term = 2 if bidirectional else 1
        self.output_dim = hidden_dim * self.bidirectionality_term
        self.bidirectional = bidirectional

    def flip_sequence(self, inputs, lengths):
        # reverse each sequence up to its true length, then re-pad so padding
        # remains at the end of the reversed sequences
        new = []
        for inp, leng in zip(inputs, lengths):
            new.append(inp[:leng].flip(0))
        return pad_sequence(new, batch_first=True).to(inputs.device)

    def forward(self, inputs, lengths):
        encoding = self.model(inputs)
        # take the encoding of the last non-pad token of each sequence
        last_token = encoding[torch.arange(encoding.size(0)), lengths - 1].view(inputs.size(0), 1, -1)
        if self.bidirectional:
            reverse_sequence = self.flip_sequence(inputs, lengths)
            reverse_encoding = self.model(reverse_sequence)
            reverse_last_token = reverse_encoding[torch.arange(reverse_encoding.size(0)), lengths - 1].view(inputs.size(0), 1, -1)
            last_token = torch.cat((last_token, reverse_last_token), dim=-1)

        # the output is a length-1 "sequence" per example, so the new lengths are all 1
        return last_token, torch.ones((inputs.size(0), 1), device=inputs.device, dtype=torch.long)

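# Shape sketch (illustration): for inputs of shape (batch, seq_len, embed_dim) and
# lengths of shape (batch,), LSTMBlock.forward returns a (batch, 1, output_dim)
# encoding of the last non-pad token (both directions concatenated when
# bidirectional) plus per-example lengths of 1, so downstream pooling over the
# "sequence" dimension sees a single valid position.
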
class LidirlLSTM(PreTrainedModel):
    """
    Defines the Lidirl LSTM Model
    """

    config_class = LidirlLSTMConfig

    def __init__(self, config):
        super().__init__(config)

        self.encoder = LSTMBlock(
            embed_dim = config.embed_dim,
            hidden_dim = config.hidden_dim,
            num_layers = config.num_layers,
            dropout = config.dropout,
            bidirectional = config.bidirectional
        )
        self.embed_layer = nn.Embedding(config.vocab_size, config.embed_dim)
        self.proj = ProjectionLayer(self.encoder.output_dim, config.label_size, config.montecarlo_layer)

        self.label_size = config.label_size
        self.max_length = config.max_length
        self.multilabel = config.multilabel
        self.monte_carlo = config.montecarlo_layer

        # invert the label -> index mapping from the config into an index -> label list
        self.labels = ["" for _ in config.labels]
        for key, value in config.labels.items():
            self.labels[value] = key

    def forward(self, inputs, lengths):
        inputs = inputs[:, :self.max_length]
        lengths = lengths.clamp(max=self.max_length)

        embeddings = self.embed_layer(inputs)
        encoding, lengths = self.encoder(embeddings, lengths=lengths)
        max_pool = torch_max_no_pads(encoding, lengths)
        projection = self.proj(max_pool)

        return projection

    def __call__(self, inputs, lengths):
        # inference-only entry point: returns probabilities rather than logits
        with torch.no_grad():
            logits = self.forward(inputs, lengths)
            if self.multilabel:
                probs = torch.sigmoid(logits)
            else:
                probs = torch.softmax(logits, dim=-1)
            return probs

    def predict(self, inputs, lengths, threshold=0.5, top_k=None):
        probs = self.__call__(inputs, lengths)
        if top_k is not None and top_k > 0:
            top_k_preds = torch.topk(probs, top_k, dim=1)
            pred_labels = []
            for pred, prob in zip(top_k_preds.indices, top_k_preds.values):
                pred_labels.append([(self.labels[p.item()], pr.item()) for (p, pr) in zip(pred, prob)])
            return pred_labels
        if self.multilabel:
            batch_idx, label_idx = torch.where(probs > threshold)
            output = [[] for _ in range(len(inputs))]
            for batch, label in zip(batch_idx, label_idx):
                output[batch.item()].append(
                    (self.labels[label.item()], probs[batch, label].item())
                )
            return output
        # single-label case without top_k: return the most probable label per example
        best_probs, best_idx = probs.max(dim=-1)
        return [(self.labels[i.item()], p.item()) for i, p in zip(best_idx, best_probs)]

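# Minimal usage sketch. Assumption: LidirlLSTMConfig accepts the fields read above
# (vocab_size, embed_dim, hidden_dim, num_layers, dropout, bidirectional,
# label_size, max_length, multilabel, montecarlo_layer, labels) as keyword
# arguments; the values and label names below are illustrative only.
if __name__ == "__main__":
    config = LidirlLSTMConfig(
        vocab_size=256,
        embed_dim=64,
        hidden_dim=128,
        num_layers=2,
        dropout=0.1,
        bidirectional=True,
        label_size=3,
        max_length=32,
        multilabel=False,
        montecarlo_layer=False,
        labels={"eng": 0, "fra": 1, "deu": 2},
    )
    model = LidirlLSTM(config)
    inputs = torch.randint(0, config.vocab_size, (2, 10))  # (batch, seq_len) token ids
    lengths = torch.tensor([10, 7])                        # true (unpadded) lengths
    print(model.predict(inputs, lengths, top_k=2))         # [[(label, prob), ...], ...]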