import torch
import torch.nn as nn
import math


class PositionalEmbedding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False

        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float()
                    * -(math.log(10000.0) / d_model)).exp()

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Return the encodings for the first x.size(1) positions: [1, seq_len, d_model]
        return self.pe[:, :x.size(1)]


class PatchEmbedding(nn.Module):
    def __init__(self, d_model, patch_len, stride, padding, dropout):
        super(PatchEmbedding, self).__init__()
        # Patching
        self.patch_len = patch_len
        self.stride = stride
        self.padding_patch_layer = nn.ReplicationPad1d((0, padding))

        # Backbone, input encoding: projection of each patch onto a d_model-dimensional vector space
        self.value_embedding = nn.Linear(patch_len, d_model, bias=False)

        # Positional embedding
        self.position_embedding = PositionalEmbedding(d_model)

        # Residual dropout
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Do patching: x is [batch_size, n_vars, seq_len]
        n_vars = x.shape[1]
        x = self.padding_patch_layer(x)
        x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride)
        # [batch_size, n_vars, n_patches, patch_len] -> [batch_size * n_vars, n_patches, patch_len]
        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))
        # Input encoding
        x = self.value_embedding(x) + self.position_embedding(x)
        return self.dropout(x), n_vars
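

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal smoke test showing
# how PatchEmbedding can be called. The input layout [batch_size, n_vars,
# seq_len] and the hyperparameter values below are illustrative assumptions,
# not values prescribed by this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    batch_size, n_vars, seq_len = 32, 7, 96
    patch_len, stride, d_model = 16, 8, 128

    embed = PatchEmbedding(d_model=d_model, patch_len=patch_len,
                           stride=stride, padding=stride, dropout=0.1)
    x = torch.randn(batch_size, n_vars, seq_len)
    out, returned_n_vars = embed(x)
    # With seq_len=96, padding=stride=8, patch_len=16:
    # n_patches = (96 + 8 - 16) // 8 + 1 = 12, so out has shape
    # [batch_size * n_vars, n_patches, d_model] = [224, 12, 128].
    print(out.shape, returned_n_vars)  # torch.Size([224, 12, 128]) 7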