REPST #3

Merged
czzhangheng merged 42 commits from REPST into main 2025-12-20 16:03:22 +08:00
27 changed files with 95 additions and 19 deletions
Showing only changes of commit 1a13a32688 - Show all commits

View File

@@ -32,6 +32,7 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -17,6 +17,7 @@ data:
   steps_per_day: 48
   test_ratio: 0.2
   val_ratio: 0.2
+  output_dim: 1
 model:
   d_ff: 128

View File

@@ -32,6 +32,7 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -32,6 +32,7 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -32,6 +32,7 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -32,6 +32,7 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -32,6 +32,7 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  output_dim: 1
 train:
   batch_size: 64

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -33,6 +34,8 @@ model:
   stride: 7
   word_num: 1000
   output_dim: 6
+  graph_dim: 64
+  graph_embed_dim: 10
 train:
   batch_size: 16

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -33,6 +34,9 @@ model:
   stride: 7
   word_num: 1000
   output_dim: 6
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 6
 train:
   batch_size: 16

(Review note: this hunk adds a second `output_dim: 6` under `model:`, creating a duplicate YAML mapping key — most loaders silently keep only the later value; the added line should be dropped.)

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 32

View File

@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,6 +33,9 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
   batch_size: 16

View File

@@ -6,7 +6,7 @@ basic:
   seed: 2023
 data:
-  batch_size: 64
+  batch_size: 16
   column_wise: false
   days_per_week: 7
   horizon: 24
@@ -19,6 +19,7 @@ data:
   val_ratio: 0.2
 model:
+  cheb: 3
   d_ff: 128
   d_model: 64
   dropout: 0.2
@@ -32,9 +33,12 @@ model:
   seq_len: 24
   stride: 7
   word_num: 1000
+  graph_dim: 64
+  graph_embed_dim: 10
+  output_dim: 1
 train:
-  batch_size: 64
+  batch_size: 16
   debug: false
   early_stop: true
   early_stop_patience: 15

View File

@@ -127,8 +127,11 @@ class ASTRA(nn.Module):
         self.gpt_layers = configs['gpt_layers']  # 使用的GPT2层数
         self.d_ff = configs['d_ff']  # 前馈网络隐藏层维度
         self.gpt_path = configs['gpt_path']  # 预训练GPT2模型路径
-        self.num_nodes = configs.get('num_nodes', 325)  # 节点数量
-        self.output_dim = configs.get('output_dim', 1)
+        self.num_nodes = configs['num_nodes']  # 节点数量
+        self.output_dim = configs['output_dim']
+        self.cheb = configs['cheb']
+        self.graph_dim = configs['graph_dim']
+        self.graph_embed_dim = configs['graph_embed_dim']
         self.word_choice = GumbelSoftmax(configs['word_num'])  # 词汇选择层
@@ -152,18 +155,18 @@ class ASTRA(nn.Module):
         # 初始化图增强编码器
         self.graph_encoder = GraphEnhancedEncoder(
-            K=configs.get('chebyshev_order', 3),  # Chebyshev多项式阶数
+            K=self.cheb,  # Chebyshev多项式阶数
             in_dim=self.d_model,  # 输入特征维度
-            hidden_dim=configs.get('graph_hidden_dim', 32),  # 隐藏层维度
+            hidden_dim=self.graph_dim,  # 隐藏层维度
             num_nodes=self.num_nodes,  # 节点数量
-            embed_dim=configs.get('graph_embed_dim', 10),  # 节点嵌入维度
+            embed_dim=self.graph_embed_dim,  # 节点嵌入维度
             device=self.device,  # 运行设备
             temporal_dim=self.seq_len,  # 时间序列长度
             num_features=self.input_dim  # 特征通道数
         )
         self.graph_projection = nn.Linear(  # 图特征投影层每一k阶的切比雪夫权重映射到隐藏维度
-            configs.get('graph_hidden_dim', 32) * (configs.get('chebyshev_order', 3) + 1),  # 输入维度
+            self.graph_dim * (self.cheb + 1),  # 输入维度
             self.d_model  # 输出维度
         )

View File

@@ -127,8 +127,11 @@ class ASTRA(nn.Module):
         self.gpt_layers = configs['gpt_layers']  # 使用的GPT2层数
         self.d_ff = configs['d_ff']  # 前馈网络隐藏层维度
         self.gpt_path = configs['gpt_path']  # 预训练GPT2模型路径
-        self.num_nodes = configs.get('num_nodes', 325)  # 节点数量
-        self.output_dim = configs.get('output_dim', 1)
+        self.num_nodes = configs['num_nodes']  # 节点数量
+        self.output_dim = configs['output_dim']
+        self.cheb = configs['cheb']
+        self.graph_dim = configs['graph_dim']
+        self.graph_embed_dim = configs['graph_embed_dim']
         self.word_choice = GumbelSoftmax(configs['word_num'])  # 词汇选择层
@@ -148,23 +151,23 @@ class ASTRA(nn.Module):
         self.word_embeddings = self.gpts.get_input_embeddings().weight.to(self.device)  # 词嵌入权重
         self.vocab_size = self.word_embeddings.shape[0]  # 词汇表大小
         self.mapping_layer = nn.Linear(self.vocab_size, 1)  # 映射层
-        self.reprogramming_layer = ReprogrammingLayer(self.d_model + configs.get('graph_hidden_dim', 32) * (configs.get('chebyshev_order', 3) + 1), self.n_heads, self.d_keys, self.d_llm)  # 重编程层
+        self.reprogramming_layer = ReprogrammingLayer(self.d_model + self.graph_dim * (self.cheb + 1), self.n_heads, self.d_keys, self.d_llm)  # 重编程层
         # 初始化图增强编码器
         self.graph_encoder = GraphEnhancedEncoder(
             K=configs.get('chebyshev_order', 3),  # Chebyshev多项式阶数
             in_dim=self.d_model,  # 输入特征维度
-            hidden_dim=configs.get('graph_hidden_dim', 32),  # 隐藏层维度
+            hidden_dim=self.graph_dim,  # 隐藏层维度
             num_nodes=self.num_nodes,  # 节点数量
-            embed_dim=configs.get('graph_embed_dim', 10),  # 节点嵌入维度
+            embed_dim=self.graph_embed_dim,  # 节点嵌入维度
             device=self.device,  # 运行设备
             temporal_dim=self.seq_len,  # 时间序列长度
             num_features=self.input_dim  # 特征通道数
         )
         self.graph_projection = nn.Linear(  # 图特征投影层每一k阶的切比雪夫权重映射到隐藏维度
-            configs.get('graph_hidden_dim', 32) * (configs.get('chebyshev_order', 3) + 1),  # 输入维度
+            self.graph_dim * (self.cheb + 1),  # 输入维度
             self.d_model  # 输出维度
         )
         self.out_mlp = nn.Sequential(

View File

@@ -19,7 +19,7 @@ class repst(nn.Module):
         self.gpt_layers = configs['gpt_layers']
         self.d_ff = configs['d_ff']
         self.gpt_path = configs['gpt_path']
-        self.output_dim = configs.get('output_dim', 1)
+        self.output_dim = configs['output_dim']
         self.word_choice = GumbelSoftmax(configs['word_num'])

View File

@@ -90,7 +90,7 @@ def main(model, data, debug=False):
 if __name__ == "__main__":
     # 调试用
     # model_list = ["iTransformer", "PatchTST", "HI"]
-    model_list = ["ASTRA_v3", "ASTRA_v2", "ASTRA", "REPST", "STAEFormer", "MTGNN", "iTransformer", "PatchTST", "HI"]
+    model_list = ["ASTRA_v3"]
     # model_list = ["MTGNN"]
     # dataset_list = ["AirQuality", "SolarEnergy", "PEMS-BAY", "METR-LA", "BJTaxi-InFlow", "BJTaxi-OutFlow", "NYCBike-InFlow", "NYCBike-OutFlow"]
     # dataset_list = ["AirQuality"]