Compare commits


No commits in common. "d92490b808ba5c5be2f23d427d96e9a56b066d7f" and "02fb2430f0d633926451552ba96711b6bd0b3612" have entirely different histories.

11 changed files with 27 additions and 107 deletions


@@ -2,38 +2,24 @@
![Diffusion Convolutional Recurrent Neural Network](figures/model_architecture.jpg "Model Architecture")
-This is a PyTorch implementation of Diffusion Convolutional Recurrent Neural Network in the following paper: \
+This is a TensorFlow implementation of Diffusion Convolutional Recurrent Neural Network in the following paper: \
Yaguang Li, Rose Yu, Cyrus Shahabi, Yan Liu, [Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic Forecasting](https://arxiv.org/abs/1707.01926), ICLR 2018.
## Requirements
-* torch
-* scipy>=0.19.0
-* numpy>=1.12.1
-* pandas>=0.19.2
-* pyyaml
-* statsmodels
-* tensorflow>=1.3.0
-* torch
-* tables
-* future
+- scipy>=0.19.0
+- numpy>=1.12.1
+- pandas>=0.19.2
+- pyaml
+- statsmodels
+- tensorflow>=1.3.0
Dependencies can be installed using the following command:
```bash
pip install -r requirements.txt
```
-### Comparison with the TensorFlow implementation
-MAE comparison on the METR-LA dataset (PEMS-BAY results coming soon):
-| Horizon | TensorFlow | PyTorch |
-|:--------|:----------:|:-------:|
-| 1 Hour  |    3.69    |  3.12   |
-| 30 Min  |    3.15    |  2.82   |
-| 15 Min  |    2.77    |  2.56   |
## Data Preparation
The traffic data files for Los Angeles (METR-LA) and the Bay Area (PEMS-BAY), i.e., `metr-la.h5` and `pems-bay.h5`, are available at [Google Drive](https://drive.google.com/open?id=10FOTa6HXPqX8Pf5WRoRwcFnW9BrNZEIX) or [Baidu Yun](https://pan.baidu.com/s/14Yy9isAIZYdU__OYEQGa_g), and should be
put into the `data/` folder.
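The `h5` files are pandas HDF5 stores; a minimal sketch of inspecting the data once downloaded (assuming the layout of the original DCRNN data release):

```python
# Sketch: peek at the METR-LA traffic data. Assumes the h5 file is a
# single-key pandas HDFStore, as in the original DCRNN data release.
import pandas as pd

df = pd.read_hdf('data/metr-la.h5')
print(df.shape)        # (num_timesteps, num_sensors); METR-LA has 207 sensors
print(df.index[:3])    # 5-minute-interval timestamps
print(df.columns[:3])  # sensor IDs
```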
@@ -74,10 +60,10 @@ Besides, the locations of sensors in Los Angeles, i.e., METR-LA, are available at
```bash
# METR-LA
-python run_demo_pytorch.py --config_filename=data/model/pretrained/METR-LA/config.yaml
+python run_demo.py --config_filename=data/model/pretrained/METR-LA/config.yaml
# PEMS-BAY
-python run_demo_pytorch.py --config_filename=data/model/pretrained/PEMS-BAY/config.yaml
+python run_demo.py --config_filename=data/model/pretrained/PEMS-BAY/config.yaml
```
The generated prediction of DCRNN is in `data/results/dcrnn_predictions`.
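The demo script writes its outputs with `np.savez_compressed` under the keys `prediction` and `truth` (see `run_demo_pytorch.py` at the end of this diff); a minimal sketch of reading them back, with the path taken from the script's default and the exact array shape depending on what `evaluate()` returns:

```python
# Sketch: load the saved DCRNN predictions and ground truth.
import numpy as np

results = np.load('data/dcrnn_predictions.npz')
prediction, truth = results['prediction'], results['truth']
print(prediction.shape)  # stacked per horizon step, e.g. (12, num_samples, num_nodes)
```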
@@ -85,11 +71,12 @@ The generated prediction of DCRNN is in `data/results/dcrnn_predictions`.
## Model Training
```bash
# METR-LA
-python dcrnn_train_pytorch.py --config_filename=data/model/dcrnn_la.yaml
+python dcrnn_train.py --config_filename=data/model/dcrnn_la.yaml
# PEMS-BAY
-python dcrnn_train_pytorch.py --config_filename=data/model/dcrnn_bay.yaml
+python dcrnn_train.py --config_filename=data/model/dcrnn_bay.yaml
```
Each epoch takes about 5 minutes for METR-LA and about 10 minutes for PEMS-BAY on a single GTX 1080 Ti.
There is a chance that the training loss will explode; the temporary workaround is to restart from the last saved model before the explosion, or to decrease the learning rate earlier in the learning rate schedule.
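A minimal sketch of that restart, assuming the `models/epo%d.tar` checkpoint naming used by `DCRNNSupervisor.load_model` further down this diff (the epoch number here is hypothetical):

```python
# Sketch: reload the last checkpoint saved before the loss exploded.
# 'models/epo42.tar' is a hypothetical last-good epoch; the supervisor
# saves one such file per epoch (see DCRNNSupervisor.load_model below).
import torch

checkpoint = torch.load('models/epo42.tar', map_location='cpu')
print(checkpoint.keys())  # e.g. 'model_state_dict', plus any saved training state
# hand checkpoint['model_state_dict'] to the model via load_state_dict()
# before resuming training, optionally with a lower base_lr
```

Judging by `load_model`, setting the `epoch` field of the training YAML to the last good epoch should have the same effect when the training script is rerun.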
@@ -100,15 +87,7 @@ There is a chance that the training loss will explode; the temporary workaround
python -m scripts.eval_baseline_methods --traffic_reading_filename=data/metr-la.h5
```
-### PyTorch Results
-![PyTorch Results](figures/result1.png "PyTorch Results")
-![PyTorch Results](figures/result2.png "PyTorch Results")
-![PyTorch Results](figures/result3.png "PyTorch Results")
-![PyTorch Results](figures/result4.png "PyTorch Results")
-More details are being added ...
## Citation

Binary file changed (contents not shown).


@@ -18,14 +18,14 @@ model:
  num_nodes: 207
  num_rnn_layers: 2
  output_dim: 1
-  rnn_units: 64
+  rnn_units: 16
  seq_len: 12
  use_curriculum_learning: true
train:
  base_lr: 0.01
  dropout: 0
-  epoch: 51
+  epoch: 0
  epochs: 100
  epsilon: 1.0e-3
  global_step: 0


@@ -3,12 +3,12 @@ from __future__ import division
from __future__ import print_function

import argparse
+import tensorflow as tf
import yaml

from lib.utils import load_graph_data
from model.pytorch.dcrnn_supervisor import DCRNNSupervisor


def main(args):
    with open(args.config_filename) as f:
        supervisor_config = yaml.load(f)
@@ -16,6 +16,9 @@ def main(args):
        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)

+        # if args.use_cpu_only:
+        #     tf_config = tf.ConfigProto(device_count={'GPU': 0})
+        #     with tf.Session(config=tf_config) as sess:
        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)

        supervisor.train()

4 binary image files deleted (Before sizes: 203 KiB, 287 KiB, 254 KiB, 205 KiB; contents not shown).


@@ -89,23 +89,11 @@ class DCRNNSupervisor:
        return 'models/epo%d.tar' % epoch

    def load_model(self):
-        self._setup_graph()
        assert os.path.exists('models/epo%d.tar' % self._epoch_num), 'Weights at epoch %d not found' % self._epoch_num
        checkpoint = torch.load('models/epo%d.tar' % self._epoch_num, map_location='cpu')
        self.dcrnn_model.load_state_dict(checkpoint['model_state_dict'])
        self._logger.info("Loaded model at {}".format(self._epoch_num))
-
-    def _setup_graph(self):
-        with torch.no_grad():
-            self.dcrnn_model = self.dcrnn_model.eval()
-
-            val_iterator = self._data['val_loader'].get_iterator()
-
-            for _, (x, y) in enumerate(val_iterator):
-                x, y = self._prepare_data(x, y)
-                output = self.dcrnn_model(x)
-                break

    def train(self, **kwargs):
        kwargs.update(self._train_kwargs)
        return self._train(**kwargs)
@@ -121,9 +109,6 @@ class DCRNNSupervisor:
            val_iterator = self._data['{}_loader'.format(dataset)].get_iterator()
            losses = []

-            y_truths = []
-            y_preds = []
-
            for _, (x, y) in enumerate(val_iterator):
                x, y = self._prepare_data(x, y)
@@ -131,33 +116,19 @@ class DCRNNSupervisor:
                loss = self._compute_loss(y, output)
                losses.append(loss.item())

-                y_truths.append(y.cpu())
-                y_preds.append(output.cpu())
-
            mean_loss = np.mean(losses)
            self._writer.add_scalar('{} loss'.format(dataset), mean_loss, batches_seen)

-            y_preds = np.concatenate(y_preds, axis=1)
-            y_truths = np.concatenate(y_truths, axis=1)  # concatenate on batch dimension
-
-            y_truths_scaled = []
-            y_preds_scaled = []
-            for t in range(y_preds.shape[0]):
-                y_truth = self.standard_scaler.inverse_transform(y_truths[t])
-                y_pred = self.standard_scaler.inverse_transform(y_preds[t])
-                y_truths_scaled.append(y_truth)
-                y_preds_scaled.append(y_pred)
-
-            return mean_loss, {'prediction': y_preds_scaled, 'truth': y_truths_scaled}
+            return mean_loss

    def _train(self, base_lr,
               steps, patience=50, epochs=100, lr_decay_ratio=0.1, log_every=1, save_model=1,
-               test_every_n_epochs=10, epsilon=1e-8, **kwargs):
+               test_every_n_epochs=10, **kwargs):
        # steps is used by the MultiStepLR learning-rate scheduler below
        min_val_loss = float('inf')
        wait = 0
-        optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
+        optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr)

        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps,
                                                            gamma=lr_decay_ratio)
@@ -188,7 +159,7 @@ class DCRNNSupervisor:
                if batches_seen == 0:
                    # this is a workaround to accommodate dynamically registered parameters in DCGRUCell
-                    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr, eps=epsilon)
+                    optimizer = torch.optim.Adam(self.dcrnn_model.parameters(), lr=base_lr)

                loss = self._compute_loss(y, output)
@@ -207,7 +178,7 @@ class DCRNNSupervisor:
            lr_scheduler.step()

            self._logger.info("evaluating now!")

-            val_loss, _ = self.evaluate(dataset='val', batches_seen=batches_seen)
+            val_loss = self.evaluate(dataset='val', batches_seen=batches_seen)

            end_time = time.time()
@@ -223,7 +194,7 @@ class DCRNNSupervisor:
                self._logger.info(message)

            if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1:
-                test_loss, _ = self.evaluate(dataset='test', batches_seen=batches_seen)
+                test_loss = self.evaluate(dataset='test', batches_seen=batches_seen)
                message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, lr: {:.6f}, ' \
                          '{:.1f}s'.format(epoch_num, epochs, batches_seen,
                                           np.mean(losses), test_loss, lr_scheduler.get_lr()[0],
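The `batches_seen == 0` branch above exists because DCGRUCell registers its parameters lazily: until the first forward pass, `model.parameters()` is empty, so an optimizer created earlier would be tracking nothing. A self-contained sketch of the effect (`LazyLinear` is illustrative, not part of this repo):

```python
# Sketch: a module that, like DCGRUCell, creates its weights on first use.
import torch
import torch.nn as nn

class LazyLinear(nn.Module):
    def __init__(self, out_features):
        super().__init__()
        self.out_features = out_features

    def forward(self, x):
        if not hasattr(self, 'weight'):
            # the parameter is only registered during the first forward pass
            self.weight = nn.Parameter(torch.randn(x.shape[-1], self.out_features))
        return x @ self.weight

m = LazyLinear(4)
print(len(list(m.parameters())))  # 0: an optimizer built now would update nothing
m(torch.randn(2, 3))              # first forward pass registers the weight
print(len(list(m.parameters())))  # 1: hence the optimizer re-creation above
```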


@@ -32,6 +32,6 @@ if __name__ == '__main__':
    parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
    parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
                        help='Config file for pretrained model.')
-    parser.add_argument('--output_filename', default='data/dcrnn_predictions_tf.npz')
+    parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
    args = parser.parse_args()
    run_dcrnn(args)


@@ -1,33 +0,0 @@
-import argparse
-import numpy as np
-import os
-import sys
-import yaml
-
-from lib.utils import load_graph_data
-from model.pytorch.dcrnn_supervisor import DCRNNSupervisor
-
-
-def run_dcrnn(args):
-    with open(args.config_filename) as f:
-        supervisor_config = yaml.load(f)
-
-        graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
-        sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(graph_pkl_filename)
-
-        supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
-        mean_score, outputs = supervisor.evaluate('test')
-        np.savez_compressed(args.output_filename, **outputs)
-        print("MAE : {}".format(mean_score))
-        print('Predictions saved as {}.'.format(args.output_filename))
-
-
-if __name__ == '__main__':
-    sys.path.append(os.getcwd())
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--use_cpu_only', default=False, type=str, help='Whether to run tensorflow on cpu.')
-    parser.add_argument('--config_filename', default='data/model/pretrained/METR-LA/config.yaml', type=str,
-                        help='Config file for pretrained model.')
-    parser.add_argument('--output_filename', default='data/dcrnn_predictions.npz')
-    args = parser.parse_args()
-    run_dcrnn(args)