Replace json with yaml for configuration.

This commit is contained in:
Yaguang 2018-04-18 11:51:35 -07:00
parent 3e94a0ff0e
commit e93435c598
11 changed files with 113 additions and 104 deletions

View File

@ -14,6 +14,7 @@ Yaguang Li, Rose Yu, Cyrus Shahabi, Yan Liu, [Diffusion Convolutional Recurrent
- numpy>=1.12.1
- pandas>=0.19.2
- tensorflow>=1.3.0
- pyyaml
Dependency can be installed using the following command:

View File

@ -1 +0,0 @@
{"verbose": 0, "num_rnn_layers": 2, "min_learning_rate": 2e-06, "epochs": 100, "patience": 50, "test_ratio": 0.2, "cl_decay_steps": 2000, "write_db": false, "epoch": 100, "max_diffusion_step": 2, "lr_decay_epoch": 20, "dropout": 0.0, "log_dir": "data/model/dcrnn_DR_2_h_12_64-64_lr_0.01_bs_64_d_0.00_sl_12_MAE_1207002222/", "validation_ratio": 0.1, "data_type": "ALL", "learning_rate": 0.01, "batch_size": 64, "filter_type": "dual_random_walk", "graph_pkl_filename": "data/sensor_graph/adj_mx.pkl", "max_grad_norm": 5.0, "model_filename": "data/model/dcrnn_DR_2_h_12_64-64_lr_0.01_bs_64_d_0.00_sl_12_MAE_1207002222/models-1.6253-35451", "global_step": 35451, "use_cpu_only": false, "l1_decay": 0.0, "loss_func": "MAE", "lr_decay": 0.1, "lr_decay_interval": 10, "test_every_n_epochs": 10, "horizon": 12, "null_val": 0.0, "use_curriculum_learning": true, "seq_len": 12, "base_dir": "data/model", "rnn_units": 64}

View File

@ -0,0 +1,35 @@
---
base_dir: data/model
batch_size: 64
cl_decay_steps: 2000
data_type: ALL
dropout: 0
epoch: 100
epochs: 100
filter_type: dual_random_walk
global_step: 35451
graph_pkl_filename: data/sensor_graph/adj_mx.pkl
horizon: 12
l1_decay: 0
learning_rate: 0.01
log_dir: data/model/dcrnn_DR_2_h_12_64-64_lr_0.01_bs_64_d_0.00_sl_12_MAE_1207002222/
loss_func: MAE
lr_decay: 0.1
lr_decay_epoch: 20
lr_decay_interval: 10
max_diffusion_step: 2
max_grad_norm: 5
min_learning_rate: 2.0e-06
model_filename: data/model/dcrnn_DR_2_h_12_64-64_lr_0.01_bs_64_d_0.00_sl_12_MAE_1207002222/models-1.6253-35451
null_val: 0
num_rnn_layers: 2
patience: 50
rnn_units: 64
seq_len: 12
test_every_n_epochs: 10
test_ratio: 0.2
use_cpu_only: false
use_curriculum_learning: true
validation_ratio: 0.1
verbose: 0
write_db: false

View File

@ -1,34 +0,0 @@
{
"verbose": 0,
"num_rnn_layers": 2,
"epochs": 100,
"patience": 50,
"test_ratio": 0.2,
"cl_decay_steps": 2000,
"graph_pkl_filename": "data/sensor_graph/adj_mx.pkl",
"global_step": 0,
"max_diffusion_step": 2,
"epoch": 0,
"lr_decay_epoch": 20,
"learning_rate": 0.01,
"validation_ratio": 0.1,
"data_type": "ALL",
"dropout": 0.0,
"batch_size": 64,
"max_grad_norm": 5.0,
"min_learning_rate": 2e-06,
"use_cpu_only": false,
"l1_decay": 0.0,
"loss_func": "MAE",
"write_db": false,
"lr_decay": 0.1,
"lr_decay_interval": 10,
"test_every_n_epochs": 10,
"horizon": 12,
"null_val": 0.0,
"use_curriculum_learning": true,
"seq_len": 12,
"rnn_units": 64,
"base_dir": "data/model",
"filter_type": "dual_random_walk"
}

View File

@ -0,0 +1,33 @@
---
base_dir: data/model
batch_size: 64
cl_decay_steps: 2000
data_type: ALL
dropout: 0
epoch: 0
epochs: 100
filter_type: dual_random_walk
global_step: 0
graph_pkl_filename: data/sensor_graph/adj_mx.pkl
horizon: 12
l1_decay: 0
learning_rate: 0.01
loss_func: MAE
lr_decay: 0.1
lr_decay_epoch: 20
lr_decay_interval: 10
max_diffusion_step: 2
max_grad_norm: 5
min_learning_rate: 2.0e-06
null_val: 0
num_rnn_layers: 2
patience: 50
rnn_units: 64
seq_len: 12
test_every_n_epochs: 10
test_ratio: 0.2
use_cpu_only: false
use_curriculum_learning: true
validation_ratio: 0.1
verbose: 0
write_db: false

View File

@ -1,35 +0,0 @@
{
"verbose": 0,
"num_rnn_layers": 2,
"epochs": 100,
"patience": 50,
"test_ratio": 0.2,
"method_type": "GCRNN",
"cl_decay_steps": 2000,
"graph_pkl_filename": "data/sensor_graph/adj_mx.pkl",
"global_step": 0,
"max_diffusion_step": 2,
"epoch": 0,
"lr_decay_epoch": 20,
"learning_rate": 0.01,
"validation_ratio": 0.1,
"data_type": "ALL",
"dropout": 0.0,
"batch_size": 64,
"max_grad_norm": 5.0,
"min_learning_rate": 2e-06,
"use_cpu_only": false,
"l1_decay": 0.0,
"loss_func": "MAE",
"write_db": false,
"lr_decay": 0.1,
"lr_decay_interval": 10,
"test_every_n_epochs": 10,
"horizon": 3,
"null_val": 0.0,
"use_curriculum_learning": true,
"seq_len": 3,
"rnn_units": 16,
"base_dir": "data/model",
"filter_type": "random_walk"
}

View File

@ -0,0 +1,34 @@
---
base_dir: data/model
batch_size: 64
cl_decay_steps: 2000
data_type: ALL
dropout: 0
epoch: 0
epochs: 100
filter_type: random_walk
global_step: 0
graph_pkl_filename: data/sensor_graph/adj_mx.pkl
horizon: 3
l1_decay: 0
learning_rate: 0.01
loss_func: MAE
lr_decay: 0.1
lr_decay_epoch: 20
lr_decay_interval: 10
max_diffusion_step: 2
max_grad_norm: 5
method_type: GCRNN
min_learning_rate: 2.0e-06
null_val: 0
num_rnn_layers: 2
patience: 50
rnn_units: 16
seq_len: 3
test_every_n_epochs: 10
test_ratio: 0.2
use_cpu_only: false
use_curriculum_learning: true
validation_ratio: 0.1
verbose: 0
write_db: false

View File

@ -2,10 +2,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pandas as pd
import tensorflow as tf
import yaml
from lib import log_helper
from lib.dcrnn_utils import load_graph_data
@ -47,7 +47,7 @@ flags.DEFINE_integer('verbose', -1, '1: to log individual sensor information.')
def main():
# Reads graph data.
with open(FLAGS.config_filename) as f:
supervisor_config = json.load(f)
supervisor_config = yaml.load(f)
logger = log_helper.get_logger(supervisor_config.get('base_dir'), 'info.log')
logger.info('Loading graph from: ' + FLAGS.graph_pkl_filename)
sensor_ids, sensor_id_to_ind, adj_mx = load_graph_data(FLAGS.graph_pkl_filename)

View File

@ -2,13 +2,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import numpy as np
import os
import sys
import tensorflow as tf
import time
import yaml
from lib import log_helper
from lib import metrics
@ -188,7 +188,7 @@ class TFModelSupervisor(object):
@staticmethod
def _get_config_filename(epoch):
return 'config_%02d.json' % epoch
return 'config_%02d.yaml' % epoch
def restore(self, sess, config):
"""
@ -212,7 +212,7 @@ class TFModelSupervisor(object):
config['model_filename'] = saver.save(sess, os.path.join(self._log_dir, 'models-%.4f' % val_loss),
global_step=global_step, write_meta_graph=False)
with open(os.path.join(self._log_dir, config_filename), 'w') as f:
json.dump(config, f)
yaml.dump(config, f)
return config['model_filename']
def test_and_write_result(self, sess, global_step, **kwargs):

View File

@ -2,4 +2,5 @@ hyperopt>=0.1
scipy>=0.19.0
numpy>=1.12.1
pandas>=0.19.2
pyyaml
tensorflow>=1.3.0

View File

@ -1,8 +1,8 @@
import json
import os
import pandas as pd
import sys
import tensorflow as tf
import yaml
from lib.dcrnn_utils import load_graph_data
from model.dcrnn_supervisor import DCRNNSupervisor
@ -18,10 +18,10 @@ def run_dcrnn(traffic_reading_df):
log_dir = os.path.join('data/model', run_id)
config_filename = 'config_100.json'
config_filename = 'config_100.yaml'
graph_pkl_filename = 'data/sensor_graph/adj_mx.pkl'
with open(os.path.join(log_dir, config_filename)) as f:
config = json.load(f)
config = yaml.load(f)
tf_config = tf.ConfigProto()
if FLAGS.use_cpu_only:
tf_config = tf.ConfigProto(device_count={'GPU': 0})
@ -38,31 +38,6 @@ def run_dcrnn(traffic_reading_df):
print('Predictions saved as data/results/dcrnn_seq2seq_prediction_[1-12].h5...')
# def run_fc_lstm(traffic_reading_df):
# run_id = 'fclstm_h_12_256-256_lr_0.0001_bs_64_d_0.00_sl_12_MAE_1026175218'
# log_dir = os.path.join('data/model', run_id)
#
# config_filename = 'config_59.json'
# # graph_pkl_filename = 'data/sensor_graph/sensor_graph_exp.pkl'
# with open(os.path.join(log_dir, config_filename)) as f:
# config = json.load(f)
# tf_config = tf.ConfigProto()
# if FLAGS.use_cpu_only:
# tf_config = tf.ConfigProto(device_count={'GPU': 0})
# tf_config.gpu_options.allow_growth = True
# # Set small entries to zero for sparseness.
# with tf.Session(config=tf_config) as sess:
# supervisor = LSTMSeq2SeqSupervisor(traffic_reading_df, config=config)
# supervisor.restore(sess, config=config)
# df_preds = supervisor.test_and_write_result(sess, config['global_step'])
# for horizon_i in df_preds:
# df_pred = df_preds[horizon_i]
# # filename = os.path.join('data/results/', 'gcrnn_seq2seq_prediction_%d.h5' % (horizon_i + 1))
# filename = os.path.join('data/results/', 'fc_lstm_prediction_%d.h5' % (horizon_i + 1))
# df_pred.to_hdf(filename, 'results')
# print('Predictions saved as data/results/fc_lstm_prediction_[1-12].h5...')
if __name__ == '__main__':
sys.path.append(os.getcwd())
traffic_df_filename = 'data/df_highway_2012_4mon_sample.h5'