From e041038ed82fb432feca6cb4e567e272a6a99d60 Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 25 Mar 2022 10:08:43 +0800
Subject: [PATCH] Optimize redundant comments

---
 README.md                 | 2 +-
 lib/metrics.py            | 5 -----
 lib/utils.py              | 5 -----
 model/stden_supervisor.py | 2 +-
 4 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index b3e8e0e..ef9d945 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 This is the implementation of Spatio-temporal Differential Equation Network (STDEN) in the following paper:
 
 Jiahao Ji, Jingyuan Wang, Zhe Jiang, Jiawei Jiang, and Hu Zhang, Towards Physics-guided Neural Networks for Traffic Flow Prediction, AAAI 2022.
-Thanks [chnsh](https://github.com/chnsh/DCRNN_PyTorch) for the model training framework of this project.
+The training framework of this project comes from [chnsh](https://github.com/chnsh/DCRNN_PyTorch). Thanks a lot :)
 
 ## Requirement
 
diff --git a/lib/metrics.py b/lib/metrics.py
index e7b2f90..b53d947 100644
--- a/lib/metrics.py
+++ b/lib/metrics.py
@@ -1,7 +1,6 @@
 import torch
 
 def masked_mae_loss(y_pred, y_true):
-    # print('y_pred: ', y_pred.shape, 'y_true: ', y_true.shape)
     y_true[y_true < 1e-4] = 0
     mask = (y_true != 0).float()
     mask /= mask.mean()  # assign the sample weights of zeros to nonzero-values
@@ -12,23 +11,19 @@ def masked_mae_loss(y_pred, y_true):
     return loss.mean()
 
 def masked_mape_loss(y_pred, y_true):
-    # print('y_pred: ', y_pred.shape, 'y_true: ', y_true.shape)
     y_true[y_true < 1e-4] = 0
     mask = (y_true != 0).float()
     mask /= mask.mean()
     loss = torch.abs((y_pred - y_true) / y_true)
     loss = loss * mask
-    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
     loss[loss != loss] = 0
     return loss.mean()
 
 def masked_rmse_loss(y_pred, y_true):
     y_true[y_true < 1e-4] = 0
-    # print('y_pred: ', y_pred.shape, 'y_true: ', y_true.shape)
     mask = (y_true != 0).float()
     mask /= mask.mean()
     loss = torch.pow(y_pred - y_true, 2)
     loss = loss * mask
-    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
     loss[loss != loss] = 0
     return torch.sqrt(loss.mean())
diff --git a/lib/utils.py b/lib/utils.py
index 1364123..2afe84c 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -2,16 +2,11 @@ import logging
 import numpy as np
 import os
 import time
-import pickle
 import scipy.sparse as sp
 import sys
-# import tensorflow as tf
 import torch
 import torch.nn as nn
 
-from scipy.sparse import linalg
-
-
 class DataLoader(object):
     def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False):
         """
diff --git a/model/stden_supervisor.py b/model/stden_supervisor.py
index f464010..a1893d7 100644
--- a/model/stden_supervisor.py
+++ b/model/stden_supervisor.py
@@ -9,7 +9,7 @@ from torch.utils.tensorboard import SummaryWriter
 
 from lib import utils
 from model.stden_model import STDENModel
-from lib.metrics import masked_mae_loss, masked_mape_loss, masked_mse_loss, masked_rmse_loss
+from lib.metrics import masked_mae_loss, masked_mape_loss, masked_rmse_loss
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
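
Background on the comments removed from lib/metrics.py: the deleted "trick for nans" links explained why the masked losses set loss[loss != loss] = 0. Below is a minimal, self-contained sketch of that behavior, reproducing masked_mape_loss exactly as it reads after this patch; the toy tensors are illustrative only and not taken from the repo.

import torch

def masked_mape_loss(y_pred, y_true):
    # Function body copied from lib/metrics.py as it reads after this patch.
    y_true[y_true < 1e-4] = 0
    mask = (y_true != 0).float()
    mask /= mask.mean()  # shift the sample weight of zero targets onto nonzero ones
    loss = torch.abs((y_pred - y_true) / y_true)
    loss = loss * mask
    # Zero targets make the division produce inf (or nan for 0/0); multiplying
    # inf by the zero mask gives nan, and this line zeroes those entries so
    # they do not poison the mean.
    loss[loss != loss] = 0
    return loss.mean()

# Toy check (illustrative values): the zero-valued target contributes nothing.
y_true = torch.tensor([0.0, 2.0, 4.0])
y_pred = torch.tensor([1.0, 1.0, 5.0])
print(masked_mape_loss(y_pred, y_true))  # tensor(0.3750) -- finite, no nan

masked_rmse_loss applies the same guard, as shown in the last lib/metrics.py hunk above.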