# TrafficWheel/trainer/Trainer.py
import math
import os
import time
import copy
import psutil
from tqdm import tqdm
import torch
from lib.logger import get_logger
from lib.loss_function import all_metrics


class TrainingStats:
    def __init__(self, device):
        self.device = device
        self.reset()

    def reset(self):
        self.gpu_mem_usage_list = []
        self.cpu_mem_usage_list = []
        self.train_time_list = []
        self.infer_time_list = []
        self.total_iters = 0
        self.start_time = None
        self.end_time = None

    def start_training(self):
        self.start_time = time.time()

    def end_training(self):
        self.end_time = time.time()

    def record_step_time(self, duration, mode):
        """Record the duration of a single step and update the total iteration count."""
        if mode == 'train':
            self.train_time_list.append(duration)
        else:
            self.infer_time_list.append(duration)
        self.total_iters += 1

    def record_memory_usage(self):
        """Record current GPU and CPU memory usage."""
        process = psutil.Process(os.getpid())
        cpu_mem = process.memory_info().rss / (1024 ** 2)
        if torch.cuda.is_available():
            gpu_mem = torch.cuda.max_memory_allocated(device=self.device) / (1024 ** 2)
            torch.cuda.reset_peak_memory_stats(device=self.device)
        else:
            gpu_mem = 0.0
        self.cpu_mem_usage_list.append(cpu_mem)
        self.gpu_mem_usage_list.append(gpu_mem)

    def report(self, logger):
        """Log summary statistics at the end of training."""
        if not self.start_time or not self.end_time:
            logger.warning("TrainingStats: start/end time not recorded properly.")
            return
        total_time = self.end_time - self.start_time
        avg_gpu_mem = sum(self.gpu_mem_usage_list) / len(self.gpu_mem_usage_list) if self.gpu_mem_usage_list else 0
        avg_cpu_mem = sum(self.cpu_mem_usage_list) / len(self.cpu_mem_usage_list) if self.cpu_mem_usage_list else 0
        avg_train_time = sum(self.train_time_list) / len(self.train_time_list) if self.train_time_list else 0
        avg_infer_time = sum(self.infer_time_list) / len(self.infer_time_list) if self.infer_time_list else 0
        iters_per_sec = self.total_iters / total_time if total_time > 0 else 0
        logger.info("===== Training Summary =====")
        logger.info(f"Total training time: {total_time:.2f} s")
        logger.info(f"Total iterations: {self.total_iters}")
        logger.info(f"Average iterations per second: {iters_per_sec:.2f}")
        logger.info(f"Average GPU Memory Usage: {avg_gpu_mem:.2f} MB")
        logger.info(f"Average CPU Memory Usage: {avg_cpu_mem:.2f} MB")
        if avg_train_time:
            logger.info(f"Average training step time: {avg_train_time*1000:.2f} ms")
        if avg_infer_time:
            logger.info(f"Average inference step time: {avg_infer_time*1000:.2f} ms")

class Trainer:
    def __init__(self, model, loss, optimizer, train_loader, val_loader, test_loader,
                 scaler, args, lr_scheduler=None):
        self.model = model
        self.loss = loss
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader
        self.scaler = scaler
        self.args = args
        self.lr_scheduler = lr_scheduler
        self.train_per_epoch = len(train_loader)
        self.val_per_epoch = len(val_loader) if val_loader else 0
        # Paths for saving models and logs
        self.best_path = os.path.join(args['log_dir'], 'best_model.pth')
        self.best_test_path = os.path.join(args['log_dir'], 'best_test_model.pth')
        self.loss_figure_path = os.path.join(args['log_dir'], 'loss.png')
        # Initialize logger
        if not os.path.isdir(args['log_dir']) and not args['debug']:
            os.makedirs(args['log_dir'], exist_ok=True)
        self.logger = get_logger(args['log_dir'], name=self.model.__class__.__name__, debug=args['debug'])
        self.logger.info(f"Experiment log path: {args['log_dir']}")
        # Stats tracker
        self.stats = TrainingStats(device=args['device'])

    def _run_epoch(self, epoch, dataloader, mode):
        if mode == 'train':
            self.model.train()
            optimizer_step = True
        else:
            self.model.eval()
            optimizer_step = False
        total_loss = 0
        epoch_time = time.time()
        with torch.set_grad_enabled(optimizer_step):
            with tqdm(total=len(dataloader), desc=f'{mode.capitalize()} Epoch {epoch}') as pbar:
                for batch_idx, (data, target) in enumerate(dataloader):
                    start_time = time.time()
                    label = target[..., :self.args['output_dim']]
                    output = self.model(data).to(self.args['device'])
                    if self.args['real_value']:
                        output = self.scaler.inverse_transform(output)
                    loss = self.loss(output, label)
                    if optimizer_step and self.optimizer is not None:
                        self.optimizer.zero_grad()
                        loss.backward()
                        if self.args['grad_norm']:
                            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
                        self.optimizer.step()
                    step_time = time.time() - start_time
                    self.stats.record_step_time(step_time, mode)
                    total_loss += loss.item()
                    if mode == 'train' and (batch_idx + 1) % self.args['log_step'] == 0:
                        self.logger.info(
                            f'Train Epoch {epoch}: {batch_idx + 1}/{len(dataloader)} Loss: {loss.item():.6f}')
                    pbar.update(1)
                    pbar.set_postfix(loss=loss.item())
        avg_loss = total_loss / len(dataloader)
        self.logger.info(
            f'{mode.capitalize()} Epoch {epoch}: average Loss: {avg_loss:.6f}, time: {time.time() - epoch_time:.2f} s')
        # Record memory usage for this epoch
        self.stats.record_memory_usage()
        return avg_loss

    def train_epoch(self, epoch):
        return self._run_epoch(epoch, self.train_loader, 'train')

    def val_epoch(self, epoch):
        return self._run_epoch(epoch, self.val_loader or self.test_loader, 'val')

    def test_epoch(self, epoch):
        return self._run_epoch(epoch, self.test_loader, 'test')

    def train(self):
        best_model, best_test_model = None, None
        best_loss, best_test_loss = float('inf'), float('inf')
        not_improved_count = 0
        self.stats.start_training()
        self.logger.info("Training process started")
        for epoch in range(1, self.args['epochs'] + 1):
            train_epoch_loss = self.train_epoch(epoch)
            val_epoch_loss = self.val_epoch(epoch)
            test_epoch_loss = self.test_epoch(epoch)
            if train_epoch_loss > 1e6:
                self.logger.warning('Gradient explosion detected. Ending...')
                break
            if val_epoch_loss < best_loss:
                best_loss = val_epoch_loss
                not_improved_count = 0
                best_model = copy.deepcopy(self.model.state_dict())
                self.logger.info('Best validation model saved!')
            else:
                not_improved_count += 1
            if self.args['early_stop'] and not_improved_count == self.args['early_stop_patience']:
                self.logger.info(
                    f"Validation performance didn't improve for {self.args['early_stop_patience']} epochs. Training stops.")
                break
            if test_epoch_loss < best_test_loss:
                best_test_loss = test_epoch_loss
                best_test_model = copy.deepcopy(self.model.state_dict())
        if not self.args['debug']:
            torch.save(best_model, self.best_path)
            torch.save(best_test_model, self.best_test_path)
            self.logger.info(f"Best models saved at {self.best_path} and {self.best_test_path}")
        self.stats.end_training()
        self.stats.report(self.logger)
        self._finalize_training(best_model, best_test_model)
        # Log the number of trainable parameters
        try:
            total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
            self.logger.info(f"Trainable params: {total_params}")
        except Exception:
            pass

    def _finalize_training(self, best_model, best_test_model):
        self.model.load_state_dict(best_model)
        self.logger.info("Testing on best validation model")
        self.test(self.model, self.args, self.test_loader, self.scaler, self.logger)
        self.model.load_state_dict(best_test_model)
        self.logger.info("Testing on best test model")
        self.test(self.model, self.args, self.test_loader, self.scaler, self.logger)

    @staticmethod
    def test(model, args, data_loader, scaler, logger, path=None):
        if path:
            checkpoint = torch.load(path)
            # Checkpoints saved by this trainer are plain state dicts; also accept
            # dict-style checkpoints that wrap the weights under 'state_dict'.
            state_dict = checkpoint['state_dict'] if isinstance(checkpoint, dict) and 'state_dict' in checkpoint else checkpoint
            model.load_state_dict(state_dict)
            model.to(args['device'])
        model.eval()
        y_pred, y_true = [], []
        with torch.no_grad():
            for data, target in data_loader:
                label = target[..., :args['output_dim']]
                output = model(data)
                y_pred.append(output)
                y_true.append(label)
        if args['real_value']:
            y_pred = scaler.inverse_transform(torch.cat(y_pred, dim=0))
        else:
            y_pred = torch.cat(y_pred, dim=0)
        y_true = torch.cat(y_true, dim=0)
        for t in range(y_true.shape[1]):
            mae, rmse, mape = all_metrics(y_pred[:, t, ...], y_true[:, t, ...],
                                          args['mae_thresh'], args['mape_thresh'])
            logger.info(f"Horizon {t + 1:02d}, MAE: {mae:.4f}, RMSE: {rmse:.4f}, MAPE: {mape:.4f}")
        mae, rmse, mape = all_metrics(y_pred, y_true, args['mae_thresh'], args['mape_thresh'])
        logger.info(f"Average Horizon, MAE: {mae:.4f}, RMSE: {rmse:.4f}, MAPE: {mape:.4f}")

    @staticmethod
    def _compute_sampling_threshold(global_step, k):
        # Inverse sigmoid decay for scheduled sampling: the probability of feeding
        # ground truth decays from near 1 toward 0 as global_step grows.
        return k / (k + math.exp(global_step / k))
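
# Illustrative usage (hypothetical sketch, not part of the original module).
# The keys shown for `args` are the configuration keys this module reads; the values
# and the model, loss, optimizer, data loaders, and scaler are placeholders to be
# supplied by the caller.
#
#     args = {
#         'log_dir': './experiments/run1', 'debug': False, 'device': 'cuda:0',
#         'output_dim': 1, 'real_value': True, 'grad_norm': True, 'max_grad_norm': 5,
#         'log_step': 20, 'epochs': 100, 'early_stop': True, 'early_stop_patience': 15,
#         'mae_thresh': None, 'mape_thresh': 0.0,
#     }
#     trainer = Trainer(model, loss_fn, optimizer, train_loader, val_loader,
#                       test_loader, scaler, args, lr_scheduler=None)
#     trainer.train()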