use_gpu: True
device: 0
seed: 1
federate:
  mode: standalone
  total_round_num: 1000
  sample_client_rate: 0.2
data:
  root: data/
  type: shakespeare
  subsample: 0.2
  splits: [0.6, 0.2, 0.2]
model:
  model_num_per_trainer: 3
  type: lstm
  in_channels: 80
  out_channels: 80
  embed_size: 8
  hidden: 256
personalization:
  local_update_steps: 1
  lr: 1.5
train:
  batch_or_epoch: epoch
  local_update_steps: 1
  optimizer:
    lr: 1.5
    weight_decay: 0.0
criterion:
  type: character_loss
trainer:
  type: nlptrainer
eval:
  freq: 10
  metrics: ['acc', 'correct']
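
# Usage sketch (an assumption, not part of the original config): the key layout
# (federate.*, personalization.*, trainer.type: nlptrainer) matches a
# FederatedScope-style config, in which case the file is normally passed to the
# framework entry point, e.g.:
#   python federatedscope/main.py --cfg <path_to_this_file>.yaml
# The file path is a placeholder; substitute wherever this config is stored.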