# FS-TFP/scripts/example_configs/pfedhpo/mini_graph_dc/pfedhpo.yaml
# pFedHPO example configuration for the mini-graph-dc federated graph task.
# --- Runtime / reproducibility ---
use_gpu: true
device: 0
outdir: graph_exp/pfedhpo
seed: 12345

# Early stopping: patience is set effectively infinite so HPO controls duration.
early_stop:
  patience: 100000
  improve_indicator_mode: mean

# Federation setup: simulated (standalone) FL, all clients sampled each round.
federate:
  mode: 'standalone'
  make_global_eval: false
  total_round_num: 4000
  sample_client_rate: 1.0
  share_local_model: false
  use_diff: true  # clients upload model deltas rather than full weights

# Dataset: mini graph data-collection benchmark, loaded via PyTorch Geometric.
data:
  root: data/
  type: mini-graph-dc
dataloader:
  type: pyg

# Model: GIN for graph-level tasks.
model:
  task: graph
  type: gin
  hidden: 64

# Personalization: these modules stay local and are excluded from aggregation.
personalization:
  local_param: ['encoder_atom', 'encoder', 'clf']

# Local training: one epoch of SGD per round.
train:
  batch_or_epoch: epoch
  local_update_steps: 1
  optimizer:
    type: SGD
trainer:
  type: graphminibatch_trainer

# Evaluation every round on all three splits.
eval:
  freq: 1
  metrics: ['acc', 'correct']
  count_flops: false
  split: ['train', 'val', 'test']

# Hyperparameter optimization via pFedHPO over the discrete search space in `ss`.
hpo:
  scheduler: pfedhpo
  num_workers: 0
  pfedhpo:
    use: true
    discrete: true
    train_fl: false
    train_anchor: false
    target_fl_total_round: 400
    ss: 'scripts/example_configs/pfedhpo/mini_graph_dc/ss.yaml'
    metric: 'client_summarized_weighted_avg.val_avg_loss'
    working_folder: graph_exp/pfedhpo/working