# @package _global_

# specify here the default configuration
# order of defaults determines the order in which configs override each other
defaults:
  - _self_
  - data: ljspeech
  - model: matcha
  - callbacks: default
  - logger: tensorboard # set logger here or use command line (e.g. `python train.py logger=tensorboard`)
  - trainer: default
  - paths: default
  - extras: default
  - hydra: default
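  # note: with `_self_` listed first, values set in this file act as base defaults
  # and can be overridden by the configs listed after it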

  # experiment configs allow for version control of specific hyperparameters
  # e.g. best hyperparameters for given model and datamodule
  - experiment: null
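  # `null` keeps the group disabled; select an experiment at launch with
  # e.g. `python train.py experiment=my_experiment` (hypothetical config name)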

  # config for hyperparameter optimization
  - hparams_search: null
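  # e.g. `python train.py hparams_search=my_search` (hypothetical config name)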

  # optional local config for machine/user specific settings
  # it's optional since it doesn't need to exist and is excluded from version control
  - optional local: default
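  # the `optional` keyword makes hydra skip this entry instead of failing
  # when no local/default.yaml exists in the config directory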

  # debugging config (enable through command line, e.g. `python train.py debug=default`)
  - debug: null

# task name, determines output directory path
task_name: "train"

run_name: ???
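# `???` is omegaconf's mandatory-value marker, so a run name must be supplied,
# e.g. `python train.py run_name=my_run` (hypothetical name)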

# tags to help you identify your experiments
# you can overwrite this in experiment configs
# overwrite from command line with `python train.py tags="[first_tag, second_tag]"`
tags: ["dev"]

# set False to skip model training
train: True

# evaluate on test set, using best model weights achieved during training
# lightning chooses best weights based on the metric specified in checkpoint callback
test: True
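# both flags are plain hydra overrides, e.g. `python train.py train=False test=True`
# would evaluate without retraining (assuming ckpt_path is also provided)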

# simply provide checkpoint path to resume training
ckpt_path: null
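# e.g. `python train.py ckpt_path=/path/to/checkpoint.ckpt` (hypothetical path)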

# seed for random number generators in pytorch, numpy and python.random
seed: 1234
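# presumably consumed via lightning's seed_everything in the entry script;
# override per run with e.g. `python train.py seed=42`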