# The data config section configures the data loader
data_config:
  # file_list_fn: path to a txt file where each line is the absolute path
  # to a .h5 file containing information from an event
  file_list_fn: /data2/deeplearning/ctlearn/tests/prototype_files_class_balanced.txt
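  # A minimal sketch of what each line of that txt file is expected to look like,
  # with hypothetical paths (not taken from this gist):
  #   /data2/deeplearning/ctlearn/data/gamma/event_000001.h5
  #   /data2/deeplearning/ctlearn/data/proton/event_000001.h5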
  # channels: list of image-like features that will be loaded for each image. Supports:
  #   image_charge: total charge received by each pixel during the event
  #   peak_times: time in seconds at which the energy peak occurred
  # CAUTION: ctalearn assumes that image_charge is always the first channel
  channels: [image_charge]
  # img_size: [img_width, img_length]
  img_size: [108, 108]
  # selected_tel_types: list of telescopes whose images will be loaded
  #   supported types: 'LST', 'MSTF'
  selected_tel_types: ['LST']
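  # Note: these labels follow the CTA telescope naming used by ctlearn, where 'LST'
  # is the Large-Sized Telescope and 'MSTF' is the Medium-Sized Telescope with a
  # FlashCam camera (an assumption based on upstream ctlearn conventions, not stated here)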
  # data_type: select between loading single images or sequences from the same event
  #   supported modes: 'single_tel', 'array'
  data_type: 'array'
  # min_triggers_per_event: discard events with fewer telescope triggers than this
  min_triggers_per_event: 1
# image_mapping_config: configuration for the mapping of images to numpy arrays
image_mapping_config:
  # hex_conversion_algorithm:
  #   supported options: oversampling
  hex_conversion_algorithm: oversampling
# preprocessing_config: configuration for the preprocessing of images
preprocessing_config:
  # normalization: apply normalization to the images.
  # Supported modes:
  #   null: no normalization
  #   log_normalization
  normalization: null
  # resize_mode: method for resizing images
  #   supported modes: 'interpolate'
  resize_mode: 'interpolate'
  # event_order_type: ordering of the images within an event in array mode
  # Supported modes:
  #   null: no reordering
  #   size: order images by their sum of charges
  event_order_type: size
  # event_order_reverse:
  #   if true, the images inside an event will be ordered from greatest to lowest
  event_order_reverse: true
  # min_imgs_per_seq: pads sequences shorter than this with zero images
  min_imgs_per_seq: 4
  # max_imgs_per_seq: truncates sequences longer than this
  max_imgs_per_seq: 4
  # CAUTION: If using the concat combine mode, min_imgs_per_seq must be equal to
  # max_imgs_per_seq, and both of them must match the fixed sequence length expected
  # by the model (the first dimension of input_shape)
  # sequence_padding: indicates if padding should be added before or after the original sequence
  #   supported modes: post, pre
  sequence_padding: post
  # sequence_truncating: indicates if sequences should be truncated at the beginning or at the end
  #   supported modes: post, pre
  sequence_truncating: post
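  # Worked example of the settings above, with min_imgs_per_seq = max_imgs_per_seq = 4,
  # post padding and post truncating:
  #   [img1, img2]                   -> [img1, img2, 0, 0]        (zero images appended)
  #   [img1, img2, img3, img4, img5] -> [img1, img2, img3, img4]  (trailing images dropped)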
# Configuration during training
train_config:
  epochs: 30     # epoch = whole pass of the training dataset
  batch_size: 16
  train_split: 0.9
  val_split: 0.1
  seed: 1111     # seed the random generators
  shuffle: true  # shuffle training dataset between epochs
  optimizer: adam
  learning_rate: 1.0e-04
  epsilon: 1.0e-08
  decay: 0.0
  loss: 'categorical_crossentropy'
  # metrics: list of metrics to collect during training and validation.
  # Supports:
  #   acc: training accuracy
  #   auc: area under the receiver operating characteristic (ROC) curve
  metrics: [acc, auc]
  # stop_early: metric to monitor for early stopping
  # Supports:
  #   loss: training loss
  #   val_loss: validation loss
  #   acc: training accuracy
  #   val_acc: validation accuracy
  #   null: disable early stopping
  # min_delta: minimum change in the monitored metric that registers as an improvement
  # patience: number of epochs without an improvement before stopping early
  stop_early: loss
  min_delta: 0
  patience: 3
  # class_weight: if true, example losses are scaled to give more importance to examples from underrepresented classes
  class_weight: false
  # fit_batch_first: if true, it first overfits the model to a single batch before starting training
  fit_batch_first: false
  # save_model: if false, the trained weights are discarded instead of saved
  save_model: false
model_config:
  # input_shape:
  #   for array mode with concatenation of the LSTM output, use [seq_length, img_width, img_height, n_channels]
  #   for array mode without concatenation of the LSTM output, use [null, img_width, img_height, n_channels]
  #   for single_tel mode, use [img_width, img_height, n_channels]
  input_shape: [4, 108, 108, 1]
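  # For reference, the shapes implied by the comments above with this file's settings
  # (108x108 images, 1 channel, sequences of 4 images):
  #   array mode with concat:    [4, 108, 108, 1]     (as used here)
  #   array mode without concat: [null, 108, 108, 1]
  #   single_tel mode:           [108, 108, 1]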
  # num_classes: number of classes to classify
  num_classes: 2
  # activation_function: activation function to be used after each layer
  activation_function: 'relu'
  # dropout_rate: fraction of units to randomly drop between layers during training, for regularization
  dropout_rate: 0.0
  # use_batchnorm: if true, adds batchnorm layers between convolutions
  use_batchnorm: true
  # l2_regularization: L2 regularization penalty applied to the kernel and bias of each layer
  l2_regularization: 0.0
  # cnn_layers: Conv2D layers
  cnn_layers:
    - {filters: 32, kernel_size: 3, use_maxpool: true}
    - {filters: 32, kernel_size: 3, use_maxpool: true}
    - {filters: 64, kernel_size: 3, use_maxpool: true}
    - {filters: 128, kernel_size: 3, use_maxpool: true}
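  # Rough sketch of the spatial downsampling through these four blocks, assuming
  # 'same' convolutions and 2x2 max pooling (an assumption, not stated in this file):
  #   108x108 -> 54x54 -> 27x27 -> 13x13 -> 6x6, with 128 feature maps after the last block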
  # lstm_units: dimensionality of the output of each LSTM cell
  #   if null or 0 the LSTM is skipped
  lstm_units: 2048
  # combine_mode: specifies how the encoding of each image in the sequence
  #   is to be combined. Supports:
  #   concat: outputs are stacked on top of one another
  #   last: only the last hidden state is returned
  #   attention: an attention mechanism is used to combine the hidden states
  combine_mode: attention
  # fcn_layers: Dense layers
  fcn_layers:
    - {units: 1024}
    - {units: 512}
  # the output is finally fed into a dense layer with a softmax activation and
  # a number of units equal to the number of classes
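# Putting the sections above together, the model sketched by this config is roughly:
# a shared CNN (the cnn_layers above) encodes each of the 4 telescope images, an LSTM
# with 2048 units processes the resulting sequence, its hidden states are combined with
# the attention combine_mode, and two dense layers (1024 and 512 units) feed the final
# softmax layer with num_classes units. This is a reading of the comments in this file,
# not an authoritative description of the ctalearn implementation.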