diff --git a/phygnn/layers/custom_layers.py b/phygnn/layers/custom_layers.py
index c640ae8..712994a 100644
--- a/phygnn/layers/custom_layers.py
+++ b/phygnn/layers/custom_layers.py
@@ -12,7 +12,7 @@ class FlexiblePadding(tf.keras.layers.Layer):
     """Class to perform padding on tensors """

-    def __init__(self, paddings, mode='REFLECT'):
+    def __init__(self, paddings, mode='REFLECT', option='tf'):
         """
         Parameters
         ----------
@@ -21,13 +21,30 @@ def __init__(self, paddings, mode='REFLECT'):
             rank of the tensor and elements give the number of leading and
             trailing pads
         mode : str
-            tf.pad() padding mode. Can be REFLECT, CONSTANT,
+            tf.pad() / np.pad() padding mode. Can be REFLECT, CONSTANT,
             or SYMMETRIC
+        option : str
+            Option for TensorFlow padding ("tf") or numpy ("np"). Default is
+            tf for tensorflow training. We have observed silent failures of
+            tf.pad() with larger array sizes, so "np" might be preferable at
+            inference time on large chunks, but it is much slower when it has
+            to convert tensors to numpy arrays.
         """
         super().__init__()
         self.paddings = tf.constant(paddings)
         self.rank = len(paddings)
-        self.mode = mode
+        self.mode = mode.lower()
+        self.option = option.lower()
+
+        if self.option == 'tf':
+            self._pad_fun = tf.pad
+        elif self.option == 'np':
+            self._pad_fun = np.pad
+        else:
+            msg = ('FlexiblePadding option must be "tf" or "np" but '
+                   f'received: {self.option}')
+            logger.error(msg)
+            raise KeyError(msg)

     def compute_output_shape(self, input_shape):
         """Computes output shape after padding
@@ -62,8 +79,7 @@ def call(self, x):
            by compute_output_shape
         """
-        return tf.pad(x, self.paddings,
-                      mode=self.mode)
+        return self._pad_fun(x, self.paddings, mode=self.mode)


 class ExpandDims(tf.keras.layers.Layer):
@@ -168,6 +184,7 @@ def __init__(self, pool_size, strides=None, padding='valid', sigma=1,
         self.trainable = trainable
         self.sigma = sigma

+    # pylint: disable=unused-argument
     def build(self, input_shape):
         """Custom implementation of the tf layer build method.
@@ -475,11 +492,13 @@ def __init__(self, spatial_mult=1, temporal_mult=1,
             where the feature axis is unpacked into the temporal axis.
         t_roll : int
             Option to roll the temporal axis after expanding. When using
-            temporal_method="depth_to_time", the default (t_roll=0) will
-            add temporal steps after the input steps such that if input
-            temporal shape is 3 and the temporal_mult is 24x, the output will
-            have the original timesteps at idt=0,24,48 but if t_roll=12, the
-            output will have the original timesteps at idt=12,36,60
+            temporal_method="depth_to_time", the default (t_roll=0) will add
+            temporal steps after the input steps such that if input temporal
+            shape is 3 and the temporal_mult is 24x, the output will have the
+            index-0 timesteps at idt=0,24,48 but if t_roll=12, the output will
+            have the original timesteps at idt=12,36,60. This is no longer
+            recommended, as a positive roll will move the features of timestep
+            -1 from the end of the series to the beginning.
""" super().__init__() diff --git a/phygnn/version.py b/phygnn/version.py index 7e5a600..cbd42b5 100644 --- a/phygnn/version.py +++ b/phygnn/version.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- """Physics Guided Neural Network version.""" -__version__ = '0.0.29' +__version__ = '0.0.30' diff --git a/tests/test_layers.py b/tests/test_layers.py index 3a117e0..10758de 100644 --- a/tests/test_layers.py +++ b/tests/test_layers.py @@ -289,17 +289,17 @@ def test_flexible_padding(hidden_layers): layer = HiddenLayers(hidden_layers).layers[0] t = tf.constant([[1, 2, 3], [4, 5, 6]]) - if layer.mode == 'CONSTANT': + if layer.mode.upper() == 'CONSTANT': t_check = tf.constant([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0], [0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) - elif layer.mode == 'REFLECT': + elif layer.mode.upper() == 'REFLECT': t_check = tf.constant([[6, 5, 4, 5, 6, 5, 4], [3, 2, 1, 2, 3, 2, 1], [6, 5, 4, 5, 6, 5, 4], [3, 2, 1, 2, 3, 2, 1]]) - elif layer.mode == 'SYMMETRIC': + elif layer.mode.upper() == 'SYMMETRIC': t_check = tf.constant([[2, 1, 1, 2, 3, 3, 2], [2, 1, 1, 2, 3, 3, 2], [5, 4, 4, 5, 6, 6, 5],