This repository has been archived by the owner on Dec 30, 2019. It is now read-only.

feat/pooling: stride before padding argument
The original issue is autumnai/collenchyma-nn#9
Bernhard Schuster committed Apr 26, 2017
1 parent a16437c commit 4ae37c0
Showing 4 changed files with 44 additions and 14 deletions.
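
For callers, the change is purely positional: window stays first, followed now by stride, then padding. Because both swapped parameters are &[i32], a stale call site still compiles and silently misreads its arguments, so every caller has to be updated by hand (as the test changes below show). A minimal before/after sketch, using the shapes from the tests:

    // before this commit: (window, padding, stride)
    // let conf = Pooling::<f32>::new_pooling_config(&backend, &[2, 2], &[0, 0], &[2, 2]).unwrap();
    // after this commit: (window, stride, padding)
    let conf = Pooling::<f32>::new_pooling_config(&backend, &[2, 2], &[2, 2], &[0, 0]).unwrap();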
4 changes: 2 additions & 2 deletions src/frameworks/cuda/mod.rs
@@ -911,8 +911,8 @@ impl<T> Pooling<T> for Backend<Cuda>
 {
     fn new_pooling_config(&self,
                           window: &[i32],
-                          padding: &[i32],
-                          stride: &[i32])
+                          stride: &[i32],
+                          padding: &[i32])
                           -> Result<Self::CPOOL, ::co::error::Error> {
         let pooling_avg = ::cudnn::PoolingDescriptor::new(::cudnn::cudnnPoolingMode_t::CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, window, padding, stride).unwrap();
         let pooling_max =
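
Note that the cudnn wrapper itself keeps its (window, padding, stride) ordering, so only the trait-facing signature changes in this file: the body already passes each slice by name, not by position. Condensed from the hunk above (error handling unchanged, remainder of the constructor elided):

    fn new_pooling_config(&self, window: &[i32], stride: &[i32], padding: &[i32])
                          -> Result<Self::CPOOL, ::co::error::Error> {
        // received as (stride, padding), handed to cudnn as (padding, stride)
        let pooling_avg = ::cudnn::PoolingDescriptor::new(::cudnn::cudnnPoolingMode_t::CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, window, padding, stride).unwrap();
        // rest of the constructor elided
    }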
14 changes: 7 additions & 7 deletions src/frameworks/native/mod.rs
@@ -412,13 +412,13 @@ impl<T> ::plugin::Pooling<T> for Backend<Native>
 {
     fn new_pooling_config(&self,
                           window: &[i32],
-                          padding: &[i32],
-                          stride: &[i32])
+                          stride: &[i32],
+                          padding: &[i32])
                           -> Result<Self::CPOOL, ::co::error::Error> {
         Ok(helper::PoolingConfig {
             window: window.to_vec(),
-            padding: padding.to_vec(),
             stride: stride.to_vec(),
+            padding: padding.to_vec(),
         })
     }
 
@@ -513,8 +513,8 @@ impl<T> ::plugin::Pooling<T> for Backend<Native>
     input_idx_base: &mut [usize],
     window: &[i32],
     depth: usize,
-    padding: &[i32],
     stride: &[i32],
+    padding: &[i32],
     output: &mut [T],
     output_stride: &[usize],
     output_dim: &[usize],
Expand All @@ -541,8 +541,8 @@ impl<T> ::plugin::Pooling<T> for Backend<Native>
input_idx_base,
window,
depth + 1,
padding,
&stride[1..],
padding,
output,
&output_stride[1..],
&output_dim[1..],
@@ -570,8 +570,8 @@ impl<T> ::plugin::Pooling<T> for Backend<Native>
         output_idx.resize(output_dim.len(), 0);
 
         let window = &config.window[..];
-        let padding = &config.padding[..];
         let stride = &config.stride[..];
+        let padding = &config.padding[..];
         // do everything for each batch
         for batch in 0..input_dim[0] {
             // iterate over the batches!
@@ -591,8 +591,8 @@ impl<T> ::plugin::Pooling<T> for Backend<Native>
                 &mut input_idx,
                 &window,
                 0,
-                &padding,
                 &stride,
+                &padding,
                 output,
                 &output_stride[2..],
                 &output_dim[2..],
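
As the hunks above show, the native backend walks the spatial dimensions recursively: each level peels the leading entry off stride (hence &stride[1..] in the recursive call), while window and padding are passed through whole. A toy, self-contained sketch of that traversal pattern (not the crate's actual kernel, which also reduces the pooling window at each step):

    fn walk(depth: usize, stride: &[i32], padding: &[i32]) {
        if stride.is_empty() {
            // innermost dimension reached; the real kernel scans the window here
            return;
        }
        println!("depth {}: stride {}, padding {:?}", depth, stride[0], padding);
        walk(depth + 1, &stride[1..], padding);
    }

    fn main() {
        walk(0, &[2, 1], &[0, 0]);
    }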
2 changes: 1 addition & 1 deletion src/plugin.rs
@@ -409,7 +409,7 @@ pub trait LRN<F> : NN<F> {
 /// Provides the functionality for a Backend to support Pooling operations.
 pub trait Pooling<F> : NN<F> {
     /// Creates a new PoolingConfig, which needs to be passed to further pooling Operations.
-    fn new_pooling_config(&self, window: &[i32], padding: &[i32], stride: &[i32])
+    fn new_pooling_config(&self, window: &[i32], stride: &[i32], padding: &[i32])
         -> Result<Self::CPOOL, ::co::error::Error>;
 
     /// Computes non-linear down-sampling ([max Pooling][pooling]) over the input Tensor `x`.
38 changes: 34 additions & 4 deletions src/tests/pooling.rs
@@ -49,7 +49,7 @@ pub fn test_pooling_max<T, F: IFramework>(backend: Backend<F>)
 
     let x = filled_tensor(&backend,&[4, 4, 4, 4], &inp);
     let mut r = SharedTensor::<T>::new(&[4, 4, 2, 4]);
-    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[0, 0], &[2, 1])
+    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[2, 1], &[0, 0])
         .unwrap();
 
     backend.pooling_max(&x, &mut r, &conf).unwrap();
@@ -69,7 +69,7 @@ pub fn test_pooling_max_grad<T, F: IFramework>(backend: Backend<F>)
     let dx = filled_tensor(&backend,&[4, 4, 4, 4], &inp);
     let r = filled_tensor(&backend,&[4, 4, 2, 2], &inp[0..64]);
     let mut dr = SharedTensor::<T>::new(&[4, 4, 2, 2]);
-    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[0, 0], &[2, 2])
+    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[2, 2], &[0, 0])
         .unwrap();
 
     backend.pooling_max_grad(&x, &dx, &r, &mut dr, &conf).unwrap();
@@ -90,7 +90,7 @@ pub fn test_pooling_avg<T, F: IFramework>(backend: Backend<F>)
 
     let x = filled_tensor(&backend, &[4, 4, 4, 4], &inp);
     let mut r = SharedTensor::<T>::new(&[4, 4, 2, 2]);
-    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[0, 0], &[2, 2])
+    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[2, 2], &[0, 0])
         .unwrap();
 
     backend.pooling_avg(&x, &mut r, &conf).unwrap();
@@ -111,7 +111,7 @@ pub fn test_pooling_avg_grad<T, F: IFramework>(backend: Backend<F>)
     let dx = filled_tensor(&backend, &[8, 4, 4, 4], &inp);
     let r = filled_tensor(&backend, &[8, 4, 2, 2], &inp[0..128]);
    let mut dr = SharedTensor::<T>::new(&[8, 4, 2, 2]);
-    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[0, 0], &[2, 2])
+    let conf = Pooling::<T>::new_pooling_config(&backend, &[2, 2], &[2, 2], &[0, 0])
         .unwrap();
 
     backend.pooling_avg_grad(&x, &dx, &r, &mut dr, &conf).unwrap();
@@ -124,6 +124,36 @@ pub fn test_pooling_avg_grad<T, F: IFramework>(backend: Backend<F>)
     tensor_assert_eq(&dr, &dr_test, 1.0);
 }
 
+pub fn cross_test_pooling_max<F: IFramework, G: IFramework>(backend_a: Backend<F>, backend_b: Backend<G>)
+    where
+        Backend<F>: Pooling<f32> + IBackend,
+        Backend<G>: Pooling<f32> + IBackend {
+
+    let mut inp = vec![1.0; 256];
+    inp[0] = 2.0;
+
+    let lower : f32 = -128.;
+    let upper : f32 = 127.;
+    let x = uniformly_random_tensor(&backend_a, &[4, 4, 4, 4], lower, upper);
+
+    let mut r_a = SharedTensor::<f32>::new(&[4, 4, 2, 4]);
+    let mut r_b = SharedTensor::<f32>::new(&[4, 4, 2, 4]);
+
+    let conf_a = Pooling::<f32>::new_pooling_config(&backend_a, &[2, 2], &[2, 1], &[0, 0])
+        .unwrap();
+    let conf_b = Pooling::<f32>::new_pooling_config(&backend_b, &[2, 2], &[2, 1], &[0, 0])
+        .unwrap();
+
+    backend_a.pooling_max(&x, &mut r_a, &conf_a).unwrap();
+    backend_b.pooling_max(&x, &mut r_b, &conf_b).unwrap();
+    tensor_assert_eq_tensor(&r_a, &r_b, 3.0);
+}
+
+mod cross {
+    use super::*;
+    test_cross!(cross_test_pooling_max, cross_test_pooling_max_f32);
+}
+
 mod cuda {
     use super::*;
     test_cuda!(test_pooling_avg, pooling_avg_f32, pooling_avg_f64);
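
For reading the expected result shapes in these tests: with floor-mode pooling, the per-dimension output length follows the usual relation out = (in + 2 * pad - window) / stride + 1 (the standard convention, stated here for orientation rather than taken from this crate's docs). A small stand-alone check against the avg-pooling test above:

    fn pooled_len(input: i32, window: i32, stride: i32, padding: i32) -> i32 {
        // floor((input + 2 * padding - window) / stride) + 1
        (input + 2 * padding - window) / stride + 1
    }

    fn main() {
        // test_pooling_avg: input [4, 4, 4, 4], window [2, 2], stride [2, 2], padding [0, 0]
        assert_eq!(pooled_len(4, 2, 2, 0), 2); // matches the [4, 4, 2, 2] result shape
    }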
