A pooling layer usually follows a convolution layer in a network and performs a downsampling operation: it further shrinks the feature map while enlarging the receptive field of the neurons. In Caffe, the pooling layer is part of vision_layer, and its declaration lives in the vision_layer.hpp header. The pooling layer involves relatively few operations; out of the box, Caffe only implements two methods here: max pooling and average pooling.
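To make the two methods concrete, here is a minimal, self-contained sketch (independent of Caffe; the sizes and values are invented for illustration) that applies a 2×2 max-pooling and average-pooling window with stride 2 to a 4×4 single-channel feature map:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  // Hypothetical 4x4 single-channel feature map; kernel 2x2, stride 2, no padding.
  const int H = 4, W = 4, K = 2, S = 2;
  const float in[H][W] = {{1, 3, 2, 0},
                          {4, 6, 5, 1},
                          {7, 2, 9, 8},
                          {0, 1, 3, 4}};
  const int PH = (H - K) / S + 1;   // pooled height = 2
  const int PW = (W - K) / S + 1;   // pooled width  = 2
  for (int ph = 0; ph < PH; ++ph) {
    for (int pw = 0; pw < PW; ++pw) {
      float max_v = in[ph * S][pw * S];
      float sum = 0.0f;
      for (int h = ph * S; h < ph * S + K; ++h) {
        for (int w = pw * S; w < pw * S + K; ++w) {
          max_v = std::max(max_v, in[h][w]);  // running maximum over the window
          sum += in[h][w];                    // running sum for the average
        }
      }
      std::printf("window (%d,%d): max = %g, ave = %g\n",
                  ph, pw, max_v, sum / (K * K));
    }
  }
  return 0;
}
```

Each 2×2 window of the input collapses to a single output value, so the 4×4 map becomes 2×2; which value survives depends only on whether max or average is chosen.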
The figure below shows a LeNet network structure: before the fully connected layers there are mainly two convolution layers and two pooling layers, where the sub-sampling layers are exactly the pooling operations. Pooling is applied over a given region.
Pooling in Caffe involves relatively few operations and has a simple structure. First look at its Forward_cpu function; during the forward pass the corresponding pooling method is selected according to the configured pool method in pooling_param.
```cpp
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int top_count = top[0]->count();
  // If top.size() > 1, the mask is written to top[1].
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;  // suppress warnings about uninitialized variables
  Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:  // this case implements max pooling
    // Initialize
    if (use_top_mask) {
      top_mask = top[1]->mutable_cpu_data();
      caffe_set(top_count, Dtype(-1), top_mask);
    } else {
      mask = max_idx_.mutable_cpu_data();
      caffe_set(top_count, -1, mask);
    }
    caffe_set(top_count, Dtype(-FLT_MAX), top_data);
    // The main loop
    for (int n = 0; n < bottom[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            // hstart/wstart/hend/wend are the pooling window's coordinates in
            // the input feature map: top-left (x1, y1) and bottom-right (x2, y2).
            int hstart = ph * stride_h_ - pad_h_;
            int wstart = pw * stride_w_ - pad_w_;
            int hend = min(hstart + kernel_h_, height_);
            int wend = min(wstart + kernel_w_, width_);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            const int pool_index = ph * pooled_width_ + pw;
            for (int h = hstart; h < hend; ++h) {
              for (int w = wstart; w < wend; ++w) {
                const int index = h * width_ + w;  // linear index inside the window
                if (bottom_data[index] > top_data[pool_index]) {  // running maximum
                  top_data[pool_index] = bottom_data[index];
                  if (use_top_mask) {
                    // record the index of the current maximum
                    top_mask[pool_index] = static_cast<Dtype>(index);
                  } else {
                    mask[pool_index] = index;
                  }
                }
              }
            }
          }
        }
        // compute offset: advance the pointers to the next channel's data
        bottom_data += bottom[0]->offset(0, 1);
        top_data += top[0]->offset(0, 1);
        if (use_top_mask) {
          top_mask += top[0]->offset(0, 1);
        } else {
          mask += top[0]->offset(0, 1);
        }
      }
    }
    break;
  case PoolingParameter_PoolMethod_AVE:  // average pooling
    for (int i = 0; i < top_count; ++i) {
      top_data[i] = 0;
    }
    // The main loop
    for (int n = 0; n < bottom[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            int hstart = ph * stride_h_ - pad_h_;
            int wstart = pw * stride_w_ - pad_w_;
            int hend = min(hstart + kernel_h_, height_ + pad_h_);
            int wend = min(wstart + kernel_w_, width_ + pad_w_);
            int pool_size = (hend - hstart) * (wend - wstart);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            hend = min(hend, height_);
            wend = min(wend, width_);
            for (int h = hstart; h < hend; ++h) {
              for (int w = wstart; w < wend; ++w) {
                top_data[ph * pooled_width_ + pw] += bottom_data[h * width_ + w];
              }
            }
            top_data[ph * pooled_width_ + pw] /= pool_size;  // take the average
          }
        }
        // compute offset: advance the pointers to the next channel's data
        bottom_data += bottom[0]->offset(0, 1);
        top_data += top[0]->offset(0, 1);
      }
    }
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
}
```
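The window coordinates above rely on pooled_height_ and pooled_width_ having been computed when the layer was reshaped. As a self-contained sketch of that size computation, assuming the ceil-style rounding used in the pooling layer's Reshape (if your Caffe version differs, treat the exact rounding as an assumption):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical hyper-parameters (names mirror the layer members above).
  int height = 28, width = 28;
  int kernel_h = 2, kernel_w = 2;
  int stride_h = 2, stride_w = 2;
  int pad_h = 0, pad_w = 0;

  // Ceil-style rounding: every input pixel falls into at least one window,
  // so the last window may run past the border and is clipped by the
  // min(hend, height_) / min(wend, width_) logic in the forward pass.
  int pooled_height = static_cast<int>(std::ceil(static_cast<float>(
      height + 2 * pad_h - kernel_h) / stride_h)) + 1;
  int pooled_width = static_cast<int>(std::ceil(static_cast<float>(
      width + 2 * pad_w - kernel_w) / stride_w)) + 1;

  std::printf("pooled output: %d x %d\n", pooled_height, pooled_width);  // 14 x 14
  return 0;
}
```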
Backward propagation of the error
The error propagation for the pooling layer follows the formula below:
\[\delta^l_j = \operatorname{upsample}\left(\delta^{l+1}_j\right) \cdot h'(a^l_j) \]
Here upsample performs the upsampling according to the pooling method that was used; its basic idea is to spread the error back onto the corresponding input positions that were sampled in the forward pass. Because the pooling layer itself is linear (it applies no nonlinearity), the h' factor can in fact be omitted.
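As a concrete, hand-made example of what upsample means, take a single 2×2 window with stride 2 and suppose the upstream error at the corresponding pooled output is 4:

\[
\operatorname{upsample}_{\text{ave}}(4)=\begin{pmatrix}1 & 1\\ 1 & 1\end{pmatrix},
\qquad
\operatorname{upsample}_{\text{max}}(4)=\begin{pmatrix}0 & 4\\ 0 & 0\end{pmatrix}
\]

For average pooling the error is spread evenly over the window (4/4 = 1 to each position); for max pooling the whole error is routed back to the single position that produced the maximum in the forward pass (assumed here to be the top-right element), and every other position receives zero.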
For the detailed derivation, see http://www.cnblogs.com/tornadomeet/p/3468450.html; reading it alongside the innermost part of the main loop in the code below makes the computation much clearer.
```cpp
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->cpu_diff();  // first get the diff of the top blob
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;  // suppress warnings about uninitialized variables
  const Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    // The main loop
    if (use_top_mask) {
      top_mask = top[1]->cpu_data();
    } else {
      mask = max_idx_.cpu_data();
    }
    for (int n = 0; n < top[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            const int index = ph * pooled_width_ + pw;
            // Route the error back to the input position recorded in the
            // mask during the max-pooling forward pass.
            const int bottom_index =
                use_top_mask ? top_mask[index] : mask[index];
            bottom_diff[bottom_index] += top_diff[index];
          }
        }
        bottom_diff += bottom[0]->offset(0, 1);
        top_diff += top[0]->offset(0, 1);
        if (use_top_mask) {
          top_mask += top[0]->offset(0, 1);
        } else {
          mask += top[0]->offset(0, 1);
        }
      }
    }
    break;
  case PoolingParameter_PoolMethod_AVE:
    // The main loop
    for (int n = 0; n < top[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            int hstart = ph * stride_h_ - pad_h_;
            int wstart = pw * stride_w_ - pad_w_;
            int hend = min(hstart + kernel_h_, height_ + pad_h_);
            int wend = min(wstart + kernel_w_, width_ + pad_w_);
            int pool_size = (hend - hstart) * (wend - wstart);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            hend = min(hend, height_);
            wend = min(wend, width_);
            for (int h = hstart; h < hend; ++h) {
              for (int w = wstart; w < wend; ++w) {
                // In average pooling the error at a top position is spread
                // evenly over its window: each bottom position receives
                // top_diff divided by the window size.
                bottom_diff[h * width_ + w] +=
                    top_diff[ph * pooled_width_ + pw] / pool_size;
              }
            }
          }
        }
        // offset: advance the pointers to the next channel's data
        bottom_diff += bottom[0]->offset(0, 1);
        top_diff += top[0]->offset(0, 1);
      }
    }
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
}
```
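One detail worth noting in both branches: bottom_diff is accumulated with += rather than assigned. When the stride is smaller than the kernel size the pooling windows overlap, so a single bottom position can receive error from several top positions; the caffe_set(..., Dtype(0), bottom_diff) call at the start of the function zeroes the buffer so this accumulation starts from zero.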