The Implementation of SoftmaxWithLossLayer

Open include/caffe/layers/softmax_loss_layer.hpp to look at the declaration:

```c++
// Maps real-valued predictions to a probability distribution over classes via
// Softmax, then computes the multinomial logistic (cross-entropy) loss.
// This class is numerically more stable for gradient computation than a
// separate SoftmaxLayer followed by a MultinomialLogisticLossLayer.
// At test time, this layer can simply be replaced by a SoftmaxLayer.
/**
 * Input Blob 1 holds the predictions, shaped N x K x 1 x 1, where K is the
 * number of classes and N the batch size. Values lie in (-Inf, Inf) and are
 * the raw classification scores per class; the larger the score, the closer
 * the input image is judged to be to that class.
 * Input Blob 2 holds the ground-truth labels, shaped N x 1 x 1 x 1.
 * The output Blob holds the computed cross-entropy loss E, shaped 1 x 1 x 1 x 1.
**/
template <typename Dtype>
class SoftmaxWithLossLayer : public LossLayer<Dtype> {
 public:
   /**
    * @param param provides LossParameter loss_param, with options:
    *  - ignore_label (optional)
    *    Specify a label value that should be ignored when computing the loss.
    *  - normalize (optional, default true)
    *    If true, the loss is normalized by the number of (nonignored) labels
    *    present; otherwise the loss is simply summed over spatial locations.
    */
    explicit SoftmaxWithLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "SoftmaxWithLoss"; }
  virtual inline int ExactNumTopBlobs() const { return -1; }
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline int MaxTopBlobs() const { return 2; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
      
  /**
   * @brief Computes the softmax loss error gradient w.r.t. the predictions.
   *
   * Gradients cannot be computed with respect to the label inputs (bottom[1]),
   * so this method ignores bottom[1] and requires !propagate_down[1], crashing
   * if propagate_down[1] is set.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *      respect to the outputs
   *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
   *      This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$,
   *      as @f$ \lambda @f$ is the coefficient of this layer's output
   *      @f$\ell_i@f$ in the overall Net loss
   *      @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence
   *      @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$.
   *      (*Assuming that this top Blob is not used as a bottom (input) by any
   *      other layer of the Net.)
   * @param propagate_down see Layer::Backward.
   *      propagate_down[1] must be false as we can't compute gradients with
   *      respect to the labels.
   * @param bottom input Blob vector (length 2)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the predictions @f$ x @f$; Backward computes diff
   *      @f$ \frac{\partial E}{\partial x} @f$
   *   -# @f$ (N \times 1 \times 1 \times 1) @f$
   *      the labels -- ignored as we can't compute their error gradients
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
      
  /// Read the normalization mode parameter and compute the normalizer based
  /// on the blob size.  If normalization_mode is VALID, the count of valid
  /// outputs will be read from valid_count, unless it is -1 in which case
  /// all outputs are assumed to be valid.
  virtual Dtype get_normalizer(
      LossParameter_NormalizationMode normalization_mode, int valid_count);

  /// The internal SoftmaxLayer used to map predictions to a probability distribution.
  shared_ptr<Layer<Dtype> > softmax_layer_;
  /// prob stores the output probability predictions from the SoftmaxLayer.
  Blob<Dtype> prob_;
  /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward
  vector<Blob<Dtype>*> softmax_bottom_vec_;
  /// top vector holder used in call to the underlying SoftmaxLayer::Forward
  vector<Blob<Dtype>*> softmax_top_vec_;
  /// Whether to ignore instances with a certain label.
  bool has_ignore_label_;
  /// The label indicating that an instance should be ignored.
  int ignore_label_;
  /// How to normalize the output loss.
  LossParameter_NormalizationMode normalization_;

  int softmax_axis_, outer_num_, inner_num_;
};
```

Now let's look at the implementation in the corresponding .cpp file.

The first function is LayerSetUp:
```c++

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  // Copy this layer's LayerParameter and retype it on the fly so that it
  // configures the internal SoftmaxLayer
  LayerParameter softmax_param(this->layer_param_);
  softmax_param.set_type("Softmax");
  softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
  softmax_bottom_vec_.clear();
  softmax_bottom_vec_.push_back(bottom[0]);
  softmax_top_vec_.clear();
  softmax_top_vec_.push_back(&prob_);
  softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);

  has_ignore_label_ =
    this->layer_param_.loss_param().has_ignore_label();
  if (has_ignore_label_) {
    ignore_label_ = this->layer_param_.loss_param().ignore_label();
  }
  if (!this->layer_param_.loss_param().has_normalization() &&
      this->layer_param_.loss_param().has_normalize()) {
    normalization_ = this->layer_param_.loss_param().normalize() ?
        LossParameter_NormalizationMode_VALID :
        LossParameter_NormalizationMode_BATCH_SIZE;
  } else {
    normalization_ = this->layer_param_.loss_param().normalization();
  }
}
```

So in the SetUp phase the layer creates the internal SoftmaxLayer object, wires up its input/output Blobs (bottom[0] in, prob_ out), and calls that object's own SetUp; it also records the ignore_label and normalization settings from the LossParameter.
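SetUp only records normalization_; the divisor itself comes from get_normalizer, whose body is not shown in this post. Here is a sketch consistent with the doc comment on its declaration (assuming the proto enum also defines FULL and NONE modes; the exact Caffe code may differ in details):

```c++
template <typename Dtype>
Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
    LossParameter_NormalizationMode normalization_mode, int valid_count) {
  Dtype normalizer;
  switch (normalization_mode) {
    case LossParameter_NormalizationMode_FULL:
      normalizer = Dtype(outer_num_ * inner_num_);  // every location counts
      break;
    case LossParameter_NormalizationMode_VALID:
      normalizer = (valid_count == -1) ?            // -1 means "assume all valid"
          Dtype(outer_num_ * inner_num_) : Dtype(valid_count);
      break;
    case LossParameter_NormalizationMode_BATCH_SIZE:
      normalizer = Dtype(outer_num_);               // divide by N only
      break;
    case LossParameter_NormalizationMode_NONE:
      normalizer = Dtype(1);                        // plain sum, no normalization
      break;
    default:
      LOG(FATAL) << "Unknown normalization mode.";
  }
  // Guard against dividing by zero when a batch contains no valid labels.
  return std::max(Dtype(1.0), normalizer);
}
```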


Next, the forward-propagation function of `SoftmaxWithLossLayer`:

```c++

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  // (runs the internal SoftmaxLayer's forward computation)
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  // the predicted probabilities
  const Dtype* prob_data = prob_.cpu_data();
  // the ground-truth labels
  const Dtype* label = bottom[1]->cpu_data();
  int dim = prob_.count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      // accumulate the loss -log(prob[label]), clamped away from zero by FLT_MIN
      loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  // write the normalized loss into the output Blob
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
```
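To make the indexing concrete, here is a small standalone toy (illustration only, with made-up probabilities, not Caffe code) that reproduces the same loop for two samples, three classes, and inner_num_ = 1:

```c++
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdio>

int main() {
  const int outer_num = 2, classes = 3, inner_num = 1;
  const int dim = classes * inner_num;  // plays the role of prob_.count() / outer_num_
  // two already-softmaxed rows, each summing to 1
  float prob[] = {0.7f, 0.2f, 0.1f,    // sample 0
                  0.1f, 0.1f, 0.8f};   // sample 1
  int label[] = {0, 2};
  float loss = 0.0f;
  for (int i = 0; i < outer_num; ++i) {
    for (int j = 0; j < inner_num; ++j) {
      float p = prob[i * dim + label[i * inner_num + j] * inner_num + j];
      loss -= std::log(std::max(p, FLT_MIN));
    }
  }
  // VALID normalization with all labels valid: divide by outer_num * inner_num
  std::printf("loss = %f\n", loss / (outer_num * inner_num));
  // prints (-ln 0.7 - ln 0.8) / 2 = 0.289909
  return 0;
}
```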


Thanks to the internal SoftmaxLayer object, the forward pass stays very compact. Now let's look at the Backward computation:

```c++


template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    // gradients are never propagated to the label input Blob
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    // copy the predicted probabilities into the diff of the input Blob
    caffe_copy(prob_.count(), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->cpu_data();
    int dim = prob_.count() / outer_num_;
    int count = 0;
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value = static_cast<int>(label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
            bottom_diff[i * dim + c * inner_num_ + j] = 0;
          }
        } else {
          // in the input Blob's diff, subtract the ideal distribution
          // (probability 1 at the true label, 0 elsewhere) from the predicted
          // one; this difference is the error that gets back-propagated
          bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
          ++count;
        }
      }
    }
    // Scale gradient by the loss weight and the normalizer
    Dtype loss_weight = top[0]->cpu_diff()[0] /
                        get_normalizer(normalization_, count);
    caffe_scal(prob_.count(), loss_weight, bottom_diff);
  }
}
```
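The single line `bottom_diff[...] -= 1` is the entire gradient computation. Writing \(p_k = \mathrm{Softmax}(a_k)\) for the predicted probability and \(t\) for the true label, the per-sample loss is \(\ell = -\log p_t\), and differentiating through the softmax gives

\[
\frac{\partial \ell}{\partial a_k} = p_k - \delta_{kt} =
\begin{cases}
p_k - 1, & k = t \\
p_k, & k \neq t
\end{cases}
\]

which is exactly "copy the probabilities, then subtract 1 at the true label", followed by scaling with the loss weight and the normalizer.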

Studying Caffe's loss layers tells us the following: in the forward phase, data propagates layer by layer until it reaches the loss layer, which computes the predicted probability distribution and the loss; the backward phase starts at the loss layer, where the error (diff) is obtained as the difference between the predicted distribution and the ideal one (this is exactly where the supervision of supervised learning enters), and is then propagated back layer by layer, as described in the next section. We already know that a Blob consists of a data part and a diff part: if the data-reading layers are the source of data, then the loss layers are the source of diff.

The Implementation of Backpropagation

The Backward family of functions of Caffe's Net class is declared in net.hpp and implemented in net.cpp:

```c++

// back-propagate from layer index start down to layer index end
template <typename Dtype>
void Net<Dtype>::BackwardFromTo(int start, int end) {
  CHECK_GE(end, 0);
  CHECK_LT(start, layers_.size());
  for (int i = start; i >= end; --i) {
    for (int c = 0; c < before_backward_.size(); ++c) {
      before_backward_[c]->run(i);
    }
    if (layer_need_backward_[i]) {
      // for each layer that needs it, call the corresponding Backward function
      layers_[i]->Backward(
          top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]);
      if (debug_info_) { BackwardDebugInfo(i); }
    }
    for (int c = 0; c < after_backward_.size(); ++c) {
      after_backward_[c]->run(i);
    }
  }
}

// back-propagate from layer start all the way down to the first layer
template <typename Dtype>
void Net<Dtype>::BackwardFrom(int start) {
  BackwardFromTo(start, 0);
}

// back-propagate from the last layer down to layer end
template <typename Dtype>
void Net<Dtype>::BackwardTo(int end) {
  BackwardFromTo(layers_.size() - 1, end);
}

// backward pass over the whole network
template <typename Dtype>
void Net<Dtype>::Backward() {
  BackwardFromTo(layers_.size() - 1, 0);
  if (debug_info_) {
    // if debug info is enabled (set in the prototxt), compute the L1/L2 norms
    // of the data/diff of all weights so their evolution can be monitored and
    // divergence caught early
    Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0;
    for (int i = 0; i < learnable_params_.size(); ++i) {
      asum_data += learnable_params_[i]->asum_data();
      asum_diff += learnable_params_[i]->asum_diff();
      sumsq_data += learnable_params_[i]->sumsq_data();
      sumsq_diff += learnable_params_[i]->sumsq_diff();
    }
    const Dtype l2norm_data = std::sqrt(sumsq_data);
    const Dtype l2norm_diff = std::sqrt(sumsq_diff);
    LOG(ERROR) << "    [Backward] All net params (data, diff): "
               << "L1 norm = (" << asum_data << ", " << asum_diff << "); "
               << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")";
  }
}

// update the weights; called after backpropagation finishes
template <typename Dtype>
void Net<Dtype>::Update() {
  for (int i = 0; i < learnable_params_.size(); ++i) {
    // call each parameter Blob's Update(), which computes data = data - diff
    learnable_params_[i]->Update();
  }
}

// zero out all weight diffs
template <typename Dtype>
void Net<Dtype>::ClearParamDiffs() {
  for (int i = 0; i < learnable_params_.size(); ++i) {
    Blob<Dtype>* blob = learnable_params_[i];
    switch (Caffe::mode()) {
    case Caffe::CPU:
      caffe_set(blob->count(), static_cast<Dtype>(0),
                blob->mutable_cpu_diff());
      break;
    case Caffe::GPU:
#ifndef CPU_ONLY
      caffe_gpu_set(blob->count(), static_cast<Dtype>(0),
                    blob->mutable_gpu_diff());
#else
      NO_GPU;
#endif
      break;
    }
  }
}
```

At this point, Caffe's basic backward-propagation flow should be clear, which is a useful guide when designing more complex supervised-learning algorithms.
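To tie these pieces together, here is a hedged sketch (not from the post) of how the calls above compose into one SGD-style training step; it is roughly what Caffe's Solver does internally, with learning-rate and regularization logic omitted:

```c++
#include "caffe/net.hpp"

template <typename Dtype>
void TrainStep(caffe::Net<Dtype>& net) {
  net.ClearParamDiffs();  // zero all parameter diffs left from the last step
  Dtype loss;
  net.Forward(&loss);     // forward pass: data flows upward, loss at the top
  net.Backward();         // backward pass: diff flows from the loss layer down
  // (a real solver scales and regularizes the diffs here before updating)
  net.Update();           // data = data - diff for every learnable parameter
}
```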

2017/7/5 posted in Caffe backpropagation

Backpropagation in Caffe

Backpropagation places heavy demands on compute power, so it is only needed in the training environment. Because it is time-consuming and resource-hungry, training is usually run as an offline service.

Characteristics of Backpropagation

In the forward phase, a CNN calls each Layer's Forward function in turn, obtaining the output of every layer; the last layer compares its output against the target to produce the loss. The error update values are then computed and carried along the backward path, layer by layer, back to the first layer; all weighted layers update their weights together after backpropagation finishes.

The Loss Function

The loss layer (Loss Layer) is the end point of a CNN. It takes two Blobs as input: one holds the CNN's predictions, the other the ground-truth labels. The loss layer combines the two through a series of operations to obtain the network's loss function (Loss Function), usually written \(L(\theta)\), where \(\theta\) denotes the vector of the network's current weights. The goal of learning is to find, in weight space, the weights \(\theta_{opt}\) that minimize \(L(\theta)\); a family of optimization methods (such as the SGD method introduced later) can be used to approach \(\theta_{opt}\).

The loss is produced during the forward pass, and it is also the starting point of the backward pass.

Algorithm Description

Caffe implements several loss layers for different scenarios. SoftmaxWithLossLayer implements the Softmax + cross-entropy loss computation and suits single-label classification; there are also the Euclidean loss (for regression), the Hinge loss (max-margin classification, SVM), the Sigmoid + cross-entropy loss (for multi-attribute/multi-label problems), and more. Here we focus only on the most basic one, SoftmaxWithLossLayer; for the other loss layers, consult the corresponding Caffe source directly.

Assume there are K classes. The Softmax computation is:

\[
\mathrm{Softmax}(a_i) = \frac{\exp(a_i)}{\sum_{j}\exp(a_j)}, \quad i = 0, 1, 2, \ldots, K-1
\]

The Softmax output can be read as the probability distribution of the input image over the labels. From basic calculus the function is monotonically increasing: the larger an input score, the larger its output, and the more probable the corresponding label.
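A quick numeric illustration of the formula (toy code, not from Caffe): the softmax of the scores {1, 2, 3}. Subtracting the max first, as the SoftmaxLayer implementation analyzed below also does, leaves the result unchanged but avoids overflow in exp() for large scores:

```c++
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> a = {1.0, 2.0, 3.0};
  double mx = *std::max_element(a.begin(), a.end());
  double sum = 0.0;
  for (double v : a) sum += std::exp(v - mx);  // denominator of the softmax
  for (double v : a)
    std::printf("%f ", std::exp(v - mx) / sum);
  // prints: 0.090031 0.244728 0.665241 (sums to 1)
  return 0;
}
```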

The cross-entropy classification loss computed on the Softmax output is:

\[
L(\theta) = -\frac{1}{N}\sum_{i=0}^{N-1} \log\bigl[\mathrm{Softmax}(a_{k_i})\bigr]
\]

where \(k_i\) is the true label of the i-th sample and N is the batch size.

An ideal classifier would assign probability 1 to the true label and 0 to every other label, giving a loss of \(-\ln(1) = 0\). The larger the loss, the smaller the probability the classifier assigns to the true label, and the worse it performs. A very bad classifier may assign a probability close to 0 to the true label, driving the loss toward positive infinity; we then say training has diverged, and the learning rate should be reduced.

In the ImageNet-1000 classification problem, the initial state is a uniform distribution with every class at probability 0.001, so the initial loss is \(-\ln(0.001) = \ln(1000) = 6.907755\ldots\) Students often ask: "why does my loss sit around 6.9 (the so-called 6.9 plateau) and refuse to drop no matter how long I train?" That means there is no sign of convergence yet; try increasing the learning rate or changing the weight-initialization scheme.
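As a sanity check on the plateau value, it is just \(-\ln(1/K)\) for K classes (toy code, illustration only):

```c++
#include <cmath>
#include <cstdio>

int main() {
  const int K = 1000;  // ImageNet-1000 classes
  // uniform initial distribution: every class has probability 1/K
  std::printf("initial loss = %f\n", -std::log(1.0 / K));  // ln(1000) = 6.907755
  return 0;
}
```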

Parameter Description

First, look in caffe.proto for the message definition related to Softmax:

```protobuf
// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;  // compute with the cuDNN engine
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}
```

Source-Code Walkthrough

The base class of all loss layers is declared in include/caffe/layers/loss_layer.hpp:

```c++
/**
 * @brief An interface for Layer%s that take two Blob%s as input -- usually
 *        (1) predictions and (2) ground-truth labels -- and output a
 *        singleton Blob representing the loss.
 *
 * LossLayers are typically only capable of backpropagating to their first input
 * -- the predictions.
 */

// The ancestor class of all loss layers, derived from Layer
template <typename Dtype>
class LossLayer : public Layer<Dtype> {
 public:
  // explicit constructor
  explicit LossLayer(const LayerParameter& param)
     : Layer<Dtype>(param) {}
  // layer setup function
  virtual void LayerSetUp(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
  // reshape function
  virtual void Reshape(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
  // takes exactly two Blobs as input
  virtual inline int ExactNumBottomBlobs() const { return 2; }

  /**
   * @brief For convenience and backwards compatibility, instruct the Net to
   *        automatically allocate a single top Blob for LossLayers, into which
   *        they output their singleton loss, (even if the user didn't specify
   *        one in the prototxt, etc.).
   */
   // the loss layer stores its computed L(θ) in this automatically allocated top Blob
  virtual inline bool AutoTopBlobs() const { return true; }
  // exactly one output Blob
  virtual inline int ExactNumTopBlobs() const { return 1; }
  /**
   * We usually cannot backpropagate to the labels; ignore force_backward for
   * these inputs.
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return bottom_index != 1;
  }
};
```

SoftmaxLayer, which computes the Softmax function itself, is declared in include/caffe/layers/softmax_layer.hpp:

```c++
/**
 * @brief Computes the softmax function.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */

// SoftmaxLayer derives directly from Layer
template <typename Dtype>
class SoftmaxLayer : public Layer<Dtype> {
 public:
  // explicit constructor
  explicit SoftmaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  // reshape function
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // returns the class-name string
  virtual inline const char* type() const { return "Softmax"; }
  // the layer takes exactly one input Blob and produces exactly one output Blob
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  // forward functions
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // backward functions
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // bookkeeping used by the computation
  int outer_num_;
  int inner_num_;
  int softmax_axis_;
  /// sum_multiplier_ is used to carry out sums using BLAS
  Blob<Dtype> sum_multiplier_;
  /// scale_ is an intermediate Blob to hold temporary results
  Blob<Dtype> scale_;
};
```

SoftmaxLayer is implemented in src/caffe/layers/softmax_layer.cpp; let's look at the concrete implementation:

```c++
// reshape function
template <typename Dtype>
void SoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // canonicalize the axis index (resolves negative values)
  softmax_axis_ =
      bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
  // make the output blob the same shape as the input blob
  top[0]->ReshapeLike(*bottom[0]);
  // sum_multiplier_ is a helper for BLAS summation: a vector of ones whose
  // length equals the number of classes, bottom[0]->shape(softmax_axis_)
  vector<int> mult_dims(1, bottom[0]->shape(softmax_axis_));
  sum_multiplier_.Reshape(mult_dims);
  Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
  // initialize the multiplier to all ones
  caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data);
  outer_num_ = bottom[0]->count(0, softmax_axis_);
  inner_num_ = bottom[0]->count(softmax_axis_ + 1);
  vector<int> scale_dims = bottom[0]->shape();
  scale_dims[softmax_axis_] = 1;
  // shape scale_ like the input, with the softmax axis collapsed to 1
  scale_.Reshape(scale_dims);
}
```
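To make the outer/inner split concrete, a toy check (illustration only, not Caffe code) for a 2x5x4x3 blob with the default axis = 1:

```c++
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> shape = {2, 5, 4, 3};  // N x C x H x W
  int axis = 1;
  // outer_num_ = count(0, axis): number of independent images
  int outer = std::accumulate(shape.begin(), shape.begin() + axis, 1,
                              std::multiplies<int>());  // = 2
  // inner_num_ = count(axis + 1): number of independent spatial positions
  int inner = std::accumulate(shape.begin() + axis + 1, shape.end(), 1,
                              std::multiplies<int>());  // = 12
  std::printf("%d independent %d-way softmaxes\n", outer * inner, shape[axis]);
  // prints: 24 independent 5-way softmaxes
  return 0;
}
```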

```c++
// forward computation: produces the Softmax(a_k) values
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // data pointers of the input/output Blobs
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  // pointer to the temporary buffer
  Dtype* scale_data = scale_.mutable_cpu_data();
  int channels = bottom[0]->shape(softmax_axis_);
  int dim = bottom[0]->count() / outer_num_;  // channels * inner_num_
  caffe_copy(bottom[0]->count(), bottom_data, top_data);  // copy input into the output buffer
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize. (scan bottom_data for the per-position max, into scale_data)
  for (int i = 0; i < outer_num_; ++i) {
    // initialize scale_data to the first plane (the first channel of bottom_data)
    caffe_copy(inner_num_, bottom_data + i * dim, scale_data);
    for (int j = 0; j < channels; j++) {
      for (int k = 0; k < inner_num_; k++) {
        scale_data[k] = std::max(scale_data[k],
            bottom_data[i * dim + j * inner_num_ + k]);
      }
    }
    // subtraction (in the output buffer: a_k = a_k - max_i(a_i))
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_,
        1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data);
    // exponentiation (compute exp(a_k))
    caffe_exp<Dtype>(dim, top_data, top_data);
    // sum after exp (accumulate sum_k exp(a_k) into scale_data)
    caffe_cpu_gemv<Dtype>(CblasTrans, channels, inner_num_, 1.,
        top_data, sum_multiplier_.cpu_data(), 0., scale_data);
    // division: the Softmax value exp(a_k) / sum_j exp(a_j)
    for (int j = 0; j < channels; j++) {
      // top_data = top_data / scale_data
      caffe_div(inner_num_, top_data, scale_data, top_data);
      // advance the pointer to the next channel plane
      top_data += inner_num_;
    }
  }
}
// backward computation: the softmax derivative
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // data/diff pointers
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int channels = top[0]->shape(softmax_axis_);
  int dim = top[0]->count() / outer_num_;
  caffe_copy(top[0]->count(), top_diff, bottom_diff);
  for (int i = 0; i < outer_num_; ++i) {
    // compute dot(top_diff, top_data) and subtract them from the bottom diff
    for (int k = 0; k < inner_num_; ++k) {
      scale_data[k] = caffe_cpu_strided_dot<Dtype>(channels,
          bottom_diff + i * dim + k, inner_num_,
          top_data + i * dim + k, inner_num_);
    }
    // subtraction
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1,
        -1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim);
  }
  // elementwise multiplication
  caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
```
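In formulas, with \(y_i = \mathrm{Softmax}(a_i)\), Backward_cpu computes the Jacobian-vector product of the softmax. Since \(\partial y_i / \partial a_j = y_i(\delta_{ij} - y_j)\),

\[
\frac{\partial E}{\partial a_j} = \sum_i \frac{\partial E}{\partial y_i}\, y_i (\delta_{ij} - y_j)
= y_j\left(\frac{\partial E}{\partial y_j} - \sum_i \frac{\partial E}{\partial y_i}\, y_i\right)
\]

The strided dot products store the \(\sum_i (\partial E/\partial y_i)\, y_i\) term in scale_data, the gemm call subtracts it from every channel, and the final caffe_mul supplies the leading factor \(y_j\).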


At this point we understand how the Softmax function itself is computed; next we will look at the implementation of SoftmaxWithLossLayer.

2017/7/4 posted in Caffe backpropagation