
OpenCV softcascade Code Walkthrough


//Headers: softcascade.hpp, core.hpp
//Implementation file: octave.cpp
//The code also pulls in ml.hpp, tree.cpp and related files.
//The softcascade detector training function is as follows:

bool BoostedSoftCascadeOctave::train(const Dataset* dataset, const FeaturePool* pool, int weaks, int treeDepth)
{//dataset gives access to the training set; pool is the feature set; weaks is the number of weak tree classifiers to train; treeDepth is the depth of each tree
CV_Assert(treeDepth == 2); //the weak trees are required to have depth exactly 2
CV_Assert(weaks > 0);

params.max_depth = treeDepth;
params.weak_count = weaks;

// 1. fill integrals and classes: compute the integral images of every sample and assign the labels -- the responses vector is set to 1 for positives and 0 for negatives
processPositives(dataset);
generateNegatives(dataset);

// 2. only simple case (all features used): every feature in the pool takes part
int nfeatures = pool->size();
cv::Mat varIdx(1, nfeatures, CV_32SC1);
int* ptr = varIdx.ptr<int>(0);

for (int x = 0; x < nfeatures; ++x)
ptr[x] = x;

// 3. only simple case (all samples used): every training sample takes part
int nsamples = npositives + nnegatives;
cv::Mat sampleIdx(1, nsamples, CV_32SC1);
ptr = sampleIdx.ptr<int>(0);

for (int x = 0; x < nsamples; ++x)
ptr[x] = x;

// 4. ICF has an ordered response.
cv::Mat varType(1, nfeatures + 1, CV_8UC1); //per-variable type descriptor: one entry per feature, plus a final entry describing the responses vector passed to train() below
uchar* uptr = varType.ptr<uchar>(0);
for (int x = 0; x < nfeatures; ++x)
uptr[x] = CV_VAR_ORDERED; //each ICF feature is an ordered (numeric) variable whose values can be compared and thresholded; nothing is actually sorted here
uptr[nfeatures] = CV_VAR_CATEGORICAL; //the response is a categorical class label

trainData.create(nfeatures, nsamples, CV_32FC1); //the training matrix: as the double loop below shows, each row corresponds to a feature and each column to a sample; each element holds the value of feature fi evaluated on sample si from its integral images.

for (int fi = 0; fi < nfeatures; ++fi)
{
float* dptr = trainData.ptr<float>(fi);
for (int si = 0; si < nsamples; ++si)
{
dptr[si] = pool->apply(fi, si, integrals);
}
}

cv::Mat missingMask;

bool ok = train(trainData, responses, varIdx, sampleIdx, varType, missingMask); //the main training call: trainData is the feature representation of the training set, responses holds the class labels, varIdx indexes the features to learn from, sampleIdx indexes the training samples, varType describes each variable's type, and missingMask (left empty here) would mark missing feature values
if (!ok)
CV_Error(CV_StsInternal, "ERROR: tree can not be trained");
return ok;

}
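//To make this layout concrete, here is a minimal, self-contained sketch of filling such a features-by-samples matrix. It is not the library code: applyFeature() is a hypothetical stand-in for pool->apply(fi, si, integrals), and the values are fake.

#include <opencv2/core/core.hpp>

//hypothetical stand-in for pool->apply(fi, si, integrals): any function that
//evaluates feature fi on sample si
static float applyFeature(int fi, int si) { return static_cast<float>(fi * 10 + si); }

int main()
{
    const int nfeatures = 3, nsamples = 4;
    cv::Mat trainData(nfeatures, nsamples, CV_32FC1); //one row per feature, one column per sample

    for (int fi = 0; fi < nfeatures; ++fi)
    {
        float* dptr = trainData.ptr<float>(fi);
        for (int si = 0; si < nsamples; ++si)
            dptr[si] = applyFeature(fi, si);
    }

    //because the samples run along columns, the wrapper below has to pass
    //CV_COL_SAMPLE to cv::Boost::train()
    return 0;
}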

//The overload it calls is shown below. It simply forwards to cv::Boost::train(), adding the params member and the CV_COL_SAMPLE flag (each column of trainData is one sample).
//BoostedSoftCascadeOctave derives from cv::Boost, so params is the inherited boosting-parameter structure filled in above (max_depth, weak_count); at this point we are still training the detector as a whole:
bool BoostedSoftCascadeOctave::train( const cv::Mat& _trainData, const cv::Mat& _responses, const cv::Mat& varIdx,
const cv::Mat& sampleIdx, const cv::Mat& varType, const cv::Mat& missingDataMask)
{
bool update = false;
return cv::Boost::train(_trainData, CV_COL_SAMPLE, _responses, varIdx, sampleIdx, varType, missingDataMask, params,
update);
}

//The Boost train() function is defined as follows (in boost.cpp):
bool
CvBoost::train( const CvMat* _train_data, int _tflag,
const CvMat* _responses, const CvMat* _var_idx,
const CvMat* _sample_idx, const CvMat* _var_type,
const CvMat* _missing_mask,
CvBoostParams _params, bool _update )
{//Picking up the thread: _train_data is the training data, one row per feature and one column per sample; _tflag = CV_COL_SAMPLE states that each column is a sample;
//_responses holds the class label of each sample; _var_idx are the feature indices, _sample_idx the sample indices; _var_type is the per-variable type descriptor built above (ordered features, categorical response); _missing_mask marks missing feature values (empty here);
//_params carries the boosting parameters; _update is false, so training starts from scratch. Each weak classifier is trained with CvBoostTree::train() below.
//
bool ok = false;
CvMemStorage* storage = 0;

CV_FUNCNAME( "CvBoost::train" );

__BEGIN__;

int i;

set_params( _params );//store the boosting parameters

cvReleaseMat( &active_vars );
cvReleaseMat( &active_vars_abs );

if( !_update || !data ) //prepare the training data, check that the problem has exactly two classes, and allocate storage for the weak classifiers
{
clear();
data = new CvDTreeTrainData( _train_data, _tflag, _responses, _var_idx, //CvDTreeTrainData also receives _params because CvBoostParams derives from CvDTreeParams, so it carries the tree-growing settings (max_depth and friends)
_sample_idx, _var_type, _missing_mask, _params, true, true );

if( data->get_num_classes() != 2 )
CV_ERROR( CV_StsNotImplemented,
"Boosted trees can only be used for 2-class classification." );
CV_CALL( storage = cvCreateMemStorage() );
weak = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvBoostTree*), storage ); //the CvBoost member sequence that stores the trained weak classifiers
storage = 0;
}
else
{
data->set_data( _train_data, _tflag, _responses, _var_idx,
_sample_idx, _var_type, _missing_mask, _params, true, true, true );
}

if ( (_params.boost_type == LOGIT) || (_params.boost_type == GENTLE) )
data->do_responses_copy(); //LogitBoost and Gentle AdaBoost modify the responses during training, so a private copy is made

update_weights( 0 ); //initialize: distribute the sample weights uniformly (see the weight-update sketch after this function)

for( i = 0; i < params.weak_count; i++ ) //train weak_count weak classifiers
{
CvBoostTree* tree = new CvBoostTree;
if( !tree->train( data, subsample_mask, this ) ) //the core per-tree training call; subsample_mask starts out as a null pointer and is maintained by update_weights()/trim_weights() to mark the samples that still carry significant weight
//the third argument is the ensemble that the new weak classifier will be attached to
{
delete tree;
break;
}
//cvCheckArr( get_weak_response());
cvSeqPush( weak, &tree );
update_weights( tree ); //re-weight the samples according to the new weak classifier: misclassified samples gain weight, correctly classified ones lose it
trim_weights(); //drop samples whose weight has become negligible from subsample_mask
if( cvCountNonZero(subsample_mask) == 0 )
break;
}

if(weak->total > 0)//at least one tree was trained: finalize and release the training data
{
get_active_vars(); // recompute active_vars* maps and condensed_idx's in the splits.
data->is_classifier = true;
data->free_train_data();
ok = true;
}
else
clear();

__END__;

return ok;
}
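//For context: update_weights() implements the usual boosting re-weighting. As a textbook sketch of the Discrete AdaBoost case (background only, not a line-by-line transcription of the OpenCV code), with w_i the weight of sample i and h_m the m-th weak classifier:
//
//    err_m = \frac{\sum_i w_i \,[h_m(x_i) \ne y_i]}{\sum_i w_i}, \quad
//    c_m = \log\frac{1 - err_m}{err_m}, \quad
//    w_i \leftarrow w_i \exp\big(c_m \,[h_m(x_i) \ne y_i]\big)
//
//after which the weights are renormalized to sum to one, so misclassified samples gain weight. trim_weights() then excludes samples whose weight falls below a quantile controlled by the weight_trim_rate parameter; that is what updates subsample_mask and can terminate the loop early when no samples remain.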

//CvBoostTree::train() is defined as follows; it trains a single weak classifier by forwarding to CvDTree::do_train():
bool CvBoostTree::train( CvDTreeTrainData* _train_data,
const CvMat* _subsample_idx, CvBoost* _ensemble )
{
clear();
ensemble = _ensemble;
data = _train_data;
data->shared = true;
return do_train( _subsample_idx );
}
//CvDTree::do_train() is defined as follows (in tree.cpp; declared in ml.hpp):

bool CvDTree::do_train( const CvMat* _subsample_idx )
{
bool result = false;

CV_FUNCNAME( "CvDTree::do_train" );

__BEGIN__;

root = data->subsample_data( _subsample_idx ); //select the samples that take part in training this tree

CV_CALL( try_split_node(root));

if( root->split )
{
CV_Assert( root->left );
CV_Assert( root->right );

if( data->params.cv_folds > 0 )
CV_CALL( prune_cv() ); //cross-validated cost-complexity pruning (see the note after this function)

if( !data->shared )
data->free_train_data();

result = true;
}

__END__;

return result;
}
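//One note on prune_cv(): when cv_folds > 0 the tree is pruned with CART-style, cross-validated cost-complexity pruning. As a textbook sketch (background only, not the exact OpenCV code), pruning minimizes
//
//    R_\alpha(T) = R(T) + \alpha \, |\widetilde{T}|
//
//where R(T) is the tree's error, |\widetilde{T}| its number of leaves, and the penalty \alpha is chosen via the configured number of cross-validation folds.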

//The first core routine called by do_train() is subsample_data():
CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )
{
CvDTreeNode* root = 0;
CvMat* isubsample_idx = 0;
CvMat* subsample_co = 0;

bool isMakeRootCopy = true;

CV_FUNCNAME( "CvDTreeTrainData::subsample_data" );

__BEGIN__;

if( !data_root )
CV_ERROR( CV_StsError, "No training data has been set" );

if( _subsample_idx )
{
CV_CALL( isubsample_idx = cvPreprocessIndexArray( _subsample_idx, sample_count )); //_subsample_idx must be a single row or column; it may hold either the indices of the selected samples
//or a 0/1 mask of length sample_count. Either way the output contains only
//the indices of the selected samples, sorted in increasing order.
if( isubsample_idx->cols + isubsample_idx->rows - 1 == sample_count ) //isubsample_idx is a row or column vector; check whether it selects exactly sample_count entries
{
const int* sidx = isubsample_idx->data.i;
for( int i = 0; i < sample_count; i++ )
{
if( sidx[i] != i )
{
isMakeRootCopy = false; //the selection is not the identity permutation 0..sample_count-1, so a genuine subsample must be built; only when every sample is selected in order can the root simply be copied
break;
}
}
}
else
isMakeRootCopy = false;
}

if( isMakeRootCopy )
{
// make a copy of the root node
CvDTreeNode temp;
int i;
root = new_node( 0, 1, 0, 0 );
temp = *root;
*root = *data_root;
root->num_valid = temp.num_valid;
if( root->num_valid )
{
for( i = 0; i < var_count; i++ )
root->num_valid[i] = data_root->num_valid[i];
}
root->cv_Tn = temp.cv_Tn;
root->cv_node_risk = temp.cv_node_risk;
root->cv_node_error = temp.cv_node_error;
}
else
{
int* sidx = isubsample_idx->data.i;
// co - array of count/offset pairs (to handle duplicated values in _subsample_idx)
int* co, cur_ofs = 0;
int vi, i;
int workVarCount = get_work_var_count(); //number of working variables: the features plus the response and related internal bookkeeping variables
int count = isubsample_idx->rows + isubsample_idx->cols - 1; //number of selected samples (with multiplicity)

root = new_node( 0, count, 1, 0 );

CV_CALL( subsample_co = cvCreateMat( 1, sample_count*2, CV_32SC1 ));
cvZero( subsample_co );
co = subsample_co->data.i;
for( i = 0; i < count; i++ )
co[sidx[i]*2]++;
for( i = 0; i < sample_count; i++ )
{
if( co[i*2] )
{
co[i*2+1] = cur_ofs;
cur_ofs += co[i*2];
}
else
co[i*2+1] = -1;
}

cv::AutoBuffer<uchar> inn_buf(sample_count*(2*sizeof(int) + sizeof(float)));
for( vi = 0; vi < workVarCount; vi++ )
{
int ci = get_var_type(vi); //non-negative for categorical variables, negative for ordered ones

if( ci >= 0 || vi >= var_count ) //categorical data, or one of the internal bookkeeping variables
{
int num_valid = 0;
const int* src = CvDTreeTrainData::get_cat_var_data( data_root, vi, (int*)(uchar*)inn_buf );

if (is_buf_16u)
{
unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
vi*sample_count + root->offset);
for( i = 0; i < count; i++ )
{
int val = src[sidx[i]];
udst[i] = (unsigned short)val;
num_valid += val >= 0;
}
}
else
{
int* idst = buf->data.i + root->buf_idx*get_length_subbuf() +
vi*sample_count + root->offset;
for( i = 0; i < count; i++ )
{
int val = src[sidx[i]];
idst[i] = val;
num_valid += val >= 0;
}
}

if( vi < var_count )
root->set_num_valid(vi, num_valid);
}
else
{
int *src_idx_buf = (int*)(uchar*)inn_buf;
float *src_val_buf = (float*)(src_idx_buf + sample_count);
int* sample_indices_buf = (int*)(src_val_buf + sample_count);
const int* src_idx = 0;
const float* src_val = 0;
get_ord_var_data( data_root, vi, src_val_buf, src_idx_buf, &src_val, &src_idx, sample_indices_buf );
int j = 0, idx, count_i;
int num_valid = data_root->get_num_valid(vi);

if (is_buf_16u)
{
unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
vi*sample_count + data_root->offset);
for( i = 0; i < num_valid; i++ )
{
idx = src_idx[i];
count_i = co[idx*2];
if( count_i )
for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
udst_idx[j] = (unsigned short)cur_ofs;
}

root->set_num_valid(vi, j);

for( ; i < sample_count; i++ )
{
idx = src_idx[i];
count_i = co[idx*2];
if( count_i )
for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
udst_idx[j] = (unsigned short)cur_ofs;
}
}
else
{
int* idst_idx = buf->data.i + root->buf_idx*get_length_subbuf() +
vi*sample_count + root->offset;
for( i = 0; i < num_valid; i++ )
{
idx = src_idx[i];
count_i = co[idx*2];
if( count_i )
for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
idst_idx[j] = cur_ofs;
}

root->set_num_valid(vi, j);

for( ; i < sample_count; i++ )
{
idx = src_idx[i];
count_i = co[idx*2];
if( count_i )
for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ )
idst_idx[j] = cur_ofs;
}
}
}
}
// sample indices subsampling
const int* sample_idx_src = get_sample_indices(data_root, (int*)(uchar*)inn_buf);
if (is_buf_16u)
{
unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
workVarCount*sample_count + root->offset);
for (i = 0; i < count; i++)
sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]];
}
else
{
int* sample_idx_dst = buf->data.i + root->buf_idx*get_length_subbuf() +
workVarCount*sample_count + root->offset;
for (i = 0; i < count; i++)
sample_idx_dst[i] = sample_idx_src[sidx[i]];
}
}

__END__;

cvReleaseMat( &isubsample_idx );
cvReleaseMat( &subsample_co );

return root;
}
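//The count/offset bookkeeping above is the subtle part: _subsample_idx may contain duplicated indices, so co stores, for each original sample i, how many times it was selected (co[i*2]) and where its copies begin in the compacted per-variable buffers (co[i*2+1], or -1 if unselected). A standalone sketch of the same construction, with made-up indices:

#include <cstdio>
#include <vector>

int main()
{
    const int sample_count = 5;
    const int sidx[] = { 0, 2, 2, 4 }; //selected samples, with a duplicate
    const int count = 4;

    //co holds (count, offset) pairs per original sample, as in subsample_data()
    std::vector<int> co(sample_count * 2, 0);
    for (int i = 0; i < count; i++)
        co[sidx[i] * 2]++;

    int cur_ofs = 0;
    for (int i = 0; i < sample_count; i++)
    {
        if (co[i * 2])
        {
            co[i * 2 + 1] = cur_ofs; //first slot used by this sample's copies
            cur_ofs += co[i * 2];
        }
        else
            co[i * 2 + 1] = -1;      //sample not selected
    }

    //prints: sample 0 -> (1, 0), sample 1 -> (0, -1), sample 2 -> (2, 1),
    //        sample 3 -> (0, -1), sample 4 -> (1, 3)
    for (int i = 0; i < sample_count; i++)
        std::printf("sample %d: count=%d offset=%d\n", i, co[i * 2], co[i * 2 + 1]);
    return 0;
}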

//The other core routine of do_train() recursively grows the tree:

void CvDTree::try_split_node( CvDTreeNode* node )
{
CvDTreeSplit* best_split = 0;
int i, n = node->sample_count, vi;
bool can_split = true;
double quality_scale;

calc_node_value( node );

if( node->sample_count <= data->params.min_sample_count ||
node->depth >= data->params.max_depth )
can_split = false;

if( can_split && data->is_classifier )
{
// check if we have a "pure" node,
// we assume that cls_count is filled by calc_node_value()
int* cls_count = data->counts->data.i;
int nz = 0, m = data->get_num_classes();
for( i = 0; i < m; i++ )
nz += cls_count[i] != 0;
if( nz == 1 ) // there is only one class
can_split = false;
}
else if( can_split )
{
if( sqrt(node->node_risk)/n < data->params.regression_accuracy )
can_split = false;
}

if( can_split )
{
best_split = find_best_split(node);
// TODO: check the split quality ...
node->split = best_split;
}
if( !can_split || !best_split )
{
data->free_node_data(node);
return;
}

quality_scale = calc_node_dir( node );
if( data->params.use_surrogates )
{
// find all the surrogate splits
// and sort them by their similarity to the primary one
for( vi = 0; vi < data->var_count; vi++ )
{
CvDTreeSplit* split;
int ci = data->get_var_type(vi);

if( vi == best_split->var_idx )
continue;

if( ci >= 0 )
split = find_surrogate_split_cat( node, vi );
else
split = find_surrogate_split_ord( node, vi );

if( split )
{
// insert the split
CvDTreeSplit* prev_split = node->split;
split->quality = (float)(split->quality*quality_scale);

while( prev_split->next &&
prev_split->next->quality > split->quality )
prev_split = prev_split->next;
split->next = prev_split->next;
prev_split->next = split;
}
}
}
split_node_data( node );
try_split_node( node->left );
try_split_node( node->right );
}
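//For context: find_best_split() scores candidate splits with an impurity criterion; for classification trees this is the Gini index. As a textbook sketch (background only, not a transcription of the OpenCV code), with p_k the fraction of class-k samples in node t and n_L, n_R the sizes of the two children:
//
//    Gini(t) = 1 - \sum_k p_k^2, \quad
//    \Delta = Gini(t) - \frac{n_L}{n} Gini(t_L) - \frac{n_R}{n} Gini(t_R)
//
//The split that maximizes the impurity decrease \Delta is kept, and try_split_node() recurses into the two children until the depth, sample-count, or node-purity checks above stop it.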
