首页 > 代码库 > opencv源代码分析之二:cvhaartraining.cpp

opencv源代码分析之二:cvhaartraining.cpp

我使用的是opencv2.4.9。安装后,我的cvhaartraining.cpp文件的路径是........\opencv\sources\apps\haartraining\cvhaartraining.cpp。研究源代码这么多天,有非常多收获,opencv库真是非常强大。其中在这篇博文中我加了部分注释,其它的有关知识请参考我的博客http://blog.csdn.net/ding977921830?viewmode=contents。详细内容如下:

/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*
 * cvhaartraining.cpp
 *
 * training of cascade of boosted classifiers based on haar features
 *參考资料:
 *1 http://www.sjsjw.com/kf_www/article/000119ABA007840.asp
 *2 http://www.opencvchina.com/thread-129-1-1.html
 */

#include "cvhaartraining.h"
#include "_cvhaartraining.h"

#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <climits>
#include <ctype.h>

#include "highgui.h"

#ifdef CV_VERBOSE
#include <ctime>

#ifdef _WIN32
/* use clock() function instead of time() */
#define TIME( arg ) (((double) clock()) / CLOCKS_PER_SEC)
#else
#define TIME( arg ) (time( arg ))
#endif /* _WIN32 */

#endif /* CV_VERBOSE */

#if defined CV_OPENMP && (defined _MSC_VER || defined CV_ICC)
#define CV_OPENMP 1
#else
#undef CV_OPENMP
#endif

typedef struct CvBackgroundData
{
    int    count;
    char** filename;
    int    last;
    int    round;
    CvSize winsize;
} CvBackgroundData;

typedef struct CvBackgroundReader
{
    CvMat   src;
    CvMat   img;
    CvPoint offset;
    float   scale;
    float   scalefactor;
    float   stepfactor;
    CvPoint point;
} CvBackgroundReader;

/*
 * Background reader
 * Created in each thread
 */
CvBackgroundReader* cvbgreader = NULL;

#if defined CV_OPENMP
#pragma omp threadprivate(cvbgreader)
#endif

CvBackgroundData* cvbgdata = http://www.mamicode.com/NULL;>

当训练人脸图像时,由于人脸的左右对称性能够设置 Symmetric 为 1,以加速训练。 */ { CvIntHaarFeatures* features = NULL;//存储全部Haar特征的结构体,Haar特征由指针CvTHaarFeature所指向。 CvTHaarFeature haarFeature; //一个Haar特征由2到3个具有对应权重的矩形组成。 //这几个矩形的权重符号相反,而且权重的绝对值跟矩形的大小成反比。 /*内存存储器是一个可用来存储诸如序列,轮廓,图形,子划分等动态增长数据结构的底层结构。 它是由一系列以同等大小的内存块构成,呈列表型,本句选自https://code.csdn.net/snippets/632568/master/.cpp/raw*/ CvMemStorage* storage = NULL; CvSeq* seq = NULL; CvSeqWriter writer; int s0 = 36; /* minimum total area size of basic haar feature */ int s1 = 12; /* minimum total area size of tilted haar features 2 */ int s2 = 18; /* minimum total area size of tilted haar features 3 */ int s3 = 24; /* minimum total area size of tilted haar features 4 */ int x = 0; int y = 0; //(x,y)表示小矩形的位置 int dx = 0; //dx表示小矩形的宽 int dy = 0; //dy表示小矩形的高 /////////////////////////////////////////////////////////////////////////////// //计算缩放因子factor //winsize是训练样本的大小 #if 0 float factor = 1.0F; factor = ((float) winsize.width) * winsize.height / (24 * 24); s0 = (int) (s0 * factor); s1 = (int) (s1 * factor); s2 = (int) (s2 * factor); s3 = (int) (s3 * factor); #else s0 = 1; s1 = 1; s2 = 1; s3 = 1; #endif /* CV_VECTOR_CREATE( vec, CvIntHaarFeature, size, maxsize ) */ storage = cvCreateMemStorage(); //功能:创建新序列。并初始化写入部分 cvStartWriteSeq( 0, sizeof( CvSeq ), sizeof( haarFeature ), storage, &writer ); for( x = 0; x < winsize.width; x++ ) { for( y = 0; y < winsize.height; y++ ) { for( dx = 1; dx <= winsize.width; dx++ ) { for( dy = 1; dy <= winsize.height; dy++ ) { // haar_x2 if ( (x+dx*2 <= winsize.width) && (y+dy <= winsize.height) ) { if (dx*2*dy < s0) continue;///????????

??

?

????

????

??

if (!symmetric || (x+x+dx*2 <=winsize.width)) {//???

????

??

?

?

??

haarFeature = cvHaarFeature( "haar_x2",//类型 x, y, dx*2, dy, -1, //[x,y,dx*2,dy]是一个小矩形左上角的位置和高和宽,-1是其权重 x+dx, y, dx , dy, +2 ); //[x,y,dx*2,dy]是一个小矩形左上角的位置和高和宽,2是其权重 //第一个小矩形中的像素和乘以-1。然后加上第二个小矩形的像素和乘以2,也就是水平方向上两个等宽的小矩形。用右边的减去左边的.黑色为+,白色为- /* CV_VECTOR_PUSH( vec, CvIntHaarFeature, haarFeature, size, maxsize, step ) */ CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // haar_y2 if ( (x+dx <= winsize.width) && (y+dy*2 <= winsize.height) ) { if (dx*2*dy < s0) continue; if (!symmetric || (x+x+dx <= winsize.width)) { haarFeature = cvHaarFeature( "haar_y2", x, y, dx, dy*2, -1, x, y+dy, dx, dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // haar_x3 if ( (x+dx*3 <= winsize.width) && (y+dy <= winsize.height) ) { if (dx*3*dy < s0) continue; if (!symmetric || (x+x+dx*3 <=winsize.width)) { haarFeature = cvHaarFeature( "haar_x3", x, y, dx*3, dy, -1, x+dx, y, dx, dy, +3 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // haar_y3 if ( (x+dx <= winsize.width) && (y+dy*3 <= winsize.height) ) { if (dx*3*dy < s0) continue; if (!symmetric || (x+x+dx <= winsize.width)) { haarFeature = cvHaarFeature( "haar_y3", x, y, dx, dy*3, -1, x, y+dy, dx, dy, +3 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } if( mode != 0 /*BASIC*/ ) { // haar_x4 if ( (x+dx*4 <= winsize.width) && (y+dy <= winsize.height) ) { if (dx*4*dy < s0) continue; if (!symmetric || (x+x+dx*4 <=winsize.width)) { haarFeature = cvHaarFeature( "haar_x4", x, y, dx*4, dy, -1, x+dx, y, dx*2, dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // haar_y4 if ( (x+dx <= winsize.width ) && (y+dy*4 <= winsize.height) ) { if (dx*4*dy < s0) continue; if (!symmetric || (x+x+dx <=winsize.width)) { haarFeature = cvHaarFeature( "haar_y4", x, y, dx, dy*4, -1, x, y+dy, dx, dy*2, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } } // x2_y2 if ( (x+dx*2 <= winsize.width) && (y+dy*2 <= winsize.height) ) { if (dx*4*dy < s0) continue; if (!symmetric || (x+x+dx*2 <=winsize.width)) { haarFeature = cvHaarFeature( "haar_x2_y2", x , y, dx*2, 
dy*2, -1, x , y , dx , dy, +2, x+dx, y+dy, dx , dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } if (mode != 0 /*BASIC*/) { // point if ( (x+dx*3 <= winsize.width) && (y+dy*3 <= winsize.height) ) { if (dx*9*dy < s0) continue; if (!symmetric || (x+x+dx*3 <=winsize.width)) { haarFeature = cvHaarFeature( "haar_point",//"haar_point"为3*3的小矩形 x , y, dx*3, dy*3, -1, x+dx, y+dy, dx , dy , +9); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } } if (mode == 2 /*ALL*/) { // tilted haar_x2 (x, y, w, h, b, weight) if ( (x+2*dx <= winsize.width) && (y+2*dx+dy <= winsize.height) && (x-dy>= 0) ) { if (dx*2*dy < s1) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_x2",///????????

????

?

???

??

??????

?

?

??

?? x, y, dx*2, dy, -1, x, y, dx , dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // tilted haar_y2 (x, y, w, h, b, weight) if ( (x+dx <= winsize.width) && (y+dx+2*dy <= winsize.height) && (x-2*dy>= 0) ) { if (dx*2*dy < s1) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_y2", x, y, dx, 2*dy, -1, x, y, dx, dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // tilted haar_x3 (x, y, w, h, b, weight) if ( (x+3*dx <= winsize.width) && (y+3*dx+dy <= winsize.height) && (x-dy>= 0) ) { if (dx*3*dy < s2) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_x3",//////?

??????????????????

?

?

??

?? x, y, dx*3, dy, -1, x+dx, y+dx, dx , dy, +3 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // tilted haar_y3 (x, y, w, h, b, weight) if ( (x+dx <= winsize.width) && (y+dx+3*dy <= winsize.height) && (x-3*dy>= 0) ) { if (dx*3*dy < s2) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_y3",//?

?

?????????

???

?????????

? x, y, dx, 3*dy, -1, x-dy, y+dy, dx, dy, +3 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // tilted haar_x4 (x, y, w, h, b, weight) if ( (x+4*dx <= winsize.width) && (y+4*dx+dy <= winsize.height) && (x-dy>= 0) ) { if (dx*4*dy < s3) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_x4",//???????

??

???

???

??????? x, y, dx*4, dy, -1, x+dx, y+dx, dx*2, dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } // tilted haar_y4 (x, y, w, h, b, weight) if ( (x+dx <= winsize.width) && (y+dx+4*dy <= winsize.height) && (x-4*dy>= 0) ) { if (dx*4*dy < s3) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_y4",//???????

???

?

??

??? x, y, dx, 4*dy, -1, x-dy, y+dy, dx, 2*dy, +2 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } /* // tilted point if ( (x+dx*3 <= winsize.width - 1) && (y+dy*3 <= winsize.height - 1) && (x-3*dy>= 0)) { if (dx*9*dy < 36) continue; if (!symmetric || (x <= (winsize.width / 2) )) { haarFeature = cvHaarFeature( "tilted_haar_point", x, y, dx*3, dy*3, -1, x, y+dy, dx , dy, +9 ); CV_WRITE_SEQ_ELEM( haarFeature, writer ); } } */ } } } } } seq = cvEndWriteSeq( &writer );//函数cvEndWriteSeq完毕写入操作并返回指向被写入元素的序列的地址, //同一时候,函数将截取最后那个不完整的序列块.块的剩余部分返回到内存之后,序列即能够被安全地读和写. features = (CvIntHaarFeatures*) cvAlloc( sizeof( CvIntHaarFeatures ) + ( sizeof( CvTHaarFeature ) + sizeof( CvFastHaarFeature ) ) * seq->total ); //opencv中全部的内存分配和释放都是通过cvAlloc和cvFree合作完毕的. //假设cvAlloc/cvFree不是匹配出现, 那么能够觉得出现了内存泄漏. features->feature = (CvTHaarFeature*) (features + 1); features->fastfeature = (CvFastHaarFeature*) ( features->feature + seq->total ); features->count = seq->total; features->winsize = winsize; //复制序列的全部或部分到一个连续内存数组中。 cvCvtSeqToArray( seq, (CvArr*) features->feature ); cvReleaseMemStorage( &storage ); //将普通的特征转化为高速计算的haar特征 icvConvertToFastHaarFeature( features->feature, features->fastfeature, features->count, (winsize.width + 1) ); return features; } static void icvReleaseIntHaarFeatures( CvIntHaarFeatures** intHaarFeatures ) { if( intHaarFeatures != NULL && (*intHaarFeatures) != NULL )//??

??????????????? { cvFree( intHaarFeatures ); (*intHaarFeatures) = NULL; } } /* *icvConvertToFastHaarFeature函数的定义 */ void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature, //输入数组 CvFastHaarFeature* fastHaarFeature, //输出数组 int size, int step ) //size表示数组的大小。step表示积分图的row step,一步能够是8位。比方灰度图;一步也能够是24位。比方RGB图 { int i = 0;//表示特征的个数 int j = 0;//表示一个特征里小矩形的个数 for( i = 0; i < size; i++ ) { fastHaarFeature[i].tilted = haarFeature[i].tilted; if( !fastHaarFeature[i].tilted )//当为直立特征 { for( j = 0; j < CV_HAAR_FEATURE_MAX; j++ ) { fastHaarFeature[i].rect[j].weight = haarFeature[i].rect[j].weight; //假设没有该矩形特征,或者已经计算结束,则跳出循环 if( fastHaarFeature[i].rect[j].weight == 0.0F ) { break; } //对于垂直的haar特征,利用上述定义的宏把普通的haar特征转化为能高速计算的haar特征 CV_SUM_OFFSETS( fastHaarFeature[i].rect[j].p0, fastHaarFeature[i].rect[j].p1, fastHaarFeature[i].rect[j].p2, fastHaarFeature[i].rect[j].p3, haarFeature[i].rect[j].r, step ) } } else //当为旋转特征时 { for( j = 0; j < CV_HAAR_FEATURE_MAX; j++ ) { fastHaarFeature[i].rect[j].weight = haarFeature[i].rect[j].weight; if( fastHaarFeature[i].rect[j].weight == 0.0F ) { break; } ////对于旋转的haar特征,利用上述定义的宏把普通的haar特征转化为能高速计算的haar特征 CV_TILTED_OFFSETS( fastHaarFeature[i].rect[j].p0, fastHaarFeature[i].rect[j].p1, fastHaarFeature[i].rect[j].p2, fastHaarFeature[i].rect[j].p3, haarFeature[i].rect[j].r, step ) } } } } /* * icvCreateHaarTrainingData * * Create haar training data used in stage training 功能:为训练分类器分配所需内存。而且返回内存地址 输入: Winsize:样本图像大小 Maxnumsamples:样本总数。Maxnumsamples=正样本总是+负样本总数。 输出:CvHaarTrainigData*data,data指向分配的内存首地址。 伪代码: 为data的各个成员分配内存。 Return data;http://www.opencvchina.com/thread-191-1-1.html */ static CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples ) { CvHaarTrainigData* data; /* #define CV_FUNCNAME( Name ) / static char cvFuncName[] = Name CV_FUNCNAME 定义变量 cvFuncName存放函数名,用于出错时能够报告出错的函数 */ CV_FUNCNAME( "icvCreateHaarTrainingData" ); /* __BEGIN__ 和__END__配套使用,当出现error时,EXIT cxerror.h 中 #define __BEGIN__ { #define __END__ goto 
exit; exit: ; } 对于代码中 __BEGIN__ 和__END__后面多加一个分号 的解释: 由于 __BEGIN__;等价于{; ,当中分号(;)为一个空语句,是合理的,但不要也行.__END__也一样. */ __BEGIN__; data = http://www.mamicode.com/NULL;>

//由于size_t类型的数据事实上是保存了一个整数,所以它也能够做加减乘除,也能够转化为int并赋值给int类型的变量。 datasize = sizeof( CvHaarTrainigData ) + /* sum and tilted */ ( 2 * (winsize.width + 1) * (winsize.height + 1) * sizeof( sum_type ) + sizeof( float ) + /* normfactor */ sizeof( float ) + /* cls */ sizeof( float ) /* weight */ ) * maxnumsamples; CV_CALL( data = (CvHaarTrainigData*) cvAlloc( datasize ) );//??????

???

???

?

?? /* *void *memset(void *s, int ch, size_t n); *函数解释:将s中前n个字节 (typedef unsigned int size_t )用 ch 替换并返回 s 。 *memset:作用是在一段内存块中填充某个给定的值,它是对较大的结构体或数组进行清零操作的一种最快方法[1] 。 */ memset( (void*)data, 0, datasize ); data->maxnum = maxnumsamples; data->winsize = winsize; ptr = (uchar*)(data + 1); data->sum = cvMat( maxnumsamples, (winsize.width + 1) * (winsize.height + 1), CV_SUM_MAT_TYPE, (void*) ptr ); ptr += sizeof( sum_type ) * maxnumsamples * (winsize.width+1) * (winsize.height+1); data->tilted = cvMat( maxnumsamples, (winsize.width + 1) * (winsize.height + 1), CV_SUM_MAT_TYPE, (void*) ptr ); ptr += sizeof( sum_type ) * maxnumsamples * (winsize.width+1) * (winsize.height+1); data->normfactor = cvMat( 1, maxnumsamples, CV_32FC1, (void*) ptr ); ptr += sizeof( float ) * maxnumsamples; data->cls = cvMat( 1, maxnumsamples, CV_32FC1, (void*) ptr ); ptr += sizeof( float ) * maxnumsamples; data->weights = cvMat( 1, maxnumsamples, CV_32FC1, (void*) ptr ); data->valcache = NULL; data->idxcache = NULL; __END__; return data; } /* *icvReleaseHaarTrainingDataCache的作用 *释放训练样本数据缓存,包括样本特征的缓存,以及排序后的样本特征的缓存 */ static void icvReleaseHaarTrainingDataCache( CvHaarTrainigData** haarTrainingData ) { if( haarTrainingData != NULL && (*haarTrainingData) != NULL ) { //CvMat* valcache; 样本的特征,一共同拥有maxnum行 。特征总数(CvIntHaarFeatures.count)个列 if( (*haarTrainingData)->valcache != NULL ) { cvReleaseMat( &(*haarTrainingData)->valcache ); (*haarTrainingData)->valcache = NULL; } // CvMat* idxcache对样本标号按特征值升序排序。一共同拥有特征总数(CvIntHaarFeatures.count)个行,maxnum 个列 if( (*haarTrainingData)->idxcache != NULL ) { cvReleaseMat( &(*haarTrainingData)->idxcache ); (*haarTrainingData)->idxcache = NULL; } } } /* *icvReleaseHaarTrainingData的作用 *释放训练样本数据缓存 */ static void icvReleaseHaarTrainingData( CvHaarTrainigData** haarTrainingData ) { if( haarTrainingData != NULL && (*haarTrainingData) != NULL ) { icvReleaseHaarTrainingDataCache( haarTrainingData ); cvFree( haarTrainingData ); } } /* *函数icvGetTrainingDataCallback介绍 
*功能:对全部样本计算特征编号从first開始的num个特征,并保存到mat里。

*输入: *CvMat* mat矩阵样本总数个行,num个列。保存每一个样本的num个特征值。

*First:特征类型编号的開始处 *Num:要计算的特征类型个数。 *Userdata:积分矩阵和权重、特征模板等信息。

*输出: *CvMat* mat矩阵样本总数个行。num个列。

保存每一个样本的num个特征值。 */ static void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*, int first, int num, void* userdata ) { int i = 0; int j = 0; float val = 0.0F; float normfactor = 0.0F; CvHaarTrainingData* training_data; CvIntHaarFeatures* haar_features; #ifdef CV_COL_ARRANGEMENT assert( mat->rows >= num ); #else assert( mat->cols >= num ); #endif //userdata = http://www.mamicode.com/cvUserdata( data, haarFeatures )>

CV_ELEM_SIZE( idx->type ) : idx->step; for( i = 0; i < idxnum; i++ ) { index = (int) *((float*) (idxdata + i * idxstep)); if( cvEvalFastHaarFeature( fastfeature, (sum_type*) (data->sum.data.ptr + index * data->sum.step), (sum_type*) (data->tilted.data.ptr + index * data->tilted.step) ) < threshold * data->normfactor.data.fl[index] ) { (*left)->data.fl[(*left)->cols++] = (float) index; } else { (*right)->data.fl[(*right)->cols++] = (float) index; } } } } /* * icvCreateCARTStageClassifier * * Create stage classifier with trees as weak classifiers * data - haar training data. It must be created and filled before call * minhitrate - desired min hit rate * maxfalsealarm - desired max false alarm rate * symmetric - if not 0 it is assumed that samples are vertically symmetric * numprecalculated - number of features that will be precalculated. Each precalculated * feature need (number_of_samples*(sizeof( float ) + sizeof( short ))) bytes of memory * weightfraction - weight trimming parameter * numsplits - number of binary splits in each tree * boosttype - type of applied boosting algorithm * stumperror - type of used error if Discrete AdaBoost algorithm is applied * maxsplits - maximum total number of splits in all weak classifiers. * If it is not 0 then NULL returned if total number of splits exceeds <maxsplits>. 
*/ static //icvCreateCARTStageClassifier部分内容选自http://www.sjsjw.com/kf_www/article/000119ABA007840.asp CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data, // 全部训练样本 CvMat* sampleIdx, // 实际训练样本序列 CvIntHaarFeatures* haarFeatures, // 全部HAAR特征 float minhitrate, // 最小正检率(用于确定强分类器阈值) float maxfalsealarm, // 最大误检率(用于确定是否收敛) int symmetric, // HAAR是否对称 float weightfraction, // 样本剔除比例(用于剔除小权值样本) int numsplits, // 每一个弱分类器特征个数(一般为1) CvBoostType boosttype, // adaboost类型 CvStumpError stumperror, // Discrete AdaBoost中的阈值计算方式 int maxsplits ) // 弱分类器最大个数{ #ifdef CV_COL_ARRANGEMENT int flags = CV_COL_SAMPLE; #else int flags = CV_ROW_SAMPLE; #endif CvStageHaarClassifier* stage = NULL; // 强分类器 CvBoostTrainer* trainer; // 暂时训练器。用于更新样本权值 CvCARTClassifier* cart = NULL; // 弱分类器 CvCARTTrainParams trainParams; // 训练參数 CvMTStumpTrainParams stumpTrainParams; // 弱分类器參数 //CvMat* trainData = http://www.mamicode.com/NULL;>

eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) ); storage = cvCreateMemStorage(); // 最优弱分类器存储序列 seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage ); // 样本类别,仅仅有logitboost才会用到 weakTrainVals = cvCreateMat( 1, m, CV_32FC1 ); // 初始化样本类别与权重。weakTrainVals为{-1, 1},权重都一样 trainer = cvBoostStartTraining( &data->cls, weakTrainVals, &data->weights, sampleIdx, boosttype ); num_splits = 0; sumalpha = 0.0F; do { #ifdef CV_VERBOSE int v_wt = 0; int v_flipped = 0; #endif /* CV_VERBOSE */ trimmedIdx = cvTrimWeights( &data->weights, sampleIdx, weightfraction ); numtrimmed = (trimmedIdx) ?

MAX( trimmedIdx->rows, trimmedIdx->cols ) : m; #ifdef CV_VERBOSE v_wt = 100 * numtrimmed / numsamples; v_flipped = 0; #endif /* CV_VERBOSE */ // 重要函数,创建CART树的同一时候,计算出当前最优弱分类器,一般仅仅有根节点 cart = (CvCARTClassifier*) cvCreateCARTClassifier( data->valcache, flags, weakTrainVals, 0, 0, 0, trimmedIdx, &(data->weights), (CvClassifierTrainParams*) &trainParams ); // 创建弱分类器 classifier = (CvCARTHaarClassifier*) icvCreateCARTHaarClassifier( numsplits ); // 将CART树转化为弱分类器 icvInitCARTHaarClassifier( classifier, cart, haarFeatures ); num_splits += classifier->count; cart->release( (CvClassifier**) &cart ); // 为何一定要在奇数个弱分类器处计算? if( symmetric && (seq->total % 2) ) { float normfactor = 0.0F; CvStumpClassifier* stump; /* 翻转HAAR特征 */ for( i = 0; i < classifier->count; i++ ) { if( classifier->feature[i].desc[0] == 'h' ) { for( j = 0; j < CV_HAAR_FEATURE_MAX && classifier->feature[i].rect[j].weight != 0.0F; j++ ) { classifier->feature[i].rect[j].r.x = data->winsize.width - classifier->feature[i].rect[j].r.x - classifier->feature[i].rect[j].r.width; } } else { int tmp = 0; /* (x,y) -> (24-x,y) */ /* w -> h; h -> w */ for( j = 0; j < CV_HAAR_FEATURE_MAX && classifier->feature[i].rect[j].weight != 0.0F; j++ ) { classifier->feature[i].rect[j].r.x = data->winsize.width - classifier->feature[i].rect[j].r.x; CV_SWAP( classifier->feature[i].rect[j].r.width, classifier->feature[i].rect[j].r.height, tmp ); } } } // 转化为基于积分图计算的特征 icvConvertToFastHaarFeature( classifier->feature, classifier->fastfeature, classifier->count, data->winsize.width + 1 ); // 为了验证最新翻转特征是否为最优特征 stumpTrainParams.getTrainData = http://www.mamicode.com/NULL;>

'+' : '-' ), threshold, v_hitrate / numpos, v_falsealarm / numneg, v_experr ); printf( "+----+----+-+---------+---------+---------+---------+\n" ); fflush( stdout ); } #endif /* CV_VERBOSE */ // 两种收敛方式,一种是误检率小于规定阈值,还有一种是弱分类器个数小于规定阈值 } while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) ); cvBoostEndTraining( &trainer ); if( falsealarm > maxfalsealarm ) { stage = NULL; } else { stage = (CvStageHaarClassifier*) icvCreateStageHaarClassifier( seq->total, threshold ); cvCvtSeqToArray( seq, (CvArr*) stage->classifier ); } /* CLEANUP */ cvReleaseMemStorage( &storage ); cvReleaseMat( &weakTrainVals ); cvFree( &(eval.data.ptr) ); return (CvIntHaarClassifier*) stage; } static CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize ) { CvBackgroundData* data = http://www.mamicode.com/NULL;>

1.0F / (poscount + negcount) : (0.5F / poscount); negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / negcount); for( j = 0; j < poscount; j++ ) { data->weights.data.fl[j] = posweight; data->cls.data.fl[j] = 1.0F; } for( j = poscount; j < poscount + negcount; j++ ) { data->weights.data.fl[j] = negweight; data->cls.data.fl[j] = 0.0F; } #ifdef CV_VERBOSE proctime = -TIME( 0 ); #endif /* CV_VERBOSE */ icvPrecalculate( data, haar_features, numprecalculated ); #ifdef CV_VERBOSE printf( "PRECALCULATION TIME: %.2f\n", (proctime + TIME( 0 )) ); #endif /* CV_VERBOSE */ #ifdef CV_VERBOSE proctime = -TIME( 0 ); #endif /* CV_VERBOSE */ cascade->classifier[i] = icvCreateCARTStageClassifier( data, NULL, haar_features, minhitrate, maxfalsealarm, symmetric, weightfraction, numsplits, (CvBoostType) boosttype, (CvStumpError) stumperror, 0 ); #ifdef CV_VERBOSE printf( "STAGE TRAINING TIME: %.2f\n", (proctime + TIME( 0 )) ); #endif /* CV_VERBOSE */ file = fopen( stagename, "w" ); if( file != NULL ) { cascade->classifier[i]->save( (CvIntHaarClassifier*) cascade->classifier[i], file ); fclose( file ); } else { #ifdef CV_VERBOSE printf( "FAILED TO SAVE STAGE CLASSIFIER IN FILE %s\n", stagename ); #endif /* CV_VERBOSE */ } } icvReleaseIntHaarFeatures( &haar_features ); icvReleaseHaarTrainingData( &data ); if( i == nstages ) { char xml_path[1024]; int len = (int)strlen(dirname); CvHaarClassifierCascade* cascade1 = 0; strcpy( xml_path, dirname ); if( xml_path[len-1] == '\\' || xml_path[len-1] == '/' ) len--; strcpy( xml_path + len, ".xml" ); cascade1 = cvLoadHaarClassifierCascade( dirname, cvSize(winwidth,winheight) ); if( cascade1 ) cvSave( xml_path, cascade1 ); cvReleaseHaarClassifierCascade( &cascade1 ); } } else { #ifdef CV_VERBOSE printf( "FAILED TO INITIALIZE BACKGROUND READERS\n" ); #endif /* CV_VERBOSE */ } /* CLEAN UP */ icvDestroyBackgroundReaders(); cascade->release( (CvIntHaarClassifier**) &cascade ); } /* tree cascade classifier */ static int icvNumSplits( 
CvStageHaarClassifier* stage ) { int i; int num; num = 0; for( i = 0; i < stage->count; i++ ) { num += ((CvCARTHaarClassifier*) stage->classifier[i])->count; } return num; } static void icvSetNumSamples( CvHaarTrainingData* training_data, int num ) { assert( num <= training_data->maxnum ); training_data->sum.rows = training_data->tilted.rows = num; training_data->normfactor.cols = num; training_data->cls.cols = training_data->weights.cols = num; } static void icvSetWeightsAndClasses( CvHaarTrainingData* training_data, int num1, float weight1, float cls1, int num2, float weight2, float cls2 ) { int j; assert( num1 + num2 <= training_data->maxnum ); for( j = 0; j < num1; j++ ) { training_data->weights.data.fl[j] = weight1; training_data->cls.data.fl[j] = cls1; } for( j = num1; j < num1 + num2; j++ ) { training_data->weights.data.fl[j] = weight2; training_data->cls.data.fl[j] = cls2; } } static CvMat* icvGetUsedValues( CvHaarTrainingData* training_data, int start, int num, CvIntHaarFeatures* haar_features, CvStageHaarClassifier* stage ) { CvMat* ptr = NULL; CvMat* feature_idx = NULL; CV_FUNCNAME( "icvGetUsedValues" ); __BEGIN__; int num_splits; int i, j; int r; int total, last; num_splits = icvNumSplits( stage ); CV_CALL( feature_idx = cvCreateMat( 1, num_splits, CV_32SC1 ) ); total = 0; for( i = 0; i < stage->count; i++ ) { CvCARTHaarClassifier* cart; cart = (CvCARTHaarClassifier*) stage->classifier[i]; for( j = 0; j < cart->count; j++ ) { feature_idx->data.i[total++] = cart->compidx[j]; } } icvSort_32s( feature_idx->data.i, total, 0 ); last = 0; for( i = 1; i < total; i++ ) { if( feature_idx->data.i[i] != feature_idx->data.i[last] ) { feature_idx->data.i[++last] = feature_idx->data.i[i]; } } total = last + 1; CV_CALL( ptr = cvCreateMat( num, total, CV_32FC1 ) ); #ifdef CV_OPENMP #pragma omp parallel for #endif for( r = start; r < start + num; r++ ) { int c; for( c = 0; c < total; c++ ) { float val, normfactor; int fnum; fnum = feature_idx->data.i[c]; val = 
cvEvalFastHaarFeature( haar_features->fastfeature + fnum, (sum_type*) (training_data->sum.data.ptr + r * training_data->sum.step), (sum_type*) (training_data->tilted.data.ptr + r * training_data->tilted.step) ); normfactor = training_data->normfactor.data.fl[r]; val = ( normfactor == 0.0F ) ? 0.0F : (val / normfactor); CV_MAT_ELEM( *ptr, float, r - start, c ) = val; } } __END__; cvReleaseMat( &feature_idx ); return ptr; } /* possible split in the tree */ typedef struct CvSplit { CvTreeCascadeNode* parent; CvTreeCascadeNode* single_cluster; CvTreeCascadeNode* multiple_clusters; int num_clusters; float single_multiple_ratio; struct CvSplit* next; } CvSplit; void cvCreateTreeCascadeClassifier( const char* dirname, const char* vecfilename, const char* bgfilename, int npos, int nneg, int nstages, int numprecalculated, int numsplits, float minhitrate, float maxfalsealarm, float weightfraction, int mode, int symmetric, int equalweights, int winwidth, int winheight, int boosttype, int stumperror, int maxtreesplits, int minpos, bool bg_vecfile ) { CvTreeCascadeClassifier* tcc = NULL; CvIntHaarFeatures* haar_features = NULL; CvHaarTrainingData* training_data = http://www.mamicode.com/NULL;>

1.0F / (poscount + negcount) : (0.5F/poscount); negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/negcount); icvSetWeightsAndClasses( training_data, poscount, posweight, 1.0F, negcount, negweight, 0.0F ); fflush( stdout ); /* precalculate feature values */ proctime = -TIME( 0 ); icvPrecalculate( training_data, haar_features, numprecalculated ); printf( "Precalculation time: %.2f\n", (proctime + TIME( 0 )) ); /* train stage classifier using all positive samples */ CV_CALL( single_cluster = icvCreateTreeCascadeNode() ); fflush( stdout ); proctime = -TIME( 0 ); single_cluster->stage = (CvStageHaarClassifier*) icvCreateCARTStageClassifier( training_data, NULL, haar_features, minhitrate, maxfalsealarm, symmetric, weightfraction, numsplits, (CvBoostType) boosttype, (CvStumpError) stumperror, 0 ); printf( "Stage training time: %.2f\n", (proctime + TIME( 0 )) ); single_num = icvNumSplits( single_cluster->stage ); best_num = single_num; best_clusters = 1; multiple_clusters = NULL; printf( "Number of used features: %d\n", single_num ); if( maxtreesplits >= 0 ) { max_clusters = MIN( max_clusters, maxtreesplits - total_splits + 1 ); } /* try clustering */ vals = NULL; for( k = 2; k <= max_clusters; k++ ) { int cluster; int stop_clustering; printf( "*** %d clusters ***\n", k ); /* check whether clusters are big enough */ stop_clustering = ( k * minpos > poscount ); if( !stop_clustering ) { int num[CV_MAX_CLUSTERS]; if( k == 2 ) { proctime = -TIME( 0 ); CV_CALL( vals = icvGetUsedValues( training_data, 0, poscount, haar_features, single_cluster->stage ) ); printf( "Getting values for clustering time: %.2f\n", (proctime + TIME(0)) ); printf( "Value matirx size: %d x %d\n", vals->rows, vals->cols ); fflush( stdout ); cluster_idx->cols = vals->rows; for( i = 0; i < negcount; i++ ) idx->data.i[i] = poscount + i; } proctime = -TIME( 0 ); CV_CALL( cvKMeans2( vals, k, cluster_idx, CV_TERM_CRITERIA() ) ); printf( "Clustering time: %.2f\n", (proctime + TIME( 0 )) ); for( 
cluster = 0; cluster < k; cluster++ ) num[cluster] = 0; for( i = 0; i < cluster_idx->cols; i++ ) num[cluster_idx->data.i[i]]++; for( cluster = 0; cluster < k; cluster++ ) { if( num[cluster] < minpos ) { stop_clustering = 1; break; } } } if( stop_clustering ) { printf( "Clusters are too small. Clustering aborted.\n" ); break; } cur_num = 0; cur_node = last_node = NULL; for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ ) { CvTreeCascadeNode* new_node; int num_splits; int last_pos; int total_pos; printf( "Cluster: %d\n", cluster ); last_pos = negcount; for( i = 0; i < cluster_idx->cols; i++ ) { if( cluster_idx->data.i[i] == cluster ) { idx->data.i[last_pos++] = i; } } idx->cols = last_pos; total_pos = idx->cols - negcount; printf( "# pos: %d of %d. (%d%%)\n", total_pos, poscount, 100 * total_pos / poscount ); CV_CALL( new_node = icvCreateTreeCascadeNode() ); if( last_node ) last_node->next = new_node; else cur_node = new_node; last_node = new_node; posweight = (equalweights) ? 1.0F / (total_pos + negcount) : (0.5F / total_pos); negweight = (equalweights) ?

1.0F / (total_pos + negcount) : (0.5F / negcount); icvSetWeightsAndClasses( training_data, poscount, posweight, 1.0F, negcount, negweight, 0.0F ); /* CV_DEBUG_SAVE( idx ); */ fflush( stdout ); proctime = -TIME( 0 ); new_node->stage = (CvStageHaarClassifier*) icvCreateCARTStageClassifier( training_data, idx, haar_features, minhitrate, maxfalsealarm, symmetric, weightfraction, numsplits, (CvBoostType) boosttype, (CvStumpError) stumperror, best_num - cur_num ); printf( "Stage training time: %.2f\n", (proctime + TIME( 0 )) ); if( !(new_node->stage) ) { printf( "Stage training aborted.\n" ); cur_num = best_num + 1; } else { num_splits = icvNumSplits( new_node->stage ); cur_num += num_splits; printf( "Number of used features: %d\n", num_splits ); } } /* for each cluster */ if( cur_num < best_num ) { icvReleaseTreeCascadeNodes( &multiple_clusters ); best_num = cur_num; best_clusters = k; multiple_clusters = cur_node; } else { icvReleaseTreeCascadeNodes( &cur_node ); } } /* try different number of clusters */ cvReleaseMat( &vals ); CvSplit* curSplit; CV_CALL( curSplit = (CvSplit*) cvAlloc( sizeof( *curSplit ) ) ); CV_ZERO_OBJ( curSplit ); if( last_split ) last_split->next = curSplit; else first_split = curSplit; last_split = curSplit; curSplit->single_cluster = single_cluster; curSplit->multiple_clusters = multiple_clusters; curSplit->num_clusters = best_clusters; curSplit->parent = parent; curSplit->single_multiple_ratio = (float) single_num / best_num; } if( parent ) parent = parent->next_same_level; } while( parent ); /* choose which nodes should be splitted */ do { float max_single_multiple_ratio; cur_split = NULL; max_single_multiple_ratio = 0.0F; last_split = first_split; while( last_split ) { if( last_split->single_cluster && last_split->multiple_clusters && last_split->single_multiple_ratio > max_single_multiple_ratio ) { max_single_multiple_ratio = last_split->single_multiple_ratio; cur_split = last_split; } last_split = last_split->next; } if( cur_split ) { if( 
maxtreesplits < 0 || cur_split->num_clusters <= maxtreesplits - total_splits + 1 ) { cur_split->single_cluster = NULL; total_splits += cur_split->num_clusters - 1; } else { icvReleaseTreeCascadeNodes( &(cur_split->multiple_clusters) ); cur_split->multiple_clusters = NULL; } } } while( cur_split ); /* attach new nodes to the tree */ leaves = last_node = NULL; last_split = first_split; while( last_split ) { cur_node = (last_split->multiple_clusters) ? last_split->multiple_clusters : last_split->single_cluster; parent = last_split->parent; if( parent ) parent->child = cur_node; /* connect leaves via next_same_level and save them */ for( ; cur_node; cur_node = cur_node->next ) { FILE* file; if( last_node ) last_node->next_same_level = cur_node; else leaves = cur_node; last_node = cur_node; cur_node->parent = parent; cur_node->idx = tcc->next_idx; tcc->next_idx++; sprintf( suffix, "%d/%s", cur_node->idx, CV_STAGE_CART_FILE_NAME ); file = NULL; if( icvMkDir( stage_name ) && (file = fopen( stage_name, "w" )) != 0 ) { cur_node->stage->save( (CvIntHaarClassifier*) cur_node->stage, file ); fprintf( file, "\n%d\n%d\n", ((parent) ? parent->idx : -1), ((cur_node->next) ?

tcc->next_idx : -1) ); } else { printf( "Failed to save classifier into %s\n", stage_name ); } if( file ) fclose( file ); } if( parent ) sprintf( buf, "%d", parent->idx ); else sprintf( buf, "NULL" ); printf( "\nParent node: %s\n", buf ); printf( "Chosen number of splits: %d\n\n", (last_split->multiple_clusters) ? (last_split->num_clusters - 1) : 0 ); cur_split = last_split; last_split = last_split->next; cvFree( &cur_split ); } /* for each split point */ printf( "Total number of splits: %d\n", total_splits ); if( !(tcc->root) ) tcc->root = leaves; CV_CALL( icvPrintTreeCascade( tcc->root ) ); } while( leaves ); /* save the cascade to xml file */ { char xml_path[1024]; int len = (int)strlen(dirname); CvHaarClassifierCascade* cascade = 0; strcpy( xml_path, dirname ); if( xml_path[len-1] == '\\' || xml_path[len-1] == '/' ) len--; strcpy( xml_path + len, ".xml" ); cascade = cvLoadHaarClassifierCascade( dirname, cvSize(winwidth,winheight) ); if( cascade ) cvSave( xml_path, cascade ); cvReleaseHaarClassifierCascade( &cascade ); } } /* if( nstages > 0 ) */ /* check cascade performance */ printf( "\nCascade performance\n" ); tcc->eval = icvEvalTreeCascadeClassifier; /* load samples */ consumed = 0; poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos, (CvIntHaarClassifier*) tcc, vecfilename, &consumed ); printf( "POS: %d %d %f\n", poscount, consumed, (consumed > 0) ?

(((float) poscount)/consumed) : 0 ); if( poscount <= 0 ) fprintf( stderr, "Warning: unable to obtain positive samples\n" ); proctime = -TIME( 0 ); negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg, (CvIntHaarClassifier*) tcc, &false_alarm, bg_vecfile ? bgfilename : NULL ); printf( "NEG: %d %g\n", negcount, false_alarm ); printf( "BACKGROUND PROCESSING TIME: %.2f\n", (proctime + TIME( 0 )) ); if( negcount <= 0 ) fprintf( stderr, "Warning: unable to obtain negative samples\n" ); __END__; if (! bg_vecfile) icvDestroyBackgroundReaders(); if( tcc ) tcc->release( (CvIntHaarClassifier**) &tcc ); icvReleaseIntHaarFeatures( &haar_features ); icvReleaseHaarTrainingData( &training_data ); cvReleaseMat( &cluster_idx ); cvReleaseMat( &idx ); cvReleaseMat( &vals ); cvReleaseMat( &features_idx ); } void cvCreateTrainingSamples( const char* filename, const char* imgfilename, int bgcolor, int bgthreshold, const char* bgfilename, int count, int invert, int maxintensitydev, double maxxangle, double maxyangle, double maxzangle, int showsamples, int winwidth, int winheight ) { CvSampleDistortionData data; assert( filename != NULL ); assert( imgfilename != NULL ); if( !icvMkDir( filename ) ) { fprintf( stderr, "Unable to create output file: %s\n", filename ); return; } if( icvStartSampleDistortion( imgfilename, bgcolor, bgthreshold, &data ) ) { FILE* output = NULL; output = fopen( filename, "wb" ); if( output != NULL ) { int hasbg; int i; CvMat sample; int inverse; hasbg = 0; hasbg = (bgfilename != NULL && icvInitBackgroundReaders( bgfilename, cvSize( winwidth,winheight ) ) ); sample = cvMat( winheight, winwidth, CV_8UC1, cvAlloc( sizeof( uchar ) * winheight * winwidth ) ); icvWriteVecHeader( output, count, sample.cols, sample.rows ); if( showsamples ) { cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE ); } inverse = invert; for( i = 0; i < count; i++ ) { if( hasbg ) { icvGetBackgroundImage( cvbgdata, cvbgreader, &sample ); } else { cvSet( &sample, cvScalar( bgcolor ) ); 
} if( invert == CV_RANDOM_INVERT ) { inverse = (rand() > (RAND_MAX/2)); } icvPlaceDistortedSample( &sample, inverse, maxintensitydev, maxxangle, maxyangle, maxzangle, 0 /* nonzero means placing image without cut offs */, 0.0 /* nozero adds random shifting */, 0.0 /* nozero adds random scaling */, &data ); if( showsamples ) { cvShowImage( "Sample", &sample ); if( cvWaitKey( 0 ) == 27 ) { showsamples = 0; } } icvWriteVecSample( output, &sample ); #ifdef CV_VERBOSE if( i % 500 == 0 ) { printf( "\r%3d%%", 100 * i / count ); } #endif /* CV_VERBOSE */ } icvDestroyBackgroundReaders(); cvFree( &(sample.data.ptr) ); fclose( output ); } /* if( output != NULL ) */ icvEndSampleDistortion( &data ); } #ifdef CV_VERBOSE printf( "\r \r" ); #endif /* CV_VERBOSE */ } #define CV_INFO_FILENAME "info.dat" void cvCreateTestSamples( const char* infoname, const char* imgfilename, int bgcolor, int bgthreshold, const char* bgfilename, int count, int invert, int maxintensitydev, double maxxangle, double maxyangle, double maxzangle, int showsamples, int winwidth, int winheight ) { CvSampleDistortionData data; assert( infoname != NULL ); assert( imgfilename != NULL ); assert( bgfilename != NULL ); if( !icvMkDir( infoname ) ) { #if CV_VERBOSE fprintf( stderr, "Unable to create directory hierarchy: %s\n", infoname ); #endif /* CV_VERBOSE */ return; } if( icvStartSampleDistortion( imgfilename, bgcolor, bgthreshold, &data ) ) { char fullname[PATH_MAX]; char* filename; CvMat win; FILE* info; if( icvInitBackgroundReaders( bgfilename, cvSize( 10, 10 ) ) ) { int i; int x, y, width, height; float scale; float maxscale; int inverse; if( showsamples ) { cvNamedWindow( "Image", CV_WINDOW_AUTOSIZE ); } info = fopen( infoname, "w" ); strcpy( fullname, infoname ); filename = strrchr( fullname, '\\' ); if( filename == NULL ) { filename = strrchr( fullname, '/' ); } if( filename == NULL ) { filename = fullname; } else { filename++; } count = MIN( count, cvbgdata->count ); inverse = invert; for( i = 0; i < 
count; i++ ) { icvGetNextFromBackgroundData( cvbgdata, cvbgreader ); maxscale = MIN( 0.7F * cvbgreader->src.cols / winwidth, 0.7F * cvbgreader->src.rows / winheight ); if( maxscale < 1.0F ) continue; scale = (maxscale - 1.0F) * rand() / RAND_MAX + 1.0F; width = (int) (scale * winwidth); height = (int) (scale * winheight); x = (int) ((0.1+0.8 * rand()/RAND_MAX) * (cvbgreader->src.cols - width)); y = (int) ((0.1+0.8 * rand()/RAND_MAX) * (cvbgreader->src.rows - height)); cvGetSubArr( &cvbgreader->src, &win, cvRect( x, y ,width, height ) ); if( invert == CV_RANDOM_INVERT ) { inverse = (rand() > (RAND_MAX/2)); } icvPlaceDistortedSample( &win, inverse, maxintensitydev, maxxangle, maxyangle, maxzangle, 1, 0.0, 0.0, &data ); sprintf( filename, "%04d_%04d_%04d_%04d_%04d.jpg", (i + 1), x, y, width, height ); if( info ) { fprintf( info, "%s %d %d %d %d %d\n", filename, 1, x, y, width, height ); } cvSaveImage( fullname, &cvbgreader->src ); if( showsamples ) { cvShowImage( "Image", &cvbgreader->src ); if( cvWaitKey( 0 ) == 27 ) { showsamples = 0; } } } if( info ) fclose( info ); icvDestroyBackgroundReaders(); } icvEndSampleDistortion( &data ); } } /* End of file. */



opencv源代码分析之二:cvhaartraining.cpp