
cvRound Source Code (the cvRound Function)

Contents:

1. Looking for project source code to extract SIFT features from an image with OpenCV
2. What is the principle behind the Gaussian enhancement in ENVI's Enhance menu?
3. How to do face recognition with OpenCV
4. AdaBoost-based face detection: making the detected face region a rectangle instead of a circle
5. error C3861: 'AfxSocketInit': identifier not found

Looking for project source code to extract SIFT features from an image with OpenCV — urgently needed, thanks!

#include "stdafx.h"

#include opencv2/opencv.hpp

double

compareSURFDescriptors( const float* d1, const float* d2, double best, int length )

{

double total_cost = 0;

assert( length % 4 == 0 );

for( int i = 0; i length; i += 4 )

{

double t0 = d1[i ] - d2[i ];

double t1 = d1[i+1] - d2[i+1];

double t2 = d1[i+2] - d2[i+2];

double t3 = d1[i+3] - d2[i+3];

total_cost += t0*t0 + t1*t1 + t2*t2 + t3*t3;

if( total_cost best )

break;

}

return total_cost;

}

int

naiveNearestNeighbor( const float* vec, int laplacian,

const CvSeq* model_keypoints,

const CvSeq* model_descriptors )

{

int length = (int)(model_descriptors-elem_size/sizeof(float));

int i, neighbor = -1;

double d, dist1 = 1e6, dist2 = 1e6;

CvSeqReader reader, kreader;

cvStartReadSeq( model_keypoints, kreader, 0 );

cvStartReadSeq( model_descriptors, reader, 0 );

for( i = 0; i model_descriptors-total; i++ )

{

const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;

const float* mvec = (const float*)reader.ptr;

CV_NEXT_SEQ_ELEM( kreader.seq-elem_size, kreader );

CV_NEXT_SEQ_ELEM( reader.seq-elem_size, reader );

if( laplacian != kp-laplacian )

continue;

d = compareSURFDescriptors( vec, mvec, dist2, length );

if( d dist1 )

{

dist2 = dist1;

dist1 = d;

neighbor = i;

}

else if ( d dist2 )

dist2 = d;

}

if ( dist1 0.6*dist2 )

return neighbor;

return -1;

}

void

findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,

const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vectorint ptpairs )

{

int i;

CvSeqReader reader, kreader;

cvStartReadSeq( objectKeypoints, kreader );

cvStartReadSeq( objectDescriptors, reader );

ptpairs.clear();

for( i = 0; i objectDescriptors-total; i++ )

{

const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;

const float* descriptor = (const float*)reader.ptr;

CV_NEXT_SEQ_ELEM( kreader.seq-elem_size, kreader );

CV_NEXT_SEQ_ELEM( reader.seq-elem_size, reader );

int nearest_neighbor = naiveNearestNeighbor( descriptor, kp-laplacian, imageKeypoints, imageDescriptors );

if( nearest_neighbor = 0 )

{

ptpairs.push_back(i);

ptpairs.push_back(nearest_neighbor);

}

}

}

void

flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,

const CvSeq*, const CvSeq* imageDescriptors, vectorint ptpairs )

{

int length = (int)(objectDescriptors-elem_size/sizeof(float));

cv::Mat m_object(objectDescriptors-total, length, CV_32F);

cv::Mat m_image(imageDescriptors-total, length, CV_32F);

// copy descriptors

CvSeqReader obj_reader;

float* obj_ptr = m_object.ptrfloat(0);

cvStartReadSeq( objectDescriptors, obj_reader );

for(int i = 0; i objectDescriptors-total; i++ )

{

const float* descriptor = (const float*)obj_reader.ptr;

CV_NEXT_SEQ_ELEM( obj_reader.seq-elem_size, obj_reader );

memcpy(obj_ptr, descriptor, length*sizeof(float));

obj_ptr += length;

}

CvSeqReader img_reader;

float* img_ptr = m_image.ptrfloat(0);

cvStartReadSeq( imageDescriptors, img_reader );

for(int i = 0; i imageDescriptors-total; i++ )

{

const float* descriptor = (const float*)img_reader.ptr;

CV_NEXT_SEQ_ELEM( img_reader.seq-elem_size, img_reader );

memcpy(img_ptr, descriptor, length*sizeof(float));

img_ptr += length;

}

// find nearest neighbors using FLANN

cv::Mat m_indices(objectDescriptors-total, 2, CV_32S);

cv::Mat m_dists(objectDescriptors-total, 2, CV_32F);

cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4)); // using 4 randomized kdtrees

flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked

int* indices_ptr = m_indices.ptrint(0);

float* dists_ptr = m_dists.ptrfloat(0);

for (int i=0;im_indices.rows;++i) {

if (dists_ptr[2*i]0.6*dists_ptr[2*i+1]) {

ptpairs.push_back(i);

ptpairs.push_back(indices_ptr[2*i]);

}

}

}

/* a rough implementation for object location */

int

locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,

const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,

const CvPoint src_corners[4], CvPoint dst_corners[4] )

{

double h[9];

CvMat _h = cvMat(3, 3, CV_64F, h);

vectorint ptpairs;

vectorCvPoint2D32f pt1, pt2;

CvMat _pt1, _pt2;

int i, n;

#ifdef USE_FLANN

flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );

#else

findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );

#endif

n = (int)(ptpairs.size()/2);

if( n 4 )

return 0;

pt1.resize(n);

pt2.resize(n);

for( i = 0; i n; i++ )

{

pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))-pt;

pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))-pt;

}

_pt1 = cvMat(1, n, CV_32FC2, pt1[0] );

_pt2 = cvMat(1, n, CV_32FC2, pt2[0] );

if( !cvFindHomography( _pt1, _pt2, _h, CV_RANSAC, 5 ))

return 0;

for( i = 0; i 4; i++ )

{

double x = src_corners[i].x, y = src_corners[i].y;

double Z = 1./(h[6]*x + h[7]*y + h[8]);

double X = (h[0]*x + h[1]*y + h[2])*Z;

double Y = (h[3]*x + h[4]*y + h[5])*Z;

dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));

}

return 1;

}

int main(int argc, char** argv)

{

const char* object_filename = argc == 3 ? argv[1] : "box.png";

const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";

IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );

IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );

if( !object || !image )

{

fprintf( stderr, "Can not load %s and/or %s\n",

object_filename, scene_filename );

exit(-1);

}

CvMemStorage* storage = cvCreateMemStorage(0);

cvNamedWindow("Object", 1);

cvNamedWindow("Object Correspond", 1);

static CvScalar colors[] =

{

{{0,0,255}},

{{0,128,255}},

{{0,255,255}},

{{0,255,0}},

{{255,128,0}},

{{255,255,0}},

{{255,0,0}},

{{255,0,255}},

{{255,255,255}}

};

IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);

cvCvtColor( object, object_color, CV_GRAY2BGR );

CvSeq* objectKeypoints = 0, *objectDescriptors = 0;

CvSeq* imageKeypoints = 0, *imageDescriptors = 0;

int i;

CvSURFParams params = cvSURFParams(500, 1);

double tt = (double)cvGetTickCount();

cvExtractSURF( object, 0, objectKeypoints, objectDescriptors, storage, params );

printf("Object Descriptors: %d\n", objectDescriptors-total);

cvExtractSURF( image, 0, imageKeypoints, imageDescriptors, storage, params );

printf("Image Descriptors: %d\n", imageDescriptors-total);

tt = (double)cvGetTickCount() - tt;

printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));

CvPoint src_corners[4] = {{0,0}, {object-width,0}, {object-width, object-height}, {0, object-height}};

CvPoint dst_corners[4];

IplImage* correspond = cvCreateImage( cvSize(image-width, object-height+image-height), 8, 1 );

cvSetImageROI( correspond, cvRect( 0, 0, object-width, object-height ) );

cvCopy( object, correspond );

cvSetImageROI( correspond, cvRect( 0, object-height, correspond-width, correspond-height ) );

cvCopy( image, correspond );

cvResetImageROI( correspond );

#ifdef USE_FLANN

printf("Using approximate nearest neighbor search\n");

#endif

if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,

imageDescriptors, src_corners, dst_corners ))

{

for( i = 0; i 4; i++ )

{

CvPoint r1 = dst_corners[i%4];

CvPoint r2 = dst_corners[(i+1)%4];

cvLine( correspond, cvPoint(r1.x, r1.y+object-height ),

cvPoint(r2.x, r2.y+object-height ), colors[8] );

}

}

vectorint ptpairs;

#ifdef USE_FLANN

flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );

#else

findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );

#endif

for( i = 0; i (int)ptpairs.size(); i += 2 )

{

CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );

CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );

cvLine( correspond, cvPointFrom32f(r1-pt),

cvPoint(cvRound(r2-pt.x), cvRound(r2-pt.y+object-height)), colors[8] );

}

cvShowImage( "Object Correspond", correspond );

for( i = 0; i objectKeypoints-total; i++ )

{

CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );

CvPoint center;

int radius;

center.x = cvRound(r-pt.x);

center.y = cvRound(r-pt.y);

radius = cvRound(r-size*1.2/9.*2);

cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );

}

cvShowImage( "Object", object_color );

cvWaitKey(0);

cvDestroyWindow("Object");

cvDestroyWindow("Object Correspond");

return 0;

}
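For reference, this is essentially OpenCV 2.x's samples/c/find_obj.cpp demo, and note that it matches SURF descriptors, not SIFT. Build it against OpenCV 2.x (with the nonfree/legacy modules in 2.4+) and run it as, e.g., find_obj box.png box_in_scene.png; with no arguments it falls back to those two bundled sample images, as the code above shows.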

Does anyone know the principle behind the Gaussian enhancement in ENVI's image Enhance menu? Documentation or links would both help

Gaussian filtering (Gaussian smoothing) is one of the most common operations in image processing and computer vision. Normally we just call a library function, imfilter in MATLAB or cvSmooth in OpenCV, and never look at the underlying implementation. But anyone who sits down to implement Gaussian filtering themselves soon gets stuck on the same questions:

Given sigma, the standard deviation, how do you choose the size of the discretized filter window?

Given the window size, how do you compute the kernel's sigma (standard deviation)?

How do you implement a separable filter?

A Google search turned up hardly anyone explaining the implementation details clearly, so here I'll attempt a small summary based on three pieces of source code.

The three sources are:

cvfilter.cpp in OpenCV

GaussianConvolution.c in autopano-sift-c

blur-gauss.c and unsharp-mask.c in GIMP

In image processing, Gaussian filtering is generally implemented one of two ways: convolution with a discretized sliding window, or via the Fourier transform. The sliding-window form is by far the most common; an FFT-based implementation is only worth considering when the discretized window is so large that even a separable sliding-window implementation becomes too expensive. Only the first method is discussed here.

The two-dimensional Gaussian has the form:

f(x,y) = A e^{- \left(\frac{(x-x_o)^2}{2\sigma_x^2} + \frac{(y-y_o)^2}{2\sigma_y^2} \right)}.

Its shape (the figure is omitted here) is a sharp central peak; with a profile that spiky, it's a wonder the operation is called Gaussian smoothing rather than Gaussian spiking.

The gist of discretization is to keep the central part of the Gaussian, where the energy is concentrated, and ignore the flat, low-energy tails. That's only an intuition, though; concrete implementations come in all sorts of odd variants. Let's go through the three codebases and see what real-world Gaussian smoothing actually looks like.

First question: given sigma, how do you compute the window size?

Straight from the OpenCV source, in the cvFilter code:

param1 = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1;

So OpenCV treats a window of radius 3*sigma as the region where the Gaussian's energy is concentrated. (Why it switches to a 4*sigma radius when the image depth is not 8U is unclear — presumably a deeper image resolves finer quantization steps, so the tail has to be truncated farther out. Corrections from anyone who knows are welcome.)
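To make the rule concrete, here is a tiny sketch (my own code, not OpenCV's; gaussianWindowWidth is a made-up name) of what that line computes: width = round(2*c*sigma + 1) forced odd, with c = 3 for 8-bit images and 4 otherwise.

#include <cmath>
#include <cstdio>

static int gaussianWindowWidth(double sigma, bool depth8U)
{
    int c = depth8U ? 3 : 4;                        // truncation radius, in sigmas
    return (int)std::lround(sigma * c * 2 + 1) | 1; // |1 forces an odd width
}

int main()
{
    for (double s = 0.5; s <= 3.0; s += 0.5)
        std::printf("sigma=%.1f -> width=%d\n", s, gaussianWindowWidth(s, true));
    return 0;
}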

autopano-sift-c is the SIFT implementation inside the stitching tool hugin. Building the DoG pyramid requires Gaussian smoothing at several scales, implemented in the GaussianConvolution_new1 function:

dim = 1 + 2 * ((int) (3.0 * sigma));

So autopano likewise uses a window of radius 3*sigma.

GIMP's implementation is more unusual; in the make_rle_curve function of blur_gauss.c:

const gdouble sigma2 = 2 * sigma * sigma;
const gdouble l = sqrt (-sigma2 * log (1.0 / 255.0));
int n = ceil (l) * 2;
if ((n % 2) == 0)
    n += 1;

At first I couldn't see what that log (1.0 / 255.0) is doing... embarrassing. It presumably solves exp(-l^2/(2*sigma^2)) = 1/255 for l, i.e. it finds the distance at which the kernel weight drops below one gray level out of 255. That puts the window radius at l = sigma*sqrt(2*ln 255), roughly 3.3*sigma.

Second question: given the window size, how do you compute sigma?

OpenCV's implementation, in the init_gaussian_kernel function of cvFilter.cpp:

sigmaX = sigma > 0 ? sigma : (n/2 - 1)*0.3 + 0.8;

Puzzling again... multiplying by 0.3 I can accept, but what is the + 0.8 for?

autopano does not implement this case.

GIMP's implementation:

/* we want to generate a matrix that goes out a certain radius
 * from the center, so we have to go out ceil(rad-0.5) pixels,
 * including the center pixel. Of course, that's only in one direction,
 * so we have to go the same amount in the other direction, but not count
 * the center pixel again. So we double the previous result and subtract
 * one.
 * The radius parameter that is passed to this function is used as
 * the standard deviation, and the radius of effect is the
 * standard deviation * 2. It's a little confusing.
 */
radius = fabs (radius) + 1.0;
std_dev = radius;
radius = std_dev * 2;

/* go out 'radius' in each direction */
matrix_length = 2 * ceil (radius - 0.5) + 1;

The comment explains it clearly: sigma is taken to be half the window radius.

Having read all three, the conclusion on sigma versus the radius is: compute it however you like, anything in the right ballpark works... (er... all that effort for one sentence of non-advice).

The third question is the separable filter.

Because the Gaussian function factorizes, the filter can be implemented separably for speed. A separable filter turns one multi-dimensional convolution into several one-dimensional convolutions; for the 2D Gaussian, you convolve every row with a 1D kernel, then every column. That drops the complexity from O(M*M*N*N) to O(2*M*M*N), where M is the image size and N the filter window size. The remaining question is how to compute the 1D kernel.

It's actually simple: using the window size computed as above, evaluate the 1D Gaussian at each discrete point of the window, and don't forget to normalize the weights so they sum to 1.

Code speaks louder than words; from OpenCV (a self-contained sketch of the full separable pipeline follows the snippet):

for( i = 0; i <= n/2; i++ )
{
    double t = fixed_kernel ? (double)fixed_kernel[i] : exp(scale2X*i*i);
    if( type == CV_32FC1 )
    {
        cf[(n/2+i)*step] = (float)t;
        sum += cf[(n/2+i)*step]*2;
    }
    else
    {
        cd[(n/2+i)*step] = t;
        sum += cd[(n/2+i)*step]*2;
    }
}

sum = 1./sum;
for( i = 0; i <= n/2; i++ )
{
    if( type == CV_32FC1 )
        cf[(n/2+i)*step] = cf[(n/2-i)*step] = (float)(cf[(n/2+i)*step]*sum);
    else
        cd[(n/2+i)*step] = cd[(n/2-i)*step] = cd[(n/2+i)*step]*sum;
}

How to do face recognition with OpenCV

There are plenty of posts about this online; here is something simple I wrote myself. Delete the commented-out parts and it will do basic face detection:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

void detectAndDraw( Mat img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip );

//Mat imageresize(Mat image, Size size);

/*int main(){
    //VideoCapture cap(0);    // open the default camera
    VideoCapture cap("F:/nihao.mp4");
    if(!cap.isOpened())
    {
        return -1;
    }
    Mat frame;
    Mat edges;
    CascadeClassifier cascade, nestedCascade;
    bool stop = false;
    // trained cascade files, placed in the same directory as the executable
    cascade.load("haarcascade_frontalface_alt.xml");
    nestedCascade.load("haarcascade_eye_tree_eyeglasses.xml");
    while(!stop)
    {
        cap >> frame;
        detectAndDraw( frame, cascade, nestedCascade, 2, 0 );
        if(waitKey(30) >= 0)
            stop = true;
    }
    return 0;
}
*/

int main(){
    Mat image = imread("F:/quanjiafu.jpg");
    CascadeClassifier cascade, nestedcascade;
    cascade.load("F:/Opencv2.4.9/opencv/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
    nestedcascade.load("F:/Opencv2.4.9/opencv/sources/data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
    detectAndDraw(image, cascade, nestedcascade, 2, 0);
    waitKey(0);
    return 0;
}

void detectAndDraw( Mat img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip )
{
    int i = 0;
    double t = 0;
    // vector containers holding the detected faces
    vector<Rect> faces, faces2;
    // a few colors to tell different faces apart
    const static Scalar colors[] = {
        CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    // work on a shrunken copy to speed up detection
    // (int cvRound(double value) rounds a double to the nearest integer)
    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
    // convert to grayscale; Haar features are computed on gray images
    cvtColor( img, gray, CV_BGR2GRAY );
    // resize using bilinear interpolation
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    // equalize the histogram of the shrunken image
    equalizeHist( smallImg, smallImg );
    // take tick counts before and after to measure the detection time
    t = (double)cvGetTickCount();
    // detect faces:
    // smallImg is the input image and faces receives the detected face rectangles;
    // 1.1 is the factor by which the search scale shrinks at each level; 2 means a
    // candidate must be hit by several neighboring windows before it counts as a
    // real target (nearby pixels and window sizes all fire on a real face);
    // CV_HAAR_SCALE_IMAGE scales the image rather than the classifier;
    // Size(30, 30) is the minimum target size
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        //|CV_HAAR_FIND_BIGGEST_OBJECT
        //|CV_HAAR_DO_ROUGH_SEARCH
        |CV_HAAR_SCALE_IMAGE
        ,
        Size(30, 30));
    // if enabled, flip the image and detect again
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
                                 1.1, 2, 0
                                 //|CV_HAAR_FIND_BIGGEST_OBJECT
                                 //|CV_HAAR_DO_ROUGH_SEARCH
                                 |CV_HAAR_SCALE_IMAGE
                                 ,
                                 Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)cvGetTickCount() - t;
    // qDebug( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;
        double aspect_ratio = (double)r->width/r->height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            // drawing happens on the original-size image, so scale the
            // coordinates back up by the shrink factor
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            //Size s=Size(cvRound((r->width + r->height)*0.25*scale)*2,cvRound((r->width + r->height)*0.25*scale)*2);
            //Mat image=imread("F:/yaoming1.jpg");
            //Mat nimage=imageresize(image,s);
            //Mat imageROI=img(Rect(center.x-s.width/2,center.y-s.height/2,nimage.cols,nimage.rows));
            //addWeighted(imageROI,0.1,nimage,3,0.,imageROI);
            radius = cvRound((r->width + r->height)*0.25*scale);
            circle( img, center, radius, color, 2, 8, 0 );
        }
        else
            rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
                       cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg(*r);
        // detect eyes inside the face ROI the same way
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            //|CV_HAAR_DO_ROUGH_SEARCH
            //|CV_HAAR_DO_CANNY_PRUNING
            |CV_HAAR_SCALE_IMAGE
            ,
            Size(30, 30) );
        for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
        {
            center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
            center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
            radius = cvRound((nr->width + nr->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
    }
    cv::imshow( "result", img );
}

//Mat imageresize(Mat image,Size size){
//    Mat nimage=Mat(size,CV_32S);
//    resize(image,nimage,size);
//    return nimage;
//}

AdaBoost-based face detection: how can the program be changed so the detected face region is a rectangle? (It is currently a circle.)

Most face detection methods in practical use today are based on the AdaBoost learning algorithm.

The Viola face detection method builds on the integral image, a cascaded detector, and the AdaBoost algorithm. Its framework has three parts:

Part one: represent faces with Haar-like features, and use an "integral image" to compute the feature values quickly (a sketch of this follows the list);

Part two: use AdaBoost to select the rectangle features (weak classifiers) that best characterize faces, and combine the weak classifiers into a strong classifier by weighted voting;

Part three: chain several trained strong classifiers into a cascade, which greatly improves detection speed. (颜鉴 from Guangzhou ranks third in the world on the FDDB international face detection benchmark.)
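To make part one concrete, here is a small sketch (my own, not Viola's code) of an integral image: once built, any rectangle sum — and therefore any Haar-like feature — costs four table lookups regardless of the rectangle's size.

#include <vector>

// ii has (h+1) x (w+1) entries; ii[y][x] holds the sum of img strictly above
// and to the left of (x, y), so row 0 and column 0 are all zeros.
std::vector<std::vector<long long> >
integralImage(const std::vector<std::vector<int> >& img)
{
    size_t h = img.size(), w = img[0].size();
    std::vector<std::vector<long long> > ii(h + 1, std::vector<long long>(w + 1, 0));
    for (size_t y = 0; y < h; ++y)
        for (size_t x = 0; x < w; ++x)
            ii[y + 1][x + 1] = img[y][x] + ii[y][x + 1] + ii[y + 1][x] - ii[y][x];
    return ii;
}

// sum of img over the half-open box [x0, x1) x [y0, y1), in O(1)
long long rectSum(const std::vector<std::vector<long long> >& ii,
                  size_t x0, size_t y0, size_t x1, size_t y1)
{
    return ii[y1][x1] - ii[y0][x1] - ii[y1][x0] + ii[y0][x0];
}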

AdaBoost itself is a classification method. Its basic principle is "three cobblers with their wits combined equal a Zhuge Liang": it combines a number of fairly weak classification methods into a new, very strong one.
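As for the actual question: the circle is drawn by the detectAndDraw() in the previous answer whenever a face's aspect ratio is close to 1. A minimal sketch of the change, using the same variable names as that code — delete the aspect-ratio test and the circle() branch, and draw every detection with rectangle(), scaling the coordinates back to the full-size image:

int i = 0;
for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
{
    Scalar color = colors[i%8];
    // always a rectangle, never a circle
    rectangle( img, Point(cvRound(r->x*scale), cvRound(r->y*scale)),
               Point(cvRound((r->x + r->width-1)*scale),
                     cvRound((r->y + r->height-1)*scale)),
               color, 3, 8, 0 );
}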

error C3861: 'AfxSocketInit': identifier not found

I hit the same error, on the VS2005 platform.

Source code:

BOOL CSerialPortApp::InitInstance() // initialize the application instance
{
    if (!AfxSocketInit())
    {
        AfxMessageBox(IDP_SOCKETS_INIT_FAILED); // socket initialization failed
        return FALSE;
    }
    // ...

error C3861: 'AfxSocketInit': identifier not found

I can't find where it is supposed to be defined. I'm new to this — pointers appreciated!
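For anyone landing here: AfxSocketInit() is declared in afxsock.h, which the Visual Studio wizard only pulls into stdafx.h when "Windows Sockets" support is checked at project creation. A minimal sketch of the usual fix, assuming an MFC project like the one above:

// stdafx.h — add the MFC sockets header yourself:
#include <afxsock.h>

// After that, the original InitInstance() compiles unchanged:
BOOL CSerialPortApp::InitInstance()
{
    if (!AfxSocketInit())
    {
        AfxMessageBox(IDP_SOCKETS_INIT_FAILED); // socket initialization failed
        return FALSE;
    }
    // ...
    return TRUE;
}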
