微信【跳一跳】 opencv视觉识别 + 物理外挂

小说:网上赚钱新方法作者:秉龙北扁更新时间:2019-03-19字数:35270

微信【跳一跳】 opencv视觉识别 + 物理外挂


视频连接:http://v.youku.com/v_show/id_XMzMyNDQxNTA0OA==.html?spm=a2h3j.8428770.3416059.1

 




 

 

 

 

 

 

初入门C++ 与 opencv视觉库,写了一个跳一跳的物理挂,现在识别率还比较差,先记录下过程,以后再慢慢修改整理。

一、外挂结构

上位机:USB摄像头连接windows电脑,用于处理、识别拍摄到的图像数据。

下位机:STM32单片机,用于控制舵机带动电容笔进行物理点击。

单片机部分很简单,所以下文主要记录上位机的内容。

 



 

二、上位机程序框架

开发平台:Visual Studio 2012 

思路:

  1. 读取摄像头数据;
  2. 提取图像中的手机屏幕、进行屏幕矫正;
  3. 识别人物和方块并计算它们的距离(现方案识别部分采用模板匹配、模板使用photoshop预先裁剪好,加载入程序中);
  4. 把计算结果通过串口交给下位机,由下位机带动舵机进行物理点击;

 



 

三、程序源码

 

ScreenExtraction.cpp用于识别屏幕边缘、提取内容和矫正

#include "ScreenExtraction.h"
#include "main.h"


// Default constructor: all Mat members start empty; callers must invoke
// setSrc() before runExtract()/runFastExtract().
ScreenExtract::ScreenExtract()
{

}

// Convenience constructor: immediately caches deep copies of srcMat via setSrc().
ScreenExtract::ScreenExtract(Mat srcMat)
{
    setSrc(srcMat);
}

// Destructor: nothing to release explicitly — every member is a
// reference-counted cv::Mat that cleans up on its own.
ScreenExtract::~ScreenExtract()
{
}
// Return the last rectified, upright screen image produced by
// runExtract()/runFastExtract().
Mat ScreenExtract::getDst()
{
    return m_MatDstImageStand;
}

// Cache the new source frame. Each working image gets its own deep copy
// so later pipeline stages can draw on them without touching the
// caller's buffer. Always returns true.
bool ScreenExtract::setSrc(Mat srcMat)
{
    m_MatSrcImage    = srcMat.clone();
    m_MatEdgeImage   = srcMat.clone();
    m_MatHoughImage  = srcMat.clone();
    m_MatCornerImage = srcMat.clone();

    // Pre-size the two output buffers: the "lying" (landscape) warp
    // result and its 90-degree-rotated upright counterpart.
    m_MatDstImageLie   = Mat(1280, 720, srcMat.type());
    m_MatDstImageStand = Mat(720, 1280, srcMat.type());

    return true;
}

// Iteratively tune the Canny threshold until exactly 8 corner candidate
// points (2 per screen corner) are found, then perspective-correct and
// rotate the screen region upright.
// Throws a const char* ("Extract fail") after more than 20 failed attempts.
Mat ScreenExtract::runExtract()
{
    int cannyThrel=100;  // current Canny low threshold (high is 3x, see EdgeDection)
    int failCNT=0;       // count of attempts that did not yield exactly 8 points
    while(1)
    {
        m_MatEdgeImage = EdgeDection(m_MatEdgeImage,cannyThrel);
        m_MatHoughImage = HoughLine(m_MatEdgeImage);
        m_MatCornerImage = CornerHarris(m_MatHoughImage,m_PointPerspectiveSrcBuff);
        if(failCNT>20)
        {
            throw("Extract fail");
            return m_MatSrcImage;  // unreachable: the throw above exits first
        }
        if(m_PointPerspectiveSrcBuff.size() == 8)
        {
            // Exactly 8 candidates: averaged pairwise into the 4 screen
            // corners by perspectiveChange() below.
            break;
        }
        else if(m_PointPerspectiveSrcBuff.size()>8)
        {
            // Too many corners detected: raise the edge threshold (cap 500).
            if(cannyThrel<500)
            {
                cannyThrel+=20;
            }
            failCNT++;
        }
        else if(m_PointPerspectiveSrcBuff.size()<8)
        {
            // Too few corners detected: lower the edge threshold (floor 10).
            if(cannyThrel>10)
            {
                cannyThrel-=5;
            }
            failCNT++;
        }
        waitKey(10);  // lets the HighGUI debug windows refresh between attempts
    }

    m_MatDstImageLie = perspectiveChange(m_MatSrcImage,m_PointPerspectiveSrcBuff);
    m_MatDstImageStand = rotation(m_MatDstImageLie,90);
    return m_MatDstImageStand;
}
// Fast path: skip edge/corner detection entirely and reuse the corner
// buffer found by a previous successful runExtract() call. Warps the new
// frame with the cached points, rotates it upright, and returns it.
Mat ScreenExtract::runFastExtract(Mat srcMat)
{
    m_MatSrcImage = srcMat.clone();

    // Perspective-correct with the cached corners, then stand the
    // landscape result upright with a 90-degree rotation.
    m_MatDstImageLie   = perspectiveChange(m_MatSrcImage, m_PointPerspectiveSrcBuff);
    m_MatDstImageStand = rotation(m_MatDstImageLie, 90);

    return m_MatDstImageStand;
}


//private
/// Blur the cached source frame and run the Canny operator on it.
/// @param srcImage   historical parameter — the previous version masked it
///                   into a copy of the source image that was then discarded
///                   (dead code, now removed); kept so the signature and
///                   callers are unchanged.
/// @param cannyThrel Canny low threshold; the high threshold is fixed at 3x.
/// @return single-channel edge map of m_MatSrcImage (NOT of srcImage).
Mat ScreenExtract::EdgeDection(Mat srcImage,int cannyThrel)
{
    Mat cannyEdge;

    // Denoise before edge detection. Note this deliberately reads the
    // member source image, not the srcImage parameter.
    blur(m_MatSrcImage,cannyEdge,Size(3,3));
#ifdef DEBUG_SHOW_ScreenExtract
    imshow("blur()",cannyEdge);
#endif

    // Canny operator, aperture 3, high threshold = 3 * low threshold.
    Canny(cannyEdge,cannyEdge,cannyThrel,cannyThrel*3,3);

#ifdef DEBUG_SHOW_ScreenExtract
    imshow("EdgeDection()",cannyEdge);
#endif

    return cannyEdge;
}

// Run the standard Hough transform on the edge image and draw every
// detected line in black on a white canvas of the same geometry.
// Returns the canvas (input to the subsequent Harris corner stage).
Mat ScreenExtract::HoughLine(Mat srcImage)
{
    // White canvas, same size/type as the edge image.
    Mat lineCanvas = srcImage.clone();
    lineCanvas = Scalar::all(255);

    // Standard (rho, theta) Hough transform.
    // NOTE: the angle resolution is deliberately (CV_PI-0.2)/180 — just
    // slightly coarser than one degree — and the vote threshold scales
    // with the image width (cols/8).
    vector<Vec2f> detectedLines;
    HoughLines(srcImage, detectedLines, 1, (CV_PI-0.2)/180, srcImage.cols/8, 0, 0);

    // Convert each polar line to two far-apart endpoints and draw it.
    for (size_t idx = 0; idx < detectedLines.size(); ++idx)
    {
        const float rho   = detectedLines[idx][0];
        const float theta = detectedLines[idx][1];
        const double cosT = cos(theta);
        const double sinT = sin(theta);
        const double baseX = cosT * rho;
        const double baseY = sinT * rho;

        Point endA(cvRound(baseX + 3000*(-sinT)), cvRound(baseY + 3000*(cosT)));
        Point endB(cvRound(baseX - 3000*(-sinT)), cvRound(baseY - 3000*(cosT)));
        line(lineCanvas, endA, endB, Scalar(0,0,0), 1, CV_AA);
    }

#ifdef DEBUG_SHOW_ScreenExtract
    imshow("HoughLine()",lineCanvas);
#endif

    return lineCanvas;
}


// Run Harris corner detection on the Hough-line image, mark every
// response above the fixed threshold with filled circles, and pass the
// marker image to GatherPoint(), which returns the marker centroids
// through vecPoint. Returns the 8-bit scaled Harris response (with the
// marker circles drawn on it).
Mat ScreenExtract::CornerHarris( Mat srcImage ,vector<Point2f>  &vecPoint)  
{  
    //---------------------------[1] local variables-------------------------------  
    Mat g_srcImageClone = m_MatSrcImage.clone() ;
    Mat writeImage(m_MatSrcImage.rows,m_MatSrcImage.cols,m_MatSrcImage.type());
    Mat dstImage;// raw Harris response  
    Mat normImage;// normalized response  
    Mat scaledImage;// response linearly converted to 8-bit unsigned  
    int iCornerThresh = 110;

    //g_CornerWithHoughWithSrc = g_houghWithSrc;
    writeImage = Scalar::all(255);  // white canvas used to collect corner markers
    //---------------------------[2] initialization--------------------------------  
    // (kept from an earlier revision; the explicit zeroing is no longer needed)  
    //dstImage = Mat::zeros( srcImage.size(), CV_32FC1 );  
    //g_srcImageClone=g_srcImage.clone( );  
  
    //---------------------------[3] detection-------------------------------------  
    // Harris corner detection  
    //cornerHarris( g_srcGrayImage, dstImage, 2, 3, 0.04, BORDER_DEFAULT );  // on the raw source image
    cornerHarris( srcImage, dstImage, 2, 3, 0.04, BORDER_DEFAULT );    // on the Hough-line image

  
    // Normalize the response to [0,255] and convert to 8-bit  
    normalize( dstImage, normImage, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );  
    convertScaleAbs( normImage, scaledImage );// linear transform to 8-bit unsigned   
  
    //---------------------------[4] drawing---------------------------------------  
    // Mark every corner whose response exceeds the threshold. The loops
    // skip a 1% border on each side to ignore frame-edge artifacts.  
    for( int j = normImage.rows/100; j < normImage.rows-normImage.rows/100 ; j++ )  
    {
        for( int i = normImage.cols/100; i < normImage.cols-normImage.cols/100; i++ )  
        {  
            if( (int) normImage.at<float>(j,i) > iCornerThresh  )  
            {  
                circle( g_srcImageClone, Point( i, j ), 5,  Scalar(10,10,255), 2, 8, 0 );  
                //circle( g_CornerWithHoughWithSrc, Point( i, j ), 5,  Scalar(10,10,255), 2, 8, 0 );  
                // Filled circles: nearby corner pixels merge into blobs whose
                // centroids GatherPoint() extracts below.
                circle( scaledImage, Point( i, j ), 10,  Scalar(0,10,255), -1, 8, 0 );  
                circle( writeImage, Point( i, j ), 10,  Scalar(0,10,255), -1, 8, 0 ); 
            }  
        }  
    }  
    
    vecPoint = GatherPoint( writeImage,iCornerThresh);
#ifdef DEBUG_SHOW_ScreenExtract
    //---------------------------[5] debug display---------------------------------  
    //imshow( "CornerHarris", g_CornerWithHoughWithSrc );  // overlaid on source+Hough
    //imshow( "CornerHarris", g_srcImageClone );  // overlaid on the source image
    //imshow( "CornerHarris", scaledImage );  // grayscale response
    imshow( "CornerHarris", writeImage );
#endif
    return scaledImage;
}  

/// Find the corner-marker blobs drawn by CornerHarris() and return the
/// centroid of each blob.
/// @param srcImage     BGR image containing the filled marker circles.
/// @param CornerThresh unused (kept for interface compatibility).
/// @return one Point2f centroid per non-degenerate contour.
vector<Point2f> ScreenExtract::GatherPoint( Mat srcImage, int CornerThresh ) 
{
    Mat canny_output;
    Mat grayImage;
    vector<vector<Point>>contours;
    vector<Vec4i>hierarchy;

    // Grayscale conversion
    cvtColor(srcImage, grayImage, COLOR_BGR2GRAY);

    // Canny edge detection
    Canny(grayImage, canny_output, 50, 50 * 2, 3);

    // Contour extraction
    findContours(canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));

    // Centroids from image moments.
    // BUG FIX: degenerate contours (points/lines) have zero area m00;
    // the previous version divided by it, producing NaN centroids that
    // poisoned the later perspective transform. Skip them instead.
    vector<Point2f> mc;
    mc.reserve(contours.size());
    for (unsigned int i = 0; i < contours.size(); i++)
    {
        Moments mu = moments(contours[i], false);
        if (mu.m00 != 0)
        {
            mc.push_back(Point2f(static_cast<float>(mu.m10 / mu.m00),
                                 static_cast<float>(mu.m01 / mu.m00)));
        }
    }

    // Draw contours and centroids for debugging
    Mat drawing = Mat::zeros(srcImage.size(), CV_8UC3);
    for (unsigned int i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(0, 255, 0);
        drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
    }
    for (unsigned int i = 0; i < mc.size(); i++)
    {
        circle(drawing, mc[i], 4, Scalar(0, 255, 0), -1, 8, 0);
    }
#ifdef DEBUG_SHOW_ScreenExtract
    imshow("GatherPoint", drawing);
#endif
    
    return mc;
}


/// Warp the quadrilateral described by srcBuff so it fills the whole
/// destination image.
/// @param srcMat  source frame.
/// @param srcBuff exactly 8 corner candidates (2 per screen corner) from
///                CornerHarris(); each pair is averaged into one corner.
/// @return perspective-corrected image, same size/type as srcMat.
Mat ScreenExtract::perspectiveChange(Mat srcMat,vector<Point2f> srcBuff)
{
    Mat dstMat(srcMat.rows,srcMat.cols,srcMat.type());

    // Perspective transform source/destination quads
    Point2f perspectiveSrcBuff[4];
    Point2f perspectiveDesBuff[4];

    // Average each detected point pair into a single corner coordinate.
    perspectiveSrcBuff[0] = Point2f((srcBuff[0].x+srcBuff[1].x)/2,(srcBuff[0].y+srcBuff[1].y)/2) ;  
    perspectiveSrcBuff[1] = Point2f((srcBuff[2].x+srcBuff[3].x)/2,(srcBuff[2].y+srcBuff[3].y)/2) ;   
    perspectiveSrcBuff[2] = Point2f((srcBuff[4].x+srcBuff[5].x)/2,(srcBuff[4].y+srcBuff[5].y)/2) ;
    perspectiveSrcBuff[3] = Point2f((srcBuff[6].x+srcBuff[7].x)/2,(srcBuff[6].y+srcBuff[7].y)/2) ;

    // Map each averaged corner to the matching destination corner.
    // BUG FIX: the loop previously ran i<3, leaving perspectiveDesBuff[3]
    // uninitialized — undefined behavior fed into getPerspectiveTransform.
    // Also hoists getPointPlace() so it is called once per corner.
    for(int i=0;i<4;i++)
    {
        // Quadrant codes (see getPointPlace): 0=TL, 1=BL, 2=TR, 3=BR.
        switch( getPointPlace(srcMat,perspectiveSrcBuff[i]) )
        {
        case 0:
            perspectiveDesBuff[i] = Point2f( 0, 0);
            break;
        case 1:
            perspectiveDesBuff[i] = Point2f( 0, static_cast<float>(dstMat.rows-1));
            break;
        case 2:
            perspectiveDesBuff[i] = Point2f( static_cast<float>(dstMat.cols-1), 0);
            break;
        default: // 3
            perspectiveDesBuff[i] = Point2f( static_cast<float>(dstMat.cols-1), static_cast<float>(dstMat.rows-1));
            break;
        }
    }
 
    // Compute and apply the perspective transform
    Mat perspectiveMat = getPerspectiveTransform( perspectiveSrcBuff, perspectiveDesBuff );  
    warpPerspective(srcMat, dstMat, perspectiveMat, dstMat.size());

#ifdef DEBUG_SHOW_ScreenExtract
    imshow( "perspectiveChange", dstMat );
#endif
    return dstMat;
}
// Classify which quadrant of the image a point falls in.
// Encoding: 0 = top-left, 1 = bottom-left, 2 = top-right, 3 = bottom-right.
int ScreenExtract::getPointPlace(Mat srcImage,Point2f point)
{
    const bool inRightHalf  = !(point.x < srcImage.cols/2);
    const bool inBottomHalf = !(point.y < srcImage.rows/2);
    return (inRightHalf ? 2 : 0) + (inBottomHalf ? 1 : 0);
}


// Rotate srcMat by `degree` degrees; the output buffer has its
// width/height swapped relative to the source (the pipeline only calls
// this with degree=90 to turn the landscape screen upright).
// NOTE(review): the rotation center is (len/2, len/2) with
// len = max(cols, rows), which is only geometrically correct for
// particular frame shapes — confirm against the 1280x720 frames used here.
Mat ScreenExtract::rotation(Mat srcMat,float degree)
{
    Mat dstImage(srcMat.rows,srcMat.cols,srcMat.type());
    int len = max(srcMat.cols, srcMat.rows);
    Point2f pt(len/2.f,len/2.f);
    Mat r = getRotationMatrix2D(pt,degree,1.0);
    // Size(rows, cols) deliberately swaps dimensions for the 90° case.
    warpAffine(srcMat,dstImage,r,Size(srcMat.rows,srcMat.cols));

    return dstImage;
}
View Code

ScreenExtraction.h为头文件

#ifndef __SCREENEXTRACTION_H
#define __SCREENEXTRACTION_H

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
/* 
#include <opencv2/nonfree/nonfree.hpp>  
#include<opencv2/legacy/legacy.hpp> */ 
#include <iostream>  
using namespace cv;
using namespace std;

//#define DEBUG_SHOW_ScreenExtract

// Extracts the phone-screen region from a webcam frame: edge detection ->
// Hough lines -> Harris corners -> perspective correction -> rotation.
class ScreenExtract
{
public:
    ScreenExtract();
    ScreenExtract(Mat srcMat);
    ~ScreenExtract();
    Mat getDst();

    bool setSrc(Mat srcMat);
    Mat runExtract();
    Mat runFastExtract(Mat srcMat);
private:
    Mat EdgeDection(Mat srcImage,int cannyThrel);
    Mat HoughLine(Mat srcImage);
    Mat CornerHarris( Mat srcImage,vector<Point2f> &vecPoint) ;
    vector<Point2f> GatherPoint( Mat srcImage, int CornerThresh ) ;
    Mat perspectiveChange(Mat srcMat,vector<Point2f> srcBuff);
    // FIX: removed the illegal "ScreenExtract::" qualification that was on
    // this in-class declaration (extra qualification on a member declared
    // inside its class is ill-formed C++; MSVC accepts it as an extension,
    // conforming compilers reject it).
    int getPointPlace(Mat srcImage,Point2f point);// which image quadrant a point lies in
    Mat rotation(Mat srcImage,float degree);
private:
    Mat m_MatSrcImage;     // cached source frame
    Mat m_MatEdgeImage;    // Canny edge map
    Mat m_MatHoughImage;   // Hough-line canvas
    Mat m_MatCornerImage;  // Harris response image
    Mat m_MatDstImageLie;  // perspective-corrected (landscape) result
    Mat m_MatDstImageStand;// final upright result
    vector<Point2f> m_PointPerspectiveSrcBuff; // 8 corner candidates (2 per corner)
};


#endif
View Code

 

ImageMatch.cpp用于识别内容

#include "ImageMatch.h"


// Construct a matcher whose working images all start as deep copies of
// the supplied source frame. Feature template lists are capped at 50.
ImageMatch::ImageMatch(Mat srcImage)
{    
    m_iFeatureMaxSize = 50;            // capacity cap for both feature vectors
    m_MatSrcImage   = srcImage.clone();
    m_MatMatchImage = srcImage.clone();
    m_MatDstImage   = srcImage.clone();
}

// Destructor: nothing to release — all members manage their own storage.
ImageMatch::~ImageMatch()
{    

}
/*******************************************************************************
* Function Name  : setSrcImage
* Description    : replace the cached source frame with a deep copy of srcImage
* Input          : srcImage - new source frame
* Output         : None
* Return         : always true
*******************************************************************************/
bool ImageMatch::setSrcImage(Mat srcImage)
{
    m_MatSrcImage = srcImage.clone();
    return true;    
}
/*******************************************************************************
* Function Name  : addCubeFeatureImage
* Description    : append one cube (building-block) template image
* Input          : image - template Mat
* Output         : None
* Return         : true on success, false when the list is at capacity
*******************************************************************************/
bool ImageMatch::addCubeFeatureImage(Mat image)
{
    // Refuse once the configured template cap is reached.
    if(m_vecCubeFeatureImage.size() >= static_cast<size_t>(m_iFeatureMaxSize))
    {
        return false;
    }

    m_vecCubeFeatureImage.push_back(image);
    return true;
}
// Append a whole batch of cube templates; all-or-nothing — the batch is
// only accepted if the combined count stays under the capacity cap.
bool ImageMatch::addCubeFeatureImage(vector<Mat> vec)
{
    const size_t combined = m_vecCubeFeatureImage.size() + vec.size();
    if(combined >= static_cast<size_t>(m_iFeatureMaxSize))
    {
        return false;
    }

    m_vecCubeFeatureImage.insert(m_vecCubeFeatureImage.end(), vec.begin(), vec.end());
    return true;
}

// Append one "confusing" (false-positive-prone) template, if capacity allows.
bool ImageMatch::addConfusingFeatureImage(Mat image)
{
    // Refuse once the configured template cap is reached.
    if(m_vecConfusingFeatureImage.size() >= static_cast<size_t>(m_iFeatureMaxSize))
    {
        return false;
    }

    m_vecConfusingFeatureImage.push_back(image);
    return true;
}
// Append a batch of "confusing" templates; all-or-nothing — accepted only
// if the combined count stays under the capacity cap.
bool ImageMatch::addConfusingFeatureImage(vector<Mat> vec)
{
    const size_t combined = m_vecConfusingFeatureImage.size() + vec.size();
    if(combined >= static_cast<size_t>(m_iFeatureMaxSize))
    {
        return false;
    }

    m_vecConfusingFeatureImage.insert(m_vecConfusingFeatureImage.end(), vec.begin(), vec.end());
    return true;
}
/*******************************************************************************
* Function Name  : setPersonFeature
* Description    : set the template used to locate the player character
* Input          : image - player template Mat (deep-copied)
* Output         : None
* Return         : always true
*******************************************************************************/
bool ImageMatch::setPersonFeature(Mat image)
{
    m_MatPersonFeatureImage = image.clone();
    return true;
}

/*******************************************************************************
* Function Name  : runMatch
* Description    : main matching pass — locates the player and the target
*                  block, then stores the pixel distance between them in
*                  m_fDistance (read back via getDistance()).
* Input          : None (operates on the cached m_MatSrcImage)
* Output         : None
* Return         : clone of the cached source image
*******************************************************************************/
Mat ImageMatch::runMatch()
{
    //[0] Pre-process: smooth + contrast/brightness adjust before matching
    Mat dstImage = m_MatSrcImage.clone();
    Mat srcImage_Adjust= AdjectDefinition(m_MatSrcImage,100,0);


    //[1] Locate the player character in a horizontal band at 35% height
    m_pLocPerson = SearchPerson( srcImage_Adjust, 10, (int)(srcImage_Adjust.rows*0.35), srcImage_Adjust.cols-20, (int)(srcImage_Adjust.rows*0.35) );
#ifdef DEBUG_SHOW_ImageMatch
    //rectangle( srcImage_Adjust,  Rect(loc,  Size(m_MatPersonFeatureImage.cols, m_MatPersonFeatureImage.rows) ), Scalar(0, 0, 255), 2, 8, 0 );
    circle( srcImage_Adjust, m_pLocPerson, 10,  Scalar(0,0,255), 5, 8, 0 );  
    //imshow("TempleMatch",srcImage_Adjust);
#endif

    //[2] Locate the target block, searching the half opposite the player
    if(m_pLocPerson.x < srcImage_Adjust.cols*0.5)// player on the left: search the right region (speed-up)
        m_pLocBlock = SearchBuilding( srcImage_Adjust, (int)(srcImage_Adjust.cols*0.3)+10, (int)(srcImage_Adjust.rows*0.20), (int)(srcImage_Adjust.cols*0.7)-20, (int)(srcImage_Adjust.rows*0.5));
    else// search the left region
        m_pLocBlock = SearchBuilding( srcImage_Adjust , 10, (int)(srcImage_Adjust.rows*0.20), (int)(srcImage_Adjust.cols*0.7)-20, (int)(srcImage_Adjust.rows*0.5));
#ifdef DEBUG_SHOW_ImageMatch
    circle( srcImage_Adjust, m_pLocBlock, 20,  Scalar(0,255,0), 5, 8, 0 );  

    imshow("TempleMatch",srcImage_Adjust);
#endif

    m_fDistance    = calculateDistance(m_pLocPerson,m_pLocBlock);

    return dstImage; 
}
/*******************************************************************************
* Function Name  : changeSrcAndrunMatch
* Description    : convenience wrapper — replace the cached source frame,
*                  then run the full matching pass on it
* Input          : srcImage - new source frame
* Output         : None
* Return         : result of runMatch() (clone of the new source image)
*******************************************************************************/
Mat ImageMatch::changeSrcAndrunMatch(Mat srcImage)
{
    setSrcImage(srcImage);
    return runMatch();
}
// Convert the raw pixel distance measured by runMatch() into the value
// handed to the MCU. 2.1 is an empirical scale factor; the result is
// clamped so it can never be negative.
float ImageMatch::getDistance()
{
    const float scaled = m_fDistance * 2.1f;
    return (scaled < 0) ? 0.0f : scaled;
}
//private
/*******************************************************************************
* Function Name  : FeatureVectorChangeToEdgeVector
* Description    : rebuild m_vecFeatureEdgeImage: one Canny edge map per
*                  cube template currently in m_vecCubeFeatureImage
* Input          : None
* Output         : None
* Return         : always true
*******************************************************************************/
bool ImageMatch::FeatureVectorChangeToEdgeVector()
{
    m_vecFeatureEdgeImage.clear();
    for(unsigned int i=0;i<m_vecCubeFeatureImage.size();i++)
    {
        //m_MatSrcImage = FeatureMatchAndMark(m_vecCubeFeatureImage[i],m_MatSrcImage);
        m_vecFeatureEdgeImage.push_back ( calEdge(m_vecCubeFeatureImage[i],10));
#ifdef DEBUG_SHOW_ImageMatch
        imshow("Edge",m_vecFeatureEdgeImage[i]);
#endif
    }    
    return true;
}
/*******************************************************************************
* Function Name  : AdjectDefinition
* Description    : smooth the image, then apply a contrast/brightness
*                  adjustment: dst = saturate(contrast*0.01*src + brightness)
* Input          : srcImage   - BGR source (left untouched)
*                  contrast   - percentage in [0,300]; applied as contrast*0.01
*                  brightness - additive offset
* Output         : None
* Return         : adjusted image
*******************************************************************************/
Mat ImageMatch::AdjectDefinition(Mat srcImage,int contrast,int brightness)
{
    Mat dstImage;
    // Smooth first to suppress sensor noise.
    blur(srcImage,dstImage,Size(3,3));

    // Contrast/brightness adjustment. Replaces the original hand-rolled
    // per-pixel triple loop with Mat::convertTo, which performs exactly
    // the same saturate_cast<uchar>(alpha*src + beta) on every channel
    // but is vectorized and independent of the channel count.
    dstImage.convertTo(dstImage, -1, contrast*0.01, brightness);

    return dstImage;
}
/*******************************************************************************
* Function Name  : calEdge
* Description    : compute the Canny edge map of an image
* Input          : srcImage - input image
*                  threl    - Canny low threshold (high threshold is 3x)
* Output         : None
* Return         : single-channel edge map
*******************************************************************************/
Mat ImageMatch::calEdge(Mat srcImage,int threl)
{
    Mat edgeMap;
    // Aperture size 3; high threshold fixed at three times the low one.
    Canny(srcImage, edgeMap, threl, threl*3, 3);

#ifdef DEBUG_SHOW_ImageMatch
    imshow("calEdge", edgeMap);
#endif
    return edgeMap;
}

/*******************************************************************************
* Function Name  : SearchPerson
* Description    : locate the player character inside a ROI of TargetImage
*                  by template matching against m_MatPersonFeatureImage
* Input          : TargetImage - image to search (pre-adjusted)
*                  ROI_Xstart/ROI_Ystart - top-left corner of the search ROI
*                  ROI_length/ROI_heigh  - ROI width and height
* Output         : None
* Return         : point where the player touches the ground (template
*                  center-x, 80% down the template height), in TargetImage
*                  coordinates
* Throws         : const char* "Person match fail" when the best match
*                  score is below 0.64
*******************************************************************************/
Point ImageMatch::SearchPerson(Mat TargetImage,int ROI_Xstart,int ROI_Ystart,int ROI_length,int ROI_heigh )
{
    //[0] result: player location in TargetImage coordinates
    Point dstLoc;

    //[1] restrict the search to the given ROI
    Mat imageROI=TargetImage( Rect(ROI_Xstart, ROI_Ystart, ROI_length, ROI_heigh ) );    

    //[2] normalized cross-correlation template match; 0.64 is the
    //    empirical acceptance threshold
    float matchaValue = (float)TempleMatch(m_MatPersonFeatureImage, imageROI, dstLoc, CV_TM_CCOEFF_NORMED) ;
    if( matchaValue < 0.64)
    {
        throw("Person match fail");
    }
    else
    {
#ifdef DEBUG_SHOW_ImageMatch
        //rectangle( imageROI,  Rect(dstLoc,  Size(m_MatPersonFeatureImage.cols, m_MatPersonFeatureImage.rows) ), Scalar(0, 0, 255), 2, 8, 0 );
        //imshow("SearchqPerson",imageROI);
#endif
        // translate from ROI-local back to full-image coordinates
        dstLoc.x+=ROI_Xstart;
        dstLoc.y+=ROI_Ystart;
        // shift from the template's top-left corner to the point where the
        // character meets the ground (horizontal center, 80% of height)
        dstLoc.x += (int)(m_MatPersonFeatureImage.cols/2);
        dstLoc.y += (int)(m_MatPersonFeatureImage.rows*0.8);
    }
    return dstLoc;
}

/*******************************************************************************
* Function Name  : FindBuilding
* Description    : find block point
* Input          : None
* Output         : None
* Return         : destination image
*******************************************************************************/
Point ImageMatch::SearchBuilding(Mat TargetImage, int ROI_Xstart,int ROI_Ystart,int ROI_length,int ROI_heigh)
{
    //【0】定义两个容器用于存放每个矩形特征的最佳匹配点和匹配值
    vector<Point> vecCubePoint(m_vecCubeFeatureImage.size());
    vector<float> vecCubeValue(m_vecCubeFeatureImage.size());
    //【0】定义一些阈值
    float completelyMatchThrel = 0.70f;//认为一种图形完全匹配的阈值
    float LowestThrel = 40;//认为基本相近的阈值
    //【0】定义一些临时变量
    Point loc;
    float matchValue;
    //【0】最终配置相关变量
    bool matchSucceed=false;//是否找到完全匹配特征标记
    Point lastLoc(0,0);//最终位置
    float lastValue;//最终匹配值
    int lastIndex=-1;//最终位置对应的特征序号

    //【0】计时
    TimeOperation time;
    int ms=0;

    //【1】转为灰度图
    cvtColor(TargetImage,TargetImage,CV_BGR2GRAY);

    //【2】设置ROI区域
    Mat imageROI=TargetImage( Rect(ROI_Xstart, ROI_Ystart, ROI_length, ROI_heigh ) );    


    //【3】匹配建筑
    for(unsigned int i=0;i<m_vecCubeFeatureImage.size();i++)
    {
        matchValue = (float)TempleMatch(m_vecCubeFeatureImage[i],imageROI,loc,CV_TM_CCOEFF_NORMED );
        if(matchValue > completelyMatchThrel)
        {
            lastLoc = loc ;
            //纠正坐标
            lastLoc.x += ROI_Xstart;
            lastLoc.y += ROI_Ystart;
            lastLoc.x += (int)(m_vecCubeFeatureImage[i].cols*0.5);
            lastLoc.y += (int)(m_vecCubeFeatureImage[i].rows*0.3);
            //若是任务脚底的建筑,继续找
            if(calculateDistance(lastLoc,m_pLocPerson)<LowestThrel+30)
            {
                lastLoc.x=0;
                lastLoc.y=0;
                continue;
            }
            matchSucceed = true;
            lastValue = matchValue;
            lastIndex = i;
#ifdef DEBUG_SHOW_ImageMatch
            imshow("matchfeature",m_vecCubeFeatureImage[i]);
            rectangle( imageROI,  Rect(loc,  Size(m_vecCubeFeatureImage[i].cols, m_vecCubeFeatureImage[i].rows) ), Scalar(0, 0, 255), 2, 8, 0 );
            imshow("SearchPerson",imageROI);
#endif
            break;
        }
        else
        {
            vecCubePoint[i]=loc;
            vecCubeValue[i]=matchValue;
        }
    }

    ////【4】没有完全匹配,寻找最佳匹配
    if(!matchSucceed)
    {
        for(unsigned int i=0;i<m_vecCubeFeatureImage.size();i++)
        {
            Mat featureROI;
            if(m_pLocPerson.x<TargetImage.cols/2)//取特征的右边
            {
                featureROI =m_vecCubeFeatureImage[i]( Rect(m_vecCubeFeatureImage[i].cols*0.4, 0, m_vecCubeFeatureImage[i].cols*0.6-1, m_vecCubeFeatureImage[i].rows ) );    
                matchValue = (float)TempleMatch(featureROI,imageROI,loc,CV_TM_CCOEFF_NORMED );
                if(matchValue > completelyMatchThrel)
                {
                    lastLoc = loc ;
                    //纠正坐标
                    lastLoc.x += ROI_Xstart;
                    lastLoc.y += ROI_Ystart;
                    lastLoc.x += (int)(m_vecCubeFeatureImage[i].cols*(0.5-0.4));
                    lastLoc.y += (int)(m_vecCubeFeatureImage[i].rows*0.3);
                    //若是任务脚底的建筑,继续找
  

当前文章:http://ayzwzx.cn/play/ivju8cu9o6.html

发布时间:2019-03-19 10:26:10

赚道app合法吗 小轿车兼职 大地网赚论坛怎么样 赚钱最快的手机游戏 安卓刷app赚钱 聚享游打码赚钱平台 手机软件脉脉能赚钱吗 手机赚钱app 淘宝返利网官方网 业务佣金合同范本

73124 29597 98333 97314 71966 4842933695 13007 80854

我要说两句: (0人参与)

发布