Source code for OpenCV-based palm detection and movement tracking


Pattern recognition project: gesture recognition based on OpenCV

I built a small gesture-recognition program using the Windows build of OpenCV 2.4.4, Qt 4.8.3, and the VS2010 compiler.

The program mainly uses OpenCV's feature-training facilities plus basic image processing, including skin-color detection.

Without further ado, the basic UI design and main features come first; anyone with a little Qt experience will find the dialog layout familiar. Next, the Qt project imports the OpenCV 2.4.4 library files (the Qt project file first):

[cpp]
#-------------------------------------------------
#
# Project created by QtCreator 2013-05-25T11:16:11
#
#-------------------------------------------------

QT += core gui

CONFIG += warn_off

greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

TARGET = HandGesture
TEMPLATE = app

INCLUDEPATH += E:/MyQtCreator/MyOpenCV/opencv/build/include

SOURCES += main.cpp\
    handgesturedialog.cpp \
    SRC/GestrueInfo.cpp \
    SRC/AIGesture.cpp

HEADERS += handgesturedialog.h \
    SRC/GestureStruct.h \
    SRC/GestrueInfo.h \
    SRC/AIGesture.h

FORMS += handgesturedialog.ui

#Load OpenCV runtime libs
win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_core244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_core244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_features2d244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_features2d244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_haartraining_engine
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_haartraining_engined

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_highgui244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_highgui244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_objdetect244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_objdetect244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_video244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_video244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_calib3d244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_calib3d244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_contrib244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_contrib244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_imgproc244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_imgproc244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_legacy244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_legacy244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_ml244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_ml244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_photo244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_photo244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_nonfree244
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_nonfree244d

INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

With the basic configuration in place, gesture-recognition development proceeds as follows. First, collect the raw images and normalize them (size included; I used MATLAB, a powerful tool, for that step). Then extract the sample features and train the feature library; CSDN hosts plenty of step-by-step guides on recognizing and training image feature libraries, and following them works without trouble. Next, capture images from the camera; the code:

[cpp]
void HandGestureDialog::on_pushButton_OpenCamera_clicked()
{
    cam = cvCreateCameraCapture(0);
    timer->start(time_intervals);
    frame = cvQueryFrame(cam);

    ui->pushButton_OpenCamera->setDisabled(true);
    ui->pushButton_CloseCamera->setEnabled(true);
    ui->pushButton_ShowPause->setEnabled(true);
    ui->pushButton_SnapImage->setEnabled(true);
    afterSkin = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
}

[cpp]
void HandGestureDialog::readFarme()
{
    frame = cvQueryFrame(cam);
    QImage image((const uchar*)frame->imageData,
                 frame->width,
                 frame->height,
                 QImage::Format_RGB888);
    image = image.rgbSwapped();
    image = image.scaled(320, 240);
    ui->label_CameraShow->setPixmap(QPixmap::fromImage(image));
    gesture.SkinDetect(frame, afterSkin);

    /* next to opencv */

    if (status_switch == Recongnise)
    {
        // Flips the frame into mirror image
        cvFlip(frame, frame, 1);

        // Call the function to detect and draw the hand positions
        StartRecongizeHand(frame);
    }
}

After checking the sample pictures, the core training code:

[cpp]
void HandGestureDialog::on_pushButton_StartTrain_clicked()
{
    QProgressDialog* process = new QProgressDialog(this);
    process->setWindowTitle("Traning Model");
    process->setLabelText("Processing...");
    process->setModal(true);
    process->show();
    gesture.setMainUIPointer(this);
    gesture.Train(process);
    QMessageBox::about(this, tr("Done"), tr("Gesture training model finished"));
}

[cpp]
void CAIGesture::Train(QProgressDialog *pBar) // train every gesture under the training folder
{
    QString curStr = QDir::currentPath();
    QString fp1 = "InfoDoc/gestureFeatureFile.yml";
    fp1 = curStr + "/" + fp1;
    CvFileStorage *GestureFeature = cvOpenFileStorage(fp1.toStdString().c_str(), 0, CV_STORAGE_WRITE);
    FILE* fp;
    QString fp2 = "InfoDoc/gestureFile.txt";
    fp2 = curStr + "/" + fp2;
    fp = fopen(fp2.toStdString().c_str(), "w");
    int FolderCount = 0;

    /* get the current directory, then its subdirectories */
    QString trainStr = curStr;
    trainStr += "/TraningSample/";
    QDir trainDir(trainStr);
    GestureStruct gesture;
    QFileInfoList list = trainDir.entryInfoList();

    pBar->setRange(0, list.size() - 2);

    for (int i = 2; i < list.size(); i++)
    {
        pBar->setValue(i - 1);

        QFileInfo fileInfo = list.at(i);
        if (fileInfo.isDir() == true)
        {
            FolderCount++;

            QString tempStr = fileInfo.fileName();
            fprintf(fp, "%s\n", tempStr.toStdString().c_str());
            gesture.angleName = tempStr.toStdString() + "angleName";
            gesture.anglechaName = tempStr.toStdString() + "anglechaName";
            gesture.countName = tempStr.toStdString() + "anglecountName";

            tempStr = trainStr + tempStr + "/";
            QDir subDir(tempStr);
            OneGestureTrain(subDir, GestureFeature, gesture);
        }
    }
    pBar->autoClose();
    delete pBar;
    pBar = NULL;
    fprintf(fp, "%s%d", "Hand Gesture Number: ", FolderCount);
    fclose(fp);
    cvReleaseFileStorage(&GestureFeature);
}

[cpp]
void CAIGesture::OneGestureTrain(QDir GestureDir, CvFileStorage *fs, GestureStruct gesture) // train the images of one gesture
{
    IplImage* TrainImage = 0;
    IplImage* dst = 0;
    CvSeq* contour = NULL;
    CvMemStorage* storage;
    storage = cvCreateMemStorage(0);
    CvPoint center = cvPoint(0, 0);
    float radius = 0.0;
    float angle[FeatureNum][10] = {0}, anglecha[FeatureNum][10] = {0}, anglesum[FeatureNum][10] = {0}, anglechasum[FeatureNum][10] = {0};
    float count[FeatureNum] = {0}, countsum[FeatureNum] = {0};

    int FileCount = 0;
    /* read all the jpg files in this directory */
    QFileInfoList list = GestureDir.entryInfoList();
    QString currentDirPath = GestureDir.absolutePath();
    currentDirPath += "/";
    for (int k = 2; k < list.size(); k++)
    {
        QFileInfo tempInfo = list.at(k);
        if (tempInfo.isFile() == true)
        {
            QString fileNamePath = currentDirPath + tempInfo.fileName();
            TrainImage = cvLoadImage(fileNamePath.toStdString().c_str(), 1);
            if (TrainImage == NULL)
            {
                cout << "can't load image" << endl;
                cvReleaseMemStorage(&storage);
                cvReleaseImage(&dst);
                cvReleaseImage(&TrainImage);
                return;
            }
            if (dst == NULL && TrainImage != NULL)
                dst = cvCreateImage(cvGetSize(TrainImage), 8, 1);
            SkinDetect(TrainImage, dst);
            FindBigContour(dst, contour, storage);
            cvZero(dst);
            cvDrawContours(dst, contour, CV_RGB(255,255,255), CV_RGB(255,255,255), -1, -1, 8);
            ComputeCenter(contour, center, radius);

            GetFeature(dst, center, radius, angle, anglecha, count);
            for (int j = 0; j < FeatureNum; j++)
            {
                countsum[j] += count[j];
                for (int k = 0; k < 10; k++)
                {
                    anglesum[j][k] += angle[j][k];
                    anglechasum[j][k] += anglecha[j][k];
                }
            }
            FileCount++;
            cvReleaseImage(&TrainImage);
        }
    }
    for (int i = 0; i < FeatureNum; i++)
    {
        gesture.count[i] = countsum[i] / FileCount;
        for (int j = 0; j < 10; j++)
        {
            gesture.angle[i][j] = anglesum[i][j] / FileCount;
            gesture.anglecha[i][j] = anglechasum[i][j] / FileCount;
        }
    }
    cvStartWriteStruct(fs, gesture.angleName.c_str(), CV_NODE_SEQ, NULL); // start writing the yml file

    int i = 0;
    for (i = 0; i < FeatureNum; i++)
        cvWriteRawData(fs, &gesture.angle[i][0], 10, "f"); // write the skin-color angle values

    cvEndWriteStruct(fs);
    cvStartWriteStruct(fs, gesture.anglechaName.c_str(), CV_NODE_SEQ, NULL);

    for (i = 0; i < FeatureNum; i++)
        cvWriteRawData(fs, &gesture.anglecha[i][0], 10, "f"); // write the non-skin angle values

    cvEndWriteStruct(fs);
    cvStartWriteStruct(fs, gesture.countName.c_str(), CV_NODE_SEQ, NULL);
    cvWriteRawData(fs, &gesture.count[0], FeatureNum, "f"); // write the number of skin-color angles
    cvEndWriteStruct(fs);

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&dst);
}

[cpp]
void CAIGesture::SkinDetect(IplImage* src, IplImage* dst)
{
    IplImage* hsv = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3); // use to split to HSV
    IplImage* tmpH1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1); // Use To Skin Detect
    IplImage* tmpS1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* tmpH2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* tmpS3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* tmpH3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* tmpS2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* H = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* S = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* V = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* src_tmp1 = cvCreateImage(cvGetSize(src), 8, 3);

    cvSmooth(src, src_tmp1, CV_GAUSSIAN, 3, 3); // Gaussian Blur
    cvCvtColor(src_tmp1, hsv, CV_BGR2HSV); // Color Space to Convert
    cvCvtPixToPlane(hsv, H, S, V, 0); // To Split 3 channel

    /********************* Skin Detect **************/
    cvInRangeS(H, cvScalar(0.0,0.0,0,0), cvScalar(20.0,0.0,0,0), tmpH1);
    cvInRangeS(S, cvScalar(75.0,0.0,0,0), cvScalar(200.0,0.0,0,0), tmpS1);
    cvAnd(tmpH1, tmpS1, tmpH1, 0);

    // Red Hue with Low Saturation
    // Hue 0 to 26 degree and Sat 20 to 90
    cvInRangeS(H, cvScalar(0.0,0.0,0,0), cvScalar(13.0,0.0,0,0), tmpH2);
    cvInRangeS(S, cvScalar(20.0,0.0,0,0), cvScalar(90.0,0.0,0,0), tmpS2);
    cvAnd(tmpH2, tmpS2, tmpH2, 0);

    // Red Hue to Pink with Low Saturation
    // Hue 340 to 360 degree and Sat 15 to 90
    cvInRangeS(H, cvScalar(170.0,0.0,0,0), cvScalar(180.0,0.0,0,0), tmpH3);
    cvInRangeS(S, cvScalar(15.0,0.0,0,0), cvScalar(90.,0.0,0,0), tmpS3);
    cvAnd(tmpH3, tmpS3, tmpH3, 0);

    // Combine the Hue and Sat detections
    cvOr(tmpH3, tmpH2, tmpH2, 0);
    cvOr(tmpH1, tmpH2, tmpH1, 0);

    cvCopy(tmpH1, dst);

    cvReleaseImage(&hsv);
    cvReleaseImage(&tmpH1);
    cvReleaseImage(&tmpS1);
    cvReleaseImage(&tmpH2);
    cvReleaseImage(&tmpS2);
    cvReleaseImage(&tmpH3);
    cvReleaseImage(&tmpS3);
    cvReleaseImage(&H);
    cvReleaseImage(&S);
    cvReleaseImage(&V);
    cvReleaseImage(&src_tmp1);
}

[cpp]
// To Find The biggest Contour
void CAIGesture::FindBigContour(IplImage* src, CvSeq* (&contour), CvMemStorage* storage)
{
    CvSeq* contour_tmp, *contourPos;
    int contourcount = cvFindContours(src, storage, &contour_tmp, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    if (contourcount == 0)
        return;
    CvRect bndRect = cvRect(0, 0, 0, 0);
    double contourArea, maxcontArea = 0;
    for (; contour_tmp != 0; contour_tmp = contour_tmp->h_next)
    {
        bndRect = cvBoundingRect(contour_tmp, 0);
        contourArea = bndRect.width * bndRect.height;
        if (contourArea >= maxcontArea) // find Biggest Contour
        {
            maxcontArea = contourArea;
            contourPos = contour_tmp;
        }
    }
    contour = contourPos;
}

[cpp]
// Calculate The Center
void CAIGesture::ComputeCenter(CvSeq* (&contour), CvPoint& center, float& radius)
{
    CvMoments m;
    double M00, X, Y;
    cvMoments(contour, &m, 0);
    M00 = cvGetSpatialMoment(&m, 0, 0);
    X = cvGetSpatialMoment(&m, 1, 0) / M00;
    Y = cvGetSpatialMoment(&m, 0, 1) / M00;

    center.x = (int)X;
    center.y = (int)Y;

    /******************* to find the radius **********************/
    int hullcount;
    CvSeq* hull;
    CvPoint pt;
    double tmpr1, r = 0;
    hull = cvConvexHull2(contour, 0, CV_COUNTER_CLOCKWISE, 0);
    hullcount = hull->total;
    for (int i = 1; i < hullcount; i++)
    {
        pt = **CV_GET_SEQ_ELEM(CvPoint*, hull, i); // get each point
        tmpr1 = sqrt((double)((center.x - pt.x)*(center.x - pt.x)) + (double)((center.y - pt.y)*(center.y - pt.y))); // distance to the center point
        if (tmpr1 > r) // keep the maximum as the radius
            r = tmpr1;
    }
    radius = r;
}

[cpp]
void CAIGesture::GetFeature(IplImage* src, CvPoint& center, float radius,
                            float angle[FeatureNum][10],
                            float anglecha[FeatureNum][10],
                            float count[FeatureNum])
{
    int width = src->width;
    int height = src->height;
    int step = src->widthStep / sizeof(uchar);
    uchar* data = (uchar*)src->imageData;

    float R = 0.0;
    int a1, b1, x1, y1, a2, b2, x2, y2; // offsets from the center to sampled points
    float angle1_tmp[200] = {0}, angle2_tmp[200] = {0}, angle1[50] = {0}, angle2[50] = {0}; // temporaries used to compute the angles
    int angle1_tmp_count = 0, angle2_tmp_count = 0, angle1count = 0, angle2count = 0, anglecount = 0;

    for (int i = 0; i < FeatureNum; i++) // extract features on FeatureNum (i.e. 5) concentric rings
    {
        R = (i + 4) * radius / 9;
        for (int j = 0; j <= 3600; j++)
        {
            if (j <= 900)
            {
                a1 = (int)(R * sin(j * 3.14 / 1800)); // sketch it on paper and the geometry becomes clear
                b1 = (int)(R * cos(j * 3.14 / 1800));
                x1 = center.x - b1;
                y1 = center.y - a1;
                a2 = (int)(R * sin((j + 1) * 3.14 / 1800));
                b2 = (int)(R * cos((j + 1) * 3.14 / 1800));
                x2 = center.x - b2;
                y2 = center.y - a2;
            }
            else
            {
                if (j > 900 && j <= 1800)
                {
                    a1 = (int)(R * sin((j - 900) * 3.14 / 1800));
                    b1 = (int)(R * cos((j - 900) * 3.14 / 1800));
                    x1 = center.x + a1;
                    y1 = center.y - b1;
                    a2 = (int)(R * sin((j + 1 - 900) * 3.14 / 1800));
                    b2 = (int)(R * cos((j + 1 - 900) * 3.14 / 1800));
                    x2 = center.x + a2;
                    y2 = center.y - b2;
                }
                else
                {
                    if (j > 1800 && j < 2700)
                    {
                        a1 = (int)(R * sin((j - 1800) * 3.14 / 1800));
                        b1 = (int)(R * cos((j - 1800) * 3.14 / 1800));
                        x1 = center.x + b1;
                        y1 = center.y + a1;
                        a2 = (int)(R * sin((j + 1 - 1800) * 3.14 / 1800));
                        b2 = (int)(R * cos((j + 1 - 1800) * 3.14 / 1800));
                        x2 = center.x + b2;
                        y2 = center.y + a2;
                    }
                    else
                    {
                        a1 = (int)(R * sin((j - 2700) * 3.14 / 1800));
                        b1 = (int)(R * cos((j - 2700) * 3.14 / 1800));
                        x1 = center.x - a1;
                        y1 = center.y + b1;
                        a2 = (int)(R * sin((j + 1 - 2700) * 3.14 / 1800));
                        b2 = (int)(R * cos((j + 1 - 2700) * 3.14 / 1800));
                        x2 = center.x - a2;
                        y2 = center.y + b2;
                    }
                }
            }

            if (x1 > 0 && x1 < width && x2 > 0 && x2 < width && y1 > 0 && y1 < height && y2 > 0 && y2 < height)
            {
                if ((int)data[y1*step + x1] == 255 && (int)data[y2*step + x2] == 0)
                {
                    angle1_tmp[angle1_tmp_count] = (float)(j * 0.1); // angle where skin turns to non-skin
                    angle1_tmp_count++;
                }
                else if ((int)data[y1*step + x1] == 0 && (int)data[y2*step + x2] == 255)
                {
                    angle2_tmp[angle2_tmp_count] = (float)(j * 0.1); // angle where non-skin turns to skin
                    angle2_tmp_count++;
                }
            }
        }
        int j = 0;
        for (j = 0; j < angle1_tmp_count; j++)
        {
            if (angle1_tmp[j] - angle1_tmp[j-1] < 0.2) // ignore angles that are too small
                continue;
            angle1[angle1count] = angle1_tmp[j];
            angle1count++;
        }

        for (j = 0; j < angle2_tmp_count; j++)
        {
            if (angle2_tmp[j] - angle2_tmp[j-1] < 0.2)
                continue;
            angle2[angle2count] = angle2_tmp[j];
            angle2count++;
        }

        for (j = 0; j < max(angle1count, angle2count); j++)
        {
            if (angle1[0] > angle2[0])
            {
                if (angle1[j] - angle2[j] < 7) // ignore spans under 7 degrees; a finger is normally wider
                    continue;
                angle[i][anglecount] = (float)((angle1[j] - angle2[j]) * 0.01); // skin span
                anglecha[i][anglecount] = (float)((angle2[j+1] - angle1[j]) * 0.01); // non-skin span, e.g. the gap between fingers
                anglecount++;
            }
            else
            {
                if (angle1[j+1] - angle2[j] < 7)
                    continue;
                anglecount++;
                angle[i][anglecount] = (float)((angle1[j+1] - angle2[j]) * 0.01);
                anglecha[i][anglecount] = (float)((angle2[j] - angle1[j]) * 0.01);
            }
        }

        if (angle1[0] < angle2[0])
            angle[i][0] = (float)((angle1[0] + 360 - angle2[angle2count-1]) * 0.01);
        else
            anglecha[i][0] = (float)((angle2[0] + 360 - angle1[angle1count-1]) * 0.01);

        count[i] = (float)anglecount;
        angle1_tmp_count = 0, angle2_tmp_count = 0, angle1count = 0, angle2count = 0, anglecount = 0;
        for (j = 0; j < 200; j++)
        {
            angle1_tmp[j] = 0;
            angle2_tmp[j] = 0;
        }
        for (j = 0; j < 50; j++)
        {
            angle1[j] = 0;
            angle2[j] = 0;
        }
    }
}

That is essentially all of the core code: the feature-extraction routine used to build the training library, along with the basic skin-detection and connected-region detection functions.

Moving Target Detection and Tracking Based on OpenCV

Abstract: Moving target detection and tracking has significant application value in computer vision and image processing.

It can be applied in video surveillance, autonomous driving, pedestrian recognition, and many other fields.

This article introduces how to implement moving target detection and tracking with the OpenCV library, demonstrated through examples.

The key techniques covered include moving object detection, trajectory tracking, and background modeling.

Detecting and tracking moving targets provides real-time monitoring and pursuit capability, supporting a wide range of application scenarios.

1. Introduction. Moving target detection and tracking is an important research direction in computer vision; its core task is to extract meaningful moving targets from an image sequence and then track and analyze them.

It is in broad demand across many practical application scenarios.

For example, in video surveillance systems, detecting and tracking moving targets provides real-time monitoring and alarm capability.

In autonomous driving systems, recognizing and tracking other vehicles and pedestrians enables intelligent driving decisions.

Therefore, research into efficient and accurate moving target detection and tracking matters for improving the performance and reliability of computer vision systems.

2. OpenCV-based moving target detection and tracking methods. 2.1 Moving object detection. Moving object detection is the first step; its goal is to separate the moving objects from the image sequence.

In OpenCV, background differencing can be used to detect moving objects.

Background differencing assumes the static part of each frame is background; comparing the current frame against historical frames extracts the moving foreground objects.

The method is simple and effective, and it is widely used in practice.
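As a concrete illustration (a sketch of mine, not part of the original text), background-difference detection with OpenCV's built-in MOG2 subtractor in Python (OpenCV 4.x API); the video path and the blob-area threshold are placeholder assumptions:

import cv2

cap = cv2.VideoCapture("input.mp4")  # placeholder path; 0 would open a camera
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    fg = subtractor.apply(frame)        # foreground mask of moving pixels
    fg = cv2.medianBlur(fg, 5)          # suppress salt-and-pepper noise
    contours, _ = cv2.findContours(fg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 500:    # ignore tiny blobs
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("motion", frame)
    if cv2.waitKey(30) == 27:           # ESC quits
        break
cap.release()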

2.2 Trajectory tracking. Trajectory tracking continuously follows a moving target, obtaining the object's position and motion in the image sequence in real time.

In OpenCV, a Kalman filter can be used to track the motion trajectory.

A Kalman filter predicts an object's current position from its past position and velocity information.

By continually updating the target's position and velocity estimates, accurate trajectory tracking can be achieved.
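A minimal sketch of OpenCV's Kalman filter for this setup (my example, not the article's); it assumes a constant-velocity model where the state is (x, y, vx, vy) and the measurement is the detected center point:

import cv2
import numpy as np

kf = cv2.KalmanFilter(4, 2)  # 4 state variables (x, y, vx, vy), 2 measured (x, y)
kf.transitionMatrix = np.array([[1, 0, 1, 0],
                                [0, 1, 0, 1],
                                [0, 0, 1, 0],
                                [0, 0, 0, 1]], np.float32)
kf.measurementMatrix = np.array([[1, 0, 0, 0],
                                 [0, 1, 0, 0]], np.float32)
kf.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-2
kf.measurementNoiseCov = np.eye(2, dtype=np.float32) * 1e-1

def track_step(cx, cy):
    # predict first (a priori estimate), then correct with the detection
    pred = kf.predict()
    kf.correct(np.array([[np.float32(cx)], [np.float32(cy)]]))
    return float(pred[0]), float(pred[1])

Feeding track_step the center of the detected foreground blob each frame yields a smoothed, predictive trajectory, which is exactly the update loop described above.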

2.3 Background modeling. Background modeling builds a model of the static scene, against which new frames are compared in order to identify moving targets.

Sample essay (2024): Moving Target Detection and Tracking Based on OpenCV

Essay one: Moving Target Detection and Tracking Based on OpenCV. I. Introduction. Moving target detection and tracking is an important research area of computer vision, widely applied in video surveillance, intelligent transportation, human-computer interaction, and many other fields.

As computer vision technology develops, OpenCV-based moving target detection and tracking has gradually become a research hotspot thanks to its efficiency and accuracy.

This essay introduces OpenCV-based methods for moving target detection and tracking, analyzing their principles, applications, and optimizations with the aim of improving accuracy and real-time performance.

II. About OpenCV. OpenCV (Open Source Computer Vision Library) is an open-source computer vision and machine learning library containing a large number of algorithms for image processing and computer vision.

OpenCV exposes a rich API that lets developers implement vision algorithms quickly.

For moving target detection and tracking, OpenCV offers several approaches, such as background subtraction, optical flow, and feature matching.

III. Moving target detection. Moving target detection extracts the moving targets from a video sequence.

OpenCV-based detection methods mainly include background subtraction and inter-frame differencing.

1. Background subtraction: difference the current frame against a background frame to obtain the foreground target.

It extracts moving targets effectively, but places high demands on background updating and model adaptability.

OpenCV provides several background-subtraction algorithms, such as MOG2 and KNN.

2. Inter-frame differencing: detect moving targets by comparing the differences between adjacent frames.

It is fairly robust to illumination change and background clutter, but can produce "ghosting" artifacts.
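To see the ghosting concretely, here is a minimal two-frame difference sketch (my illustration; the threshold of 25 is an arbitrary assumption): the mask lights up both where the object is now and where it just was, and the second silhouette is the ghost.

import cv2

cap = cv2.VideoCapture(0)
_, prev = cap.read()
prev = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    diff = cv2.absdiff(gray, prev)  # inter-frame difference
    _, mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    cv2.imshow("frame-diff mask", mask)  # note the double silhouette (ghost)
    prev = gray
    if cv2.waitKey(30) == 27:
        break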

IV. Moving target tracking. Once a moving target has been detected, tracking follows it continuously, obtaining its position and motion in the image sequence in real time.

OpenCV-based tracking methods mainly include feature matching and optical flow.

1. Feature matching: extract the target's features and search subsequent frames for regions with similar features.

It tolerates deformation and partial occlusion reasonably well, but mismatches arise when the target resembles its surroundings.

2. Optical flow: track the target using optical-flow information.

Optical flow encodes the motion of image pixels; computing the flow field between adjacent frames estimates the target's trajectory.
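A sketch of dense optical flow with OpenCV's Farneback implementation (my example; the parameter values are those commonly used in the OpenCV samples, and the video path is a placeholder): the per-pixel flow field between adjacent frames gives motion direction and magnitude, from which a target's trajectory can be estimated.

import cv2
import numpy as np

cap = cv2.VideoCapture("input.mp4")  # placeholder path
_, first = cap.read()
prev_gray = cv2.cvtColor(first, cv2.COLOR_BGR2GRAY)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    moving = (mag > 2.0).astype(np.uint8) * 255  # pixels moving faster than ~2 px/frame
    cv2.imshow("moving pixels", moving)
    prev_gray = gray
    if cv2.waitKey(30) == 27:
        break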

C++ source code for gesture recognition with OpenCV

Result screenshots come first in the original post; the source is below. The hand region can be segmented by RGB values (the hand's RGB values differ from the background's), by edge detection, or by background subtraction.

I used a background-subtraction model here.

OpenCV provides several background-subtraction models; the codebook model calibrates itself precisely over a stretch of frames.

For every image it acquires, it computes each pixel's mean and deviation and assigns the codebook boxes accordingly.

The foreground then looks like a black-and-white image in which only the hand is white; a convex hull is used to find the fingertips.

The convex hull is essentially the convex set that encloses the hand region.

The red line wrapped around the hand is the convex hull.

It really is convex: take any two points inside the red region and join them with a line, and the whole line lies inside the set.

The yellow dots are defect points; there are many of them, one per valley.

From the number of defect points, we can compute the number of extended fingers.
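As a hedged sketch of that counting step (mine, in Python rather than this post's C/C++): keep only the deep, acute convexity defects, since those are the valleys between extended fingers; the depth threshold is a heuristic assumption.

import cv2
import numpy as np

def fingers_from_defects(hand_contour):
    hull_idx = cv2.convexHull(hand_contour, returnPoints=False)
    defects = cv2.convexityDefects(hand_contour, hull_idx)
    if defects is None:
        return 0
    valleys = 0
    for i in range(defects.shape[0]):
        s, e, f, d = defects[i, 0]
        start, end, far = hand_contour[s][0], hand_contour[e][0], hand_contour[f][0]
        a = np.linalg.norm(end - start)
        b = np.linalg.norm(far - start)
        c = np.linalg.norm(end - far)
        # cosine rule: the angle at the defect point; finger valleys are acute
        angle = np.arccos(np.clip((b**2 + c**2 - a**2) / (2 * b * c + 1e-9), -1, 1))
        if angle < np.pi / 2 and d > 10000:  # deep (d is in 1/256 px units) and acute
            valleys += 1
    return valleys + 1 if valleys else 0  # n valleys usually means n+1 extended fingers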

In short, hand-region extraction is done with background subtraction.

The fingertip points and the valley depth points come from the convexity defects.

The main code that extracts the contour and detects the convexity lives in the function void detect(IplImage* img_8uc1, IplImage* img_8uc3). Put the camera in front of a stable background, run the code, and wait a while.

Once calibration completes,

you will see the connected-components image, along with some noise.

Then put your hand into the camera's view.

There is not much more to say; the code is easier to understand read directly. The core code:

// model, ch[NCHANNELS], help() and detect() are declared elsewhere in the original file
int main(int argc, char** argv)
{
    const char* filename = 0;
    IplImage* rawImage = 0, *yuvImage = 0;
    IplImage *ImaskCodeBook = 0, *ImaskCodeBookCC = 0;
    CvCapture* capture = 0;
    int c, n, nframes = 0;
    int nframesToLearnBG = 300;

    model = cvCreateBGCodeBookModel();
    model->modMin[0] = 3;
    model->modMin[1] = model->modMin[2] = 3;
    model->modMax[0] = 10;
    model->modMax[1] = model->modMax[2] = 10;
    model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

    bool pause = false;
    bool singlestep = false;

    for (n = 1; n < argc; n++)
    {
        static const char* nframesOpt = "--nframes=";
        if (strncmp(argv[n], nframesOpt, strlen(nframesOpt)) == 0)
        {
            if (sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0)
            {
                help();
                return -1;
            }
        }
        else
            filename = argv[n];
    }

    if (!filename)
    {
        printf("Capture from camera\n");
        capture = cvCaptureFromCAM(0);
    }
    else
    {
        printf("Capture from file %s\n", filename);
        capture = cvCreateFileCapture(filename);
    }
    if (!capture)
    {
        printf("Can not initialize video capturing\n\n");
        help();
        return -1;
    }

    for (;;)
    {
        if (!pause)
        {
            rawImage = cvQueryFrame(capture);
            ++nframes;
            if (!rawImage)
                break;
        }
        if (singlestep)
            pause = true;

        if (nframes == 1 && rawImage)
        {
            // CODEBOOK METHOD ALLOCATION
            yuvImage = cvCloneImage(rawImage);
            ImaskCodeBook = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
            ImaskCodeBookCC = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
            cvSet(ImaskCodeBook, cvScalar(255));
            cvNamedWindow("Raw", 1);
            cvNamedWindow("ForegroundCodeBook", 1);
            cvNamedWindow("CodeBook_ConnectComp", 1);
        }

        if (rawImage)
        {
            cvCvtColor(rawImage, yuvImage, CV_BGR2YCrCb);
            if (!pause && nframes - 1 < nframesToLearnBG)
                cvBGCodeBookUpdate(model, yuvImage);
            if (nframes - 1 == nframesToLearnBG)
                cvBGCodeBookClearStale(model, model->t / 2);
            if (nframes - 1 >= nframesToLearnBG)
            {
                cvBGCodeBookDiff(model, yuvImage, ImaskCodeBook);
                // segment into connected components (and centers if desired)
                cvCopy(ImaskCodeBook, ImaskCodeBookCC);
                cvSegmentFGMask(ImaskCodeBookCC);
                cvShowImage("CodeBook_ConnectComp", ImaskCodeBookCC);
                detect(ImaskCodeBookCC, rawImage);
            }
            cvShowImage("Raw", rawImage);
            cvShowImage("ForegroundCodeBook", ImaskCodeBook);
        }

        c = cvWaitKey(10) & 0xFF;
        c = tolower(c);
        if (c == 27 || c == 'q')
            break;
        switch (c)
        {
        case 'h':
            help();
            break;
        case 'p':
            pause = !pause;
            break;
        case 's':
            singlestep = !singlestep;
            pause = false;
            break;
        case 'r':
            pause = false;
            singlestep = false;
            break;
        case ' ':
            cvBGCodeBookClearStale(model, 0);
            nframes = 0;
            break;
        case 'y': case '0':
        case 'u': case '1':
        case 'v': case '2':
        case 'a': case '3':
        case 'b':
            ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';
            ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';
            ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';
            printf("CodeBook YUV Channels active: %d, %d, %d\n", ch[0], ch[1], ch[2]);
            break;
        case 'i':
        case 'o':
        case 'k':
        case 'l':
        {
            uchar* ptr = c == 'i' || c == 'o' ? model->modMax : model->modMin;
            for (n = 0; n < NCHANNELS; n++)
            {
                if (ch[n])
                {
                    int v = ptr[n] + (c == 'i' || c == 'l' ? 1 : -1);
                    ptr[n] = CV_CAST_8U(v);
                }
                printf("%d,", ptr[n]);
            }
            printf(" CodeBook %s Side\n", c == 'i' || c == 'o' ? "High" : "Low");
        }
        break;
        }
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("Raw");
    cvDestroyWindow("ForegroundCodeBook");
    cvDestroyWindow("CodeBook_ConnectComp");
    return 0;
}

If you want to run and debug it directly, the full project can be downloaded.

OpenCV + python gesture-recognition framework and worked example

Simple gesture recognition based on OpenCV 2.4.8 and Python 2.7.

The basic steps:
1. Remove the background and extract the hand contour
2. Convert RGB->YUV and compute the histogram
3. Apply morphological filtering and extract the region of interest
4. Find the contours of the binarized image
5. Find the largest hand-shaped contour
6. Find the convex hull of the hand contour
7. Mark the fingers and the palm
8. Compare the extracted feature points against a gesture dictionary to classify the gesture and shape

Extract the hand contour with cv2.findContours() and the largest convex hull with cv2.convexHull(), then locate the palm and fingers relative to each other: the hand contour and its key points, including the palm center and the relative finger positions.

A feature-dictionary entry mainly contains: name, palm center point, palm diameter, finger coordinates, finger count, and the angle between each pair of fingers. For example:

# BEGIN ------------------------------------#
V = gesture("V")
V.set_palm((475, 225), 45)
V.set_finger_pos([(490, 90), (415, 105)])
V.calc_angles()
dict[V.getname()] = V
# END --------------------------------------#

The final recognition result follows in the original post. Sample code for the recognition loop:

frame = hand_threshold(fg_frame, hand_histogram)
contour_frame = np.copy(frame)
contours, hierarchy = cv2.findContours(contour_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
found, hand_contour = hand_contour_find(contours)
if found:
    hand_convex_hull = cv2.convexHull(hand_contour)
    frame, hand_center, hand_radius, hand_size_score = mark_hand_center(frame_original, hand_contour)
    if hand_size_score:
        frame, finger, palm = mark_fingers(frame, hand_convex_hull, hand_center, hand_radius)
        frame, gesture_found = find_gesture(frame, finger, palm)
else:
    frame = frame_original

Gesture recognition with OpenCv

Applying OpenCv to gesture recognition. Part One: environment setup and algorithm design.

I. Environment setup:
1. Install Visual Studio 2008 and opencv-2.1.0-win32-vs2008; during the OpenCV installation, tick the "add it to your Current User PATH" checkbox.
2. Register the library, header, and source directories: Tools -> Options -> Projects and Solutions -> VC++ Directories. In the "Show directories for" drop-down choose "Include files" and add "D:\Program Files\OpenCV2.1\include\opencv"; choose "Library files" and add "D:\Program Files\OpenCV2.1\lib"; choose "Source files" and add "D:\Program Files\OpenCV2.1\src\cv", "D:\Program Files\OpenCV2.1\src\cvaux", "D:\Program Files\OpenCV2.1\src\cxcore", and "D:\Program Files\OpenCV2.1\src\highgui".

Click "OK".

3. Create and configure the project: create a new project (solution); in Solution Explorer right-click the project name opencvhello and choose Properties. In the Configuration drop-down select Debug, then under Configuration Properties -> Linker -> Input -> Additional Dependencies add cxcore210d.lib, cv210d.lib, and highgui210d.lib. In the Configuration drop-down select Release and add the release versions, cxcore210.lib, cv210.lib, and highgui210.lib. If prompted to save along the way, save.

An opencv program for hand position tracking

/**
 * HandVu - a library for computer vision-based hand gesture
 * recognition.
 * Copyright (C) 2004 Mathias Kolsch, matz@
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * $Id: hv_OpenCV.cpp,v 1.15 2006/01/03 21:44:15 matz Exp $
 */

#ifdef WIN32
#include <windows.h>
#endif

#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <ctype.h>
#include <time.h>
#include "HandVu.h"

IplImage *capture_image = 0;
IplImage *display_image = 0;
bool async_processing = false;
int num_async_bufs = 30;
IplImage *m_async_image = 0;
int m_async_bufID = -1;
bool sync_display = true;

CvPoint origin;
int select_object = 0;
int sel_area_left = 0, sel_area_top = 0, sel_area_right = 0, sel_area_bottom = 0;
bool correct_distortion = false;

void OnMouse(int event, int x, int y, int /*flags*/, void* /*params*/)
{
    if (!capture_image)
        return;
    if (capture_image->origin)
        y = capture_image->height - y;
    if (select_object)
    {
        sel_area_left = MIN(x, origin.x);
        sel_area_top = MIN(y, origin.y);
        sel_area_right = sel_area_left + CV_IABS(x - origin.x);
        sel_area_bottom = sel_area_top + CV_IABS(y - origin.y);
        sel_area_left = MAX(sel_area_left, 0);
        sel_area_top = MAX(sel_area_top, 0);
        sel_area_right = MIN(sel_area_right, capture_image->width);
        sel_area_bottom = MIN(sel_area_bottom, capture_image->height);
        if (sel_area_right - sel_area_left > 0 && sel_area_bottom - sel_area_top > 0)
            hvSetDetectionArea(sel_area_left, sel_area_top,
                               sel_area_right, sel_area_bottom);
    }
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = cvPoint(x, y);
        sel_area_left = sel_area_right = x;
        sel_area_top = sel_area_bottom = y;
        select_object = 1;
        break;
    case CV_EVENT_LBUTTONUP:
        select_object = 0;
        break;
    }
}

void showFrame(IplImage* img, hvAction action)
{
    if (action == HV_DROP_FRAME) {
        // HandVu recommends dropping the frame entirely
        // printf("HandVuFilter: dropping frame\n");
        return;
    } else if (action == HV_SKIP_FRAME) {
        // HandVu recommends displaying the frame, but not doing any further
        // processing on it - keep going
        // printf("HandVuFilter: supposed to skip frame\n");
    } else if (action == HV_PROCESS_FRAME) {
        // full processing was done and is recommended for following steps;
        // keep going
        // printf("HandVuFilter: processed frame\n");
    } else {
        assert(0); // unknown action
    }
    hvState state;
    hvGetState(0, state);
    cvShowImage("HandVu", img);
}

void displayCallback(IplImage* img, hvAction action)
{
    if (sync_display) {
        cvCopy(img, display_image);
    } else {
        showFrame(img, action);
    }
}

int main(int argc, char** argv)
{
    CvCapture* capture = 0;

    if (argc < 2) {
        printf("you need to specify a conductor file as first argument\n");
        printf("for example: ../config/default.conductor\n");
        return -1;
    }
    string conductor_fname(argv[1]);
    printf("will load conductor from file:\n%s\n", conductor_fname.c_str());

    if (argc == 2 || argc == 3) {
        int num = 0;
        if (argc == 3) {
            num = atoi(argv[2]);
        }
        capture = cvCaptureFromCAM(num);
        if (!capture) {
            capture = cvCaptureFromAVI(argv[2]);
        }
    }
    if (!capture)
    {
        fprintf(stderr, "Could not initialize capturing through OpenCV.\n");
        return -1;
    }

    printf("Hot keys: \n"
           "\tESC - quit the program\n"
           "\tr - restart the tracking\n"
           "\t0-3 - set the overlay (verbosity) level\n"
           "use the mouse to select the initial detection area\n");

    int p = 0; // according to docs, these calls don't work in OpenCV beta 4 yet
    p = cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);
    p = cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);

    capture_image = cvQueryFrame(capture);
    if (!capture_image) {
        fprintf(stderr, "Could not retrieve image through OpenCV.\n");
        return -1;
    }

    /* allocate all the buffers */
    CvSize size = cvGetSize(capture_image);
    hvInitialize(size.width, size.height);
    hvLoadConductor(conductor_fname);
    hvStartRecognition();
    hvSetOverlayLevel(2);
    if (async_processing) {
        hvAsyncSetup(num_async_bufs, displayCallback);
        if (sync_display) display_image = cvCloneImage(capture_image);
    }

    cvSetMouseCallback("HandVu", OnMouse);
    int success = cvNamedWindow("HandVu", 1);
    if (success != 1) {
        printf("can't open window - did you compile OpenCV with highgui support?");
        return -1;
    }
    fprintf(stderr, "initialized highgui\n");

    for (;;) {
        int c;
        if (async_processing) {
            // asynchronous processing in HandVu
            if (sync_display) cvShowImage("HandVu", display_image);

            // ------- main library call ---------
            hvAsyncGetImageBuffer(&m_async_image, &m_async_bufID);
            cvCopy(capture_image, m_async_image);
            hvAsyncProcessFrame(m_async_bufID);
            // -------
        } else {
            // synchronous processing in HandVu
            // ------- main library call ---------
            hvAction action = HV_INVALID_ACTION;
            action = hvProcessFrame(capture_image);
            // -------
            showFrame(capture_image, action);
        }

        c = cvWaitKey(10);
        if (c == 27 || c == 'q')
            break;
        switch (c)
        {
        case 'r':
            hvStopRecognition();
            hvStartRecognition();
            break;
        case '0':
            hvSetOverlayLevel(0);
            break;
        case '1':
            hvSetOverlayLevel(1);
            break;
        case '2':
            hvSetOverlayLevel(2);
            break;
        case '3':
            hvSetOverlayLevel(3);
            break;
        case 'u':
            if (hvCanCorrectDistortion()) {
                correct_distortion = !correct_distortion;
                hvCorrectDistortion(correct_distortion);
            }
            break;
        default:
            ;
        }

        // capture next image
        capture_image = cvQueryFrame(capture);
        if (!capture_image) {
            fprintf(stderr, "Could not retrieve image through OpenCV.\n");
            break;
        }
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("HandVu");
    return 0;
}

Moving target detection and tracking based on Opencv
Abstract: The camera captures the original image sequence, and a centroid tracking algorithm is applied to detect and track the moving target. The target's position can be determined by the centroid of the particle. The algorithm needs only a small amount of calculation and is easy to implement; its stability and precision depend mainly on the image segmentation and the value of the threshold. The detailed process and the key code of the OpenCV-based algorithm are given, and the system provides a control interface so that vehicle movement can be managed and monitored at night. Experimental results show that this approach achieves real-time identification of the moving target. Keywords: average method; centroid tracking; moving target; OpenCV

Gesture recognition with python + opencv: code walkthrough and results

1 Notes
=====
1.1 Environment: python 3.8 + opencv 4.4.0. 1.2 Gesture recognition: good things deserve to be shared.

1.3 Source: thanks to the original author, whose code had a bug and who preferred not to hand it out; it is republished here, with thanks, for learning only.

#https:///qq_25939803/article/details/106414495?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2
1.4 The source code has been extended, fixed, commented, and explained in plain terms that any beginner can follow.

2 Results
========
2.1 Mostly palm-side images. 2.2 Mostly back-of-hand images (shown later).

3 Code walkthrough
========
3.1 Principle in brief. Step 1: load the picture; a static gesture image loaded with cv2 or live frames imported from the camera both work.

Step 2: skin detection, filtering by H, S, V ranges in the HSV color space. Step 3: Gaussian filtering. Step 4: edge/contour detection. Step 5: find the hand's convexity defects. Step 6: classify the current gesture by the number of defect points; for example, 0 defect points is a fist (rock) and 4 valley points is an open palm (paper, i.e. five).

3.2 Code (the listing is truncated in this copy):

# _*_ coding: UTF-8 _*_
# Step 1: import the modules
import cv2
import numpy as np
import math
# Step 2: grab live video from the camera (0 = built-in camera)
cap = cv2.VideoCapture(0)

4 The back-of-hand results are just as good. Worth keeping.
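Since the listing above is cut off, here is a minimal sketch (my reconstruction of the described steps, not the author's code) covering steps 2 through 6: HSV skin mask, Gaussian filtering, contour extraction, convexity defects, and classification by defect count; the HSV bounds and the depth threshold are rough illustrative assumptions.

import cv2
import numpy as np

def gesture_from_frame(frame):
    # Step 2: skin mask by H/S/V range filtering (bounds are assumptions)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([0, 48, 80]), np.array([20, 255, 255]))
    # Step 3: Gaussian filtering
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    # Step 4: contours of the binary image
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return "no hand"
    # Step 5: convexity defects of the largest contour
    hand = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(hand, returnPoints=False)
    defects = cv2.convexityDefects(hand, hull)
    deep = 0 if defects is None else int(np.sum(defects[:, 0, 3] > 10000))
    # Step 6: classify by defect count: 0 -> fist (rock), 4 -> open palm (paper/five)
    return {0: "rock", 4: "paper"}.get(deep, "%d valleys" % deep)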

Learning Python: extracting the palm, palm center, and part of the palm print with opencv-python

Last time we successfully trained a palm detector (/take-fetter/p/8438747.html) and got results like the one shown. Next, OpenCV is used to extract the palm and remove the background, which relies on masks and ROI (region of interest); the concepts themselves are covered in plenty of places online.

First, crop the palm out of the image using the box drawn by the previous program (taking and saving a screenshot by hand also works). The extraction method:
1. Copy the image and convert it to YCrCb; keep the range close to palm skin color to produce a black-and-white image.
2. From the black-and-white image take the largest contour, draw the contour image, and fit an approximate ellipse.
3. Rotate by the ellipse angle (rotating the original, the black-and-white image, and the contour image together) to make the palm as upright as possible.
4. Using the black-and-white image as a mask on the original gives the extracted palm.

Extracting the palm center. Idea: from the black-and-white image, a distance transform finds the palm center, and the maximum radius draws the palm's inscribed circle:

distance = cv2.distanceTransform(black_and_white, cv2.DIST_L2, 5, cv2.CV_32F)
# Calculates the distance to the closest zero pixel for each pixel of the source image.
maxdist = 0
# rows,cols = img.shape
for i in range(distance.shape[0]):
    for j in range(distance.shape[1]):
        dist = distance[i][j]
        if maxdist < dist:
            x = j
            y = i
            maxdist = dist
cv2.circle(original, (x, y), maxdist, (255, 100, 255), 1, 8, 0)

Extracting the palm print. With the circle's radius and center known, ROI cropping yields the inscribed square (the inscribed square loses a lot of information, but I have not found a better approach yet):

final_img = original.copy()
# cv2.circle() this line
half_slide = maxdist * math.cos(math.pi / 4)
(left, right, top, bottom) = ((x - half_slide), (x + half_slide), (y - half_slide), (y + half_slide))
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(original, p1, p2, (77, 255, 9), 1, 1)
final_img = final_img[int(top):int(bottom), int(left):int(right)]

In the screenshots a gray area appears that by rights should not exist; cv2.imwrite shows no such problem, so presumably cv2.imshow constrains the displayed pixel size and auto-pads, or defaults to a gray background larger than the extracted image.

Code: https:///takefetter/Get_PalmPrint/blob/master/process_palm.py
Thanks: 1. https:///dev-td7/Automatic-Hand-Detection-using-Wrist-localisation, whose skin-color extraction and approximate-ellipse idea helped a great deal (even though the second half went unused). 2. /question/180668/how-to-find-the-center-of-one-palm-in-the-picture/, the source of the distance-transform idea, which also fulfilled that asker's need.

Moving Target Detection and Tracking Based on OpenCV

[Abstract] OpenCV is a cross-platform computer vision library released as open source; it implements many common algorithms of image processing and computer vision.

This paper briefly describes the principles and algorithmic implementation of moving target detection and tracking based on the open-source OpenCV library.

Compiled under the VC++ 6.0 environment, written in C++, and using a USB camera as the video capture device, it achieves continuous tracking of a suspicious target.

[Keywords] OpenCV; moving target detection and tracking; background differencing; CamShift algorithm

Abstract: OpenCV is a cross-platform computer vision library released as open source that implements many common algorithms of image processing and computer vision. This paper outlines the principles and algorithms of OpenCV-based moving target detection and tracking. Compiled under a VC++ 6.0 environment, using the C++ language and a USB camera as the video capture device, it achieves continuous tracking of suspicious targets. Keywords: OpenCV; moving target; detection; tracking; background difference; CamShift algorithm

I. Project background. To watch every corner of each room, older surveillance systems needed cameras installed at many positions. As anti-theft systems move toward networked remote access, intelligence, and automation, an indoor camera control system that automatically follows its target becomes necessary: by analyzing the image stream the camera transmits, it locks onto a suspicious target and drives stepper motors to turn the camera so the target stays within the camera's view.

How to do gesture recognition in Python with opencv: a detailed guide

Contents: preface; principle; the program; an appendix with another gesture-recognition example; summary. Preface: this project uses Google's open-source mediapipe framework, which provides a great many models, such as face detection, body detection, and hand detection.

Principle: first detect the hand; once it is found, compute the Hand Landmarks.

mediapipe locates 21 points on the hand; from the coordinates of those 21 points we can infer the gesture, or what the hand is doing.

The program. First, install Opencv:

pip install opencv-python

Second, install mediapipe:

pip install mediapipe

The program begins by importing both libraries:

import cv2
import mediapipe as mp

Then open the camera:

cap = cv2.VideoCapture(0)

The body of the main loop:

while True:
    ret, img = cap.read()           # read the current frame
    if ret:
        cv2.imshow('img', img)      # show the current frame
    if cv2.waitKey(1) == ord('q'):  # press q to quit
        break

The full program:

import cv2
import mediapipe as mp
import time

cap = cv2.VideoCapture(1)
mpHands = mp.solutions.hands
hands = mpHands.Hands()
mpDraw = mp.solutions.drawing_utils
handLmsStyle = mpDraw.DrawingSpec(color=(0, 0, 255), thickness=3)
handConStyle = mpDraw.DrawingSpec(color=(0, 255, 0), thickness=5)
pTime = 0
cTime = 0

while True:
    ret, img = cap.read()
    if ret:
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        result = hands.process(imgRGB)
        # print(result.multi_hand_landmarks)
        imgHeight = img.shape[0]
        imgWidth = img.shape[1]
        if result.multi_hand_landmarks:
            for handLms in result.multi_hand_landmarks:
                mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS, handLmsStyle, handConStyle)
                for i, lm in enumerate(handLms.landmark):
                    xPos = int(lm.x * imgWidth)
                    yPos = int(lm.y * imgHeight)
                    # cv2.putText(img, str(i), (xPos-25, yPos+5), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
                    # if i == 4:
                    #     cv2.circle(img, (xPos, yPos), 20, (166, 56, 56), cv2.FILLED)
                    # print(i, xPos, yPos)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, f"FPS : {int(fps)}", (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
        cv2.imshow('img', img)
    if cv2.waitKey(1) == ord('q'):
        break

With this, the hand keypoints and their coordinates are displayed on screen; gesture recognition or any other behavior can then be decided by testing the obtained keypoint coordinates.
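As one hedged way to turn the 21 landmark coordinates into a gesture (my addition, not code from the original post): compare each fingertip with its middle joint to count the raised fingers.

def count_raised_fingers(handLms):
    # mediapipe indices: fingertips 8/12/16/20, their PIP joints 6/10/14/18,
    # thumb tip 4; lm.y grows downward in image coordinates
    lm = handLms.landmark
    raised = 0
    for tip, pip in [(8, 6), (12, 10), (16, 14), (20, 18)]:
        if lm[tip].y < lm[pip].y:  # fingertip sits above its middle joint
            raised += 1
    if lm[4].x < lm[3].x:  # crude thumb test; assumes a right hand facing the camera
        raised += 1
    return raised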

python + mediapipe + opencv hand keypoint detection (gesture recognition)

Contents: I. What is mediapipe? II. Usage steps: 1. imports; 2. main code; 3. results; supplement.

I. What is mediapipe?

II. Usage. 1. Imports, as follows:

import cv2
from mediapipe import solutions
import time

2. The main code, as follows:

cap = cv2.VideoCapture(0)
mpHands = solutions.hands
hands = mpHands.Hands()
mpDraw = solutions.drawing_utils
pTime = 0
count = 0
while True:
    success, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = hands.process(imgRGB)
    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:
            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (25, 50), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)

3. Results. That is all for today; this post only briefly introduces the use of mediapipe, which offers a great many more methods for image recognition and related tasks.

Supplement: below, a look at mediapipe-based face mesh recognition.

Sample essay (2024): Moving Target Detection and Tracking Based on OpenCV

Essay one: Moving Target Detection and Tracking Based on OpenCV. I. Introduction. In computer vision, moving target detection and tracking is an important and challenging research topic.

OpenCV (the open-source computer vision library), as a powerful tool, is widely used in research in this area.

This essay discusses OpenCV-based methods and techniques for moving target detection and tracking, and their applications.

II. Moving target detection. 1. Background subtraction. Background subtraction is one of the common detection methods.

It compares the current frame against a background model to detect the moving target.

OpenCV ships built-in background-subtraction algorithms such as MOG2 and KNN.

These algorithms extract moving targets from a video stream effectively.

2. Optical flow. Optical flow detects changes based on pixel intensity and motion-field estimation.

It computes per-pixel motion vectors between consecutive frames to detect moving targets.

OpenCV provides a basic optical-flow implementation suitable for real-time moving target detection.

3. Deep-learning detection. In recent years, deep-learning-based detection has produced notable results in this area.

A trained deep neural network can detect and identify moving targets precisely.

OpenCV can load models from several deep-learning frameworks, such as TensorFlow and PyTorch, which makes deep-learning-based detection convenient to implement.

III. Moving target tracking. 1. Feature-point tracking. Feature-point tracking is a commonly used approach.

It extracts feature points from video frames and tracks them with feature-matching algorithms.

OpenCV offers several feature extraction and matching algorithms, such as SIFT, SURF, and ORB, which can implement feature-point-based moving target tracking.
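A brief ORB sketch of that idea (my example, not the essay's): match features between a template of the target and the current frame; the matched keypoint positions indicate where the target now is.

import cv2

orb = cv2.ORB_create(500)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

def locate_target(template_gray, frame_gray, keep=30):
    kp1, des1 = orb.detectAndCompute(template_gray, None)
    kp2, des2 = orb.detectAndCompute(frame_gray, None)
    if des1 is None or des2 is None:
        return []
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)[:keep]
    # positions of the matched keypoints in the current frame
    return [kp2[m.trainIdx].pt for m in matches]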

2. Optical-flow tracking. Optical flow serves not only detection but also tracking.

Computing the optical-flow field estimates the target's trajectory, speed, and other motion information.

OpenCV's optical-flow implementation can track moving targets in real time.

3. Deep-learning trackers. With the development of deep learning, deep-learning-based tracking algorithms have likewise advanced markedly.

These algorithms train deep neural networks to learn the target's appearance and motion patterns, achieving precise tracking.

OpenCV supports several deep-learning frameworks, so deep-learning-based moving target tracking is convenient to implement.

Sample essay (2024): Moving Target Detection and Tracking Based on OpenCV

Essay one: Moving Target Detection and Tracking Based on OpenCV. I. Introduction. With the rapid development of computer vision technology, moving target detection and tracking, an important research direction of the field, has found wide application.

OpenCV (the open-source computer vision library), a powerful computer vision toolkit, provides effective solutions for moving target detection and tracking.

This essay explores OpenCV-based methods for moving target detection and tracking and studies their applications in depth.

II. Moving target detection. Moving target detection is a basic task of computer vision; its main purpose is to extract the moving targets of interest from a video sequence.

OpenCV-based detection methods mainly include background subtraction, optical flow, and inter-frame differencing.

1. Background subtraction. Background subtraction is a commonly used method; its basic idea is to difference the current frame against a background model and so extract the moving target.

OpenCV provides several background-subtraction algorithms, such as MOG2 and KNN.

These algorithms can learn the background model automatically from the video sequence and detect moving targets accurately.

2. Optical flow. Optical flow detects changes in the optical-flow field; its basic idea is to compute pixel motion between consecutive frames to detect moving targets.

OpenCV's optical flow implements the Lucas-Kanade algorithm and can effectively extract the trajectories of moving targets.
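A sketch of that pyramidal Lucas-Kanade tracker (my example; the parameters follow the common OpenCV sample values): corners are detected once, then tracked from frame to frame.

import cv2

cap = cv2.VideoCapture(0)
_, first = cap.read()
prev_gray = cv2.cvtColor(first, cv2.COLOR_BGR2GRAY)
pts = cv2.goodFeaturesToTrack(prev_gray, maxCorners=100, qualityLevel=0.3, minDistance=7)

while True:
    ok, frame = cap.read()
    if not ok or pts is None or len(pts) == 0:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    new_pts, status, _ = cv2.calcOpticalFlowPyrLK(prev_gray, gray, pts, None,
                                                  winSize=(15, 15), maxLevel=2)
    good = new_pts[status.flatten() == 1]  # keep successfully tracked points
    for x, y in good.reshape(-1, 2):
        cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
    cv2.imshow("LK tracks", frame)
    prev_gray, pts = gray, good.reshape(-1, 1, 2)
    if cv2.waitKey(30) == 27:
        break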

3. Inter-frame differencing. Inter-frame differencing is a simple detection method; its basic idea is to compare the differences between adjacent frames to detect moving targets.

OpenCV's frame differencing effectively extracts the dynamic regions of a video sequence and thereby detects moving targets.

III. Moving target tracking. Tracking is another important research direction of computer vision; its main purpose is to localize and follow the detected moving targets precisely.

OpenCV-based tracking methods mainly include feature-based, model-based, and deep-learning-based approaches.

1. Feature-based methods. Feature-based methods track by extracting the moving target's features.

OpenCV provides several feature extraction and matching algorithms, such as SIFT, SURF, and ORB.

These algorithms extract the moving target's features effectively, allowing it to be localized and tracked precisely.

2. Model-based methods. Model-based methods track by building a model of the moving target.

OpenCV: gesture recognition

Gesture recognition after training on the collected data.

#include "header.h"

int main()
{
    const int sample_num = 10; // number of training images per class
    const int class_num = 3;   // 3 classes: rock, scissors, paper
    const int image_cols = 30;
    const int image_rows = 30;
    string Name, Path;
    float trainingData[class_num * sample_num][image_cols * image_rows] = { { 0 } }; // one training sample per row
    float labels[class_num * sample_num][class_num] = { { 0 } }; // training sample labels
    cout << "training Data.........\n";
    for (int i = 0; i < class_num; i++)
    {
        int j = 0;
        Path = getstring(i + 1) + "/" + "*.*";
        HANDLE hFile;
        LPCTSTR lp = StringToWchar(Path);
        WIN32_FIND_DATA pNextInfo;
        hFile = FindFirstFile(lp, &pNextInfo);
        if (hFile == INVALID_HANDLE_VALUE)
        {
            cout << "failed" << endl;
            exit(-1); // search failed
        }
        cout << "folder name:" << i + 1 << endl;
        do
        {
            // this check is required, otherwise "." and ".." entries are picked up
            // and the image load fails
            if (pNextInfo.cFileName[0] == '.')
                continue;
            cout << "file name" << WcharToChar(pNextInfo.cFileName) << endl;
            Mat srcImage = imread(getstring(i+1) + "/" + WcharToChar(pNextInfo.cFileName), CV_LOAD_IMAGE_GRAYSCALE);
            Mat trainImage;
            // if (!srcImage.empty()) cout << " done \n";
            // preprocess the sample image
            resize(srcImage, trainImage, Size(image_cols, image_rows), (0, 0), (0, 0), CV_INTER_AREA);
            Canny(trainImage, trainImage, 150, 100, 3, false);
            for (int k = 0; k < image_rows * image_cols; k++)
            {
                trainingData[i*sample_num + j][k] = (float)trainImage.data[k];
            }
            j++;
        } while (FindNextFile(hFile, &pNextInfo));
    }
    // the trained data matrix
    Mat DataMat(class_num*sample_num, image_rows*image_cols, CV_32FC1, trainingData);
    cout << "DataMat done~" << endl;
    // initialize the labels
    // 0 - rock, 1 - scissors, 2 - paper
    for (int i = 0; i < class_num; i++)
    {
        for (int j = 0; j < sample_num; j++)
        {
            for (int k = 0; k < class_num; k++)
            {
                if (k == i)
                    labels[i*sample_num + j][k] = 1;
                else
                    labels[i*sample_num + j][k] = 0;
            }
        }
    }
    // the label matrix
    Mat labelsMat(class_num*sample_num, class_num, CV_32FC1, labels);
    cout << "labelsMat done~" << endl;
    // training code
    CvANN_MLP bp;
    CvANN_MLP_TrainParams params;
    params.train_method = CvANN_MLP_TrainParams::BACKPROP;
    params.bp_dw_scale = 0.001;
    params.bp_moment_scale = 0.1;
    // cvTermCriteria: the iteration termination rule
    params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.0001);
    // set the network layer sizes
    Mat layerSizes = (Mat_<int>(1, 4) << image_rows*image_cols, int(image_rows*image_cols / 2),
                      int(image_rows*image_cols / 2), class_num);
    bp.create(layerSizes, CvANN_MLP::SIGMOID_SYM, 1.0, 1.0);
    cout << "training...." << endl;
    bp.train(DataMat, labelsMat, Mat(), Mat(), params);
    bp.save("detect_gesture.xml");
    cout << "done" << endl;
    // test the neural network
    cout << "testing...." << endl;
    Mat test = imread("test.jpg");
    Mat temp;
    resize(test, temp, Size(image_cols, image_rows), (0, 0), (0, 0), CV_INTER_AREA);
    Canny(temp, temp, 150, 100, 3, false);
    Mat_<float> sample(1, image_rows*image_cols);
    for (int i = 0; i < image_rows*image_cols; ++i)
    {
        sample.at<float>(0, i) = (float)temp.data[i];
    }
    Mat result;
    bp.predict(sample, result);
    float* p = result.ptr<float>(0);
    float max = -1, min = 0;
    int index = 0;
    for (int i = 0; i < class_num; i++)
    {
        cout << (float)(*(p + i)) << "";
        if (i == class_num - 1)
            cout << endl;
        if ((float)(*(p + i)) > max)
        {
            min = max;
            max = (float)(*(p + i));
            index = i;
        }
        else
        {
            if (min < (float)(*(p + i)))
                min = (float)(*(p + i));
        }
    }
    cout << "Your choice :" << choice[index] << endl << "recognition rate:"
         << (((max - min) * 100) > 100 ? 100 : ((max - min) * 100)) << endl;
    // rock-paper-scissors: game on!
    int computer = random(3);
    cout << "computer's choice :" << choice[computer] << endl;
    if (computer == index)
        cout << "A Draw -_- " << endl << endl;
    else if ((computer < index && (index - computer == 1)) || (computer == 2 && index == 0))
    {
        cout << "You Lose T_T " << endl << endl;
    }
    else
        cout << "You Win o * ̄▽ ̄* " << endl << endl;
    system("pause");
    waitKey(100);
    return 0;
}

After one run there is no need to retrain every time; simply load the "detect_gesture.xml" saved by the first run:

CvANN_MLP bp;
CvANN_MLP_TrainParams params;
bp.load("detect_gesture.xml");

PS:

// CvTermCriteria()
// termination criteria for iterative algorithms
#define CV_TERMCRIT_ITER 1
#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER
#define CV_TERMCRIT_EPS 2
typedef struct CvTermCriteria
{
    int type;       // one of CV_TERMCRIT_ITER and CV_TERMCRIT_EPS, or a combination of the two
    int max_iter;   // maximum number of iterations
    double epsilon; // required accuracy of the result
} CvTermCriteria;
// constructor
inline CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon );
// checks the termination criteria against max_iter and epsilon and converts them
// so that type = CV_TERMCRIT_ITER + CV_TERMCRIT_EPS
CvTermCriteria cvCheckTermCriteria( CvTermCriteria criteria, double default_eps, int default_max_iters );

Motion detection with python + opencv (the frame-difference method)

This post shares concrete code for motion detection with python + opencv, for your reference. 1. Frame-difference principle: motion detection looks at pixel differences between individual frames (or groups of frames), thresholds the difference values, selects the pixels above the threshold, and builds a mask that picks out the frames in which something changed.

Frame differencing is a fairly simple way to detect object motion in video; it comes in single-frame, two-frame, and three-frame variants.

Using more frames helps prevent ghosting in the detection result.
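Before the post's own single-frame-difference implementation below, a hedged sketch of the three-frame variant just mentioned (my illustration, with an arbitrary threshold): AND-ing the two pairwise difference masks suppresses the ghost left at the object's old position.

import cv2

def three_frame_diff(g1, g2, g3, thresh=25):
    # motion mask for the middle of three consecutive grayscale frames
    d1 = cv2.absdiff(g2, g1)
    d2 = cv2.absdiff(g3, g2)
    _, b1 = cv2.threshold(d1, thresh, 255, cv2.THRESH_BINARY)
    _, b2 = cv2.threshold(d2, thresh, 255, cv2.THRESH_BINARY)
    return cv2.bitwise_and(b1, b2)  # changed in both pairs: the true moving region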

2. Approach: the post demonstrates single-frame-difference motion detection on a clipped video. 3. Python implementation:

import cv2
import pandas as pd

def threh(video, save_video, thres1, area_threh):
    cam = cv2.VideoCapture(video)  # open a video
    input_fps = cam.get(cv2.CAP_PROP_FPS)
    ret_val, input_image = cam.read()
    index = []
    images = []
    images.append(input_image)
    video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    input_image = cv2.resize(input_image, (512, 512))
    ending_frame = video_length
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(save_video, fourcc, input_fps, (512, 512))
    gray_lwpCV = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
    gray_lwpCV = cv2.GaussianBlur(gray_lwpCV, (21, 21), 0)
    background = gray_lwpCV
    # es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 4))
    i = 0  # default is 0
    outt = []
    while (cam.isOpened()) and ret_val == True and i < 2999:
        # if i % 2 == 1:
        ret_val, input_image = cam.read()
        input_image = cv2.resize(input_image, (512, 512))
        gray_lwpCV = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
        gray_lwpCV = cv2.GaussianBlur(gray_lwpCV, (21, 21), 0)
        diff = cv2.absdiff(background, gray_lwpCV)
        outt.append(diff)
        # let the background follow scene changes
        tem_diff = diff.flatten()
        tem_ds = pd.Series(tem_diff)
        tem_per = 1 - len(tem_ds[tem_ds == 0]) / len(tem_ds)
        if (tem_per < 0.2) | (tem_per > 0.75):
            background = gray_lwpCV
        else:
            diff = cv2.threshold(diff, thres1, 255, cv2.THRESH_BINARY)[1]
            ret, thresh = cv2.threshold(diff.copy(), 150, 255, 0)
            contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # contours, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for c in contours:
                # for each rectangle, keep only contours above the given threshold
                # (drops tiny changes and other noise)
                if (cv2.contourArea(c) < area_threh) | (cv2.contourArea(c) > int(512*512*0.3)):
                    continue
                (x, y, w, h) = cv2.boundingRect(c)  # bounding box of the contour
                cv2.rectangle(input_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
                index.append(i)
        # cv2.imshow('contours', input_image)
        # cv2.imshow('dis', diff)
        out.write(input_image)
        images.append(input_image)
        i = i + 1
    out.release()
    cam.release()
    return outt, index, images

# calling the function
outt = threh('new_video.mp4', 'test6.mp4', 25, 3000)

How to make a robot follow a moving object with OpenCV

1. Object recognition. This example recognizes an object of a specific color and steers the robot to follow it as the object's position changes.

import cv2 as cv

# define the structuring element
kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
# print kernel
capture = cv.VideoCapture(0)
print capture.isOpened()
ok, frame = capture.read()
lower_b = (65, 43, 46)
upper_b = (110, 255, 255)
height, width = frame.shape[0:2]
screen_center = width / 2
offset = 50
while ok:
    # convert the image to the HSV color space
    hsv_frame = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    # color-based object extraction
    mask = cv.inRange(hsv_frame, lower_b, upper_b)
    mask2 = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel)
    mask3 = cv.morphologyEx(mask2, cv.MORPH_CLOSE, kernel)
    # find the region with the largest area
    _, contours, _ = cv.findContours(mask3, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    maxArea = 0
    maxIndex = 0
    for i, c in enumerate(contours):
        area = cv.contourArea(c)
        if area > maxArea:
            maxArea = area
            maxIndex = i
    # draw it
    cv.drawContours(frame, contours, maxIndex, (255, 255, 0), 2)
    # get the bounding rectangle
    x, y, w, h = cv.boundingRect(contours[maxIndex])
    cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # get the center pixel
    center_x = int(x + w/2)
    center_y = int(y + h/2)
    cv.circle(frame, (center_x, center_y), 5, (0, 0, 255), -1)
    # simple feedback printout for now; motion control is added later
    if center_x < screen_center - offset:
        print "turn left"
    elif screen_center - offset <= center_x <= screen_center + offset:
        print "keep"
    elif center_x > screen_center + offset:
        print "turn right"
    cv.imshow("mask4", mask3)
    cv.imshow("frame", frame)
    cv.waitKey(1)
    ok, frame = capture.read()

The effect is shown in the original post.

2. Following. Combined with ROS, this drives a turtlebot3 or another robot (see my other post for the turtlebot3 tutorial). First start the turtlebot3; the following code can run on the robot's Raspberry Pi with the camera plugged into a USB port:

import rospy
import cv2 as cv
from geometry_msgs.msg import Twist

def shutdown():
    twist = Twist()
    twist.linear.x = 0
    twist.angular.z = 0
    cmd_vel_Publisher.publish(twist)
    print "stop"

if __name__ == '__main__':
    rospy.init_node("follow_node")
    rospy.on_shutdown(shutdown)
    rate = rospy.Rate(100)
    cmd_vel_Publisher = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
    # define the structuring element
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    # print kernel
    capture = cv.VideoCapture(0)
    print capture.isOpened()
    ok, frame = capture.read()
    lower_b = (65, 43, 46)
    upper_b = (110, 255, 255)
    height, width = frame.shape[0:2]
    screen_center = width / 2
    offset = 50
    while not rospy.is_shutdown():
        # convert the image to the HSV color space
        hsv_frame = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        # color-based object extraction
        mask = cv.inRange(hsv_frame, lower_b, upper_b)
        mask2 = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel)
        mask3 = cv.morphologyEx(mask2, cv.MORPH_CLOSE, kernel)
        # find the region with the largest area
        _, contours, _ = cv.findContours(mask3, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        maxArea = 0
        maxIndex = 0
        for i, c in enumerate(contours):
            area = cv.contourArea(c)
            if area > maxArea:
                maxArea = area
                maxIndex = i
        # draw it
        cv.drawContours(frame, contours, maxIndex, (255, 255, 0), 2)
        # get the bounding rectangle
        x, y, w, h = cv.boundingRect(contours[maxIndex])
        cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # get the center pixel
        center_x = int(x + w / 2)
        center_y = int(y + h / 2)
        cv.circle(frame, (center_x, center_y), 5, (0, 0, 255), -1)
        # turn the positional offset into velocity commands
        twist = Twist()
        if center_x < screen_center - offset:
            twist.linear.x = 0.1
            twist.angular.z = 0.5
            print "turn left"
        elif screen_center - offset <= center_x <= screen_center + offset:
            twist.linear.x = 0.3
            twist.angular.z = 0
            print "keep"
        elif center_x > screen_center + offset:
            twist.linear.x = 0.1
            twist.angular.z = -0.5
        else:
            twist.linear.x = 0
            twist.angular.z = 0
            print "stop"
        # publish the velocity
        cmd_vel_Publisher.publish(twist)
        # cv.imshow("mask4", mask3)
        # cv.imshow("frame", frame)
        cv.waitKey(1)
        rate.sleep()
        ok, frame = capture.read()

Simple gesture detection in Python with opencv

The code is adapted from a reference implementation of simple gesture recognition. The basic idea is skin detection: skin color separates from the surroundings better in the HSV color space, so convert from RGB to HSV and binarize on the skin-color range to obtain a mask:

def HSVBin(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    lower_skin = np.array([100, 50, 0])
    upper_skin = np.array([125, 255, 255])
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    return mask

Here cvtColor performs the color-space conversion.

In inRange, pixels below lower become 0 and pixels above upper become 0, while values in between become 255.

Then morphological operations such as erosion and dilation remove noise, leaving more complete white (skin) blobs; finally the blob contours are found, and blobs whose area is too small are discarded as noise:

def getContours(img):
    kernel = np.ones((5, 5), np.uint8)
    closed = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    closed = cv2.morphologyEx(closed, cv2.MORPH_CLOSE, kernel)
    _, contours, h = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    vaildContours = []
    for cont in contours:
        if cv2.contourArea(cont) > 9000:
            vaildContours.append(cv2.convexHull(cont))
    return vaildContours

Dilation (dilate): as the kernel B sweeps across the image, the maximum pixel value under the kernel is extracted and replaces the pixel at the anchor point; this maximization makes the bright regions of the image "grow".

Source code for moving-target tracking and detection (the CAMSHIFT algorithm)

The executable was built successfully under VC 6.0 and is a stand-alone program; it does not require the OpenCV DLLs.

Before running, connect a USB camera first.

Then select the target to track with the mouse.

#ifdef _CH_
#pragma package <opencv>
#endif

#ifndef _EiC
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <ctype.h>
#endif

IplImage *image = 0, *hsv = 0, *hue = 0, *mask = 0, *backproject = 0, *histimg = 0;
CvHistogram *hist = 0;

int backproject_mode = 0;
int select_object = 0;
int track_object = 0;
int show_hist = 1;
CvPoint origin;
CvRect selection;
CvRect track_window;
CvBox2D track_box; // region box returned by the tracker, with angle
CvConnectedComp track_comp;
int hdims = 48; // number of histogram bins; more bins, finer resolution
float hranges_arr[] = {0, 180};
float* hranges = hranges_arr;
int vmin = 10, vmax = 256, smin = 30;

void on_mouse(int event, int x, int y, int flags)
{
    if (!image)
        return;
    if (image->origin)
        y = image->height - y;
    if (select_object)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = selection.x + CV_IABS(x - origin.x);
        selection.height = selection.y + CV_IABS(y - origin.y);
        selection.x = MAX(selection.x, 0);
        selection.y = MAX(selection.y, 0);
        selection.width = MIN(selection.width, image->width);
        selection.height = MIN(selection.height, image->height);
        selection.width -= selection.x;
        selection.height -= selection.y;
    }
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = cvPoint(x, y);
        selection = cvRect(x, y, 0, 0);
        select_object = 1;
        break;
    case CV_EVENT_LBUTTONUP:
        select_object = 0;
        if (selection.width > 0 && selection.height > 0)
            track_object = -1;
#ifdef _DEBUG
        printf("\n # mouse selection region:");
        printf("\n X = %d, Y = %d, Width = %d, Height = %d", selection.x, selection.y, selection.width, selection.height);
#endif
        break;
    }
}

CvScalar hsv2rgb(float hue)
{
    int rgb[3], p, sector;
    static const int sector_data[][3] =
        {{0,2,1}, {1,2,0}, {1,0,2}, {2,0,1}, {2,1,0}, {0,1,2}};
    hue *= 0.033333333333333333333333333333333f;
    sector = cvFloor(hue);
    p = cvRound(255*(hue - sector));
    p ^= sector & 1 ? 255 : 0;

    rgb[sector_data[sector][0]] = 255;
    rgb[sector_data[sector][1]] = 0;
    rgb[sector_data[sector][2]] = p;

#ifdef _DEBUG
    printf("\n # Convert HSV to RGB:");
    printf("\n HUE = %f", hue);
    printf("\n R = %d, G = %d, B = %d", rgb[0], rgb[1], rgb[2]);
#endif

    return cvScalar(rgb[2], rgb[1], rgb[0], 0);
}

int main(int argc, char** argv)
{
    CvCapture* capture = 0;
    IplImage* frame = 0;

    if (argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM(argc == 2 ? argv[1][0] - '0' : 0);
    else if (argc == 2)
        capture = cvCaptureFromAVI(argv[1]);

    if (!capture)
    {
        fprintf(stderr, "Could not initialize capturing...\n");
        return -1;
    }

    printf("Hot keys: \n"
           "\tESC - quit the program\n"
           "\tc - stop the tracking\n"
           "\tb - switch to/from backprojection view\n"
           "\th - show/hide object histogram\n"
           "To initialize tracking, select the object with mouse\n");

    // cvNamedWindow("Histogram", 1);
    cvNamedWindow("CamShiftDemo", 1);
    cvSetMouseCallback("CamShiftDemo", on_mouse); // on_mouse custom event handler
    cvCreateTrackbar("Vmin", "CamShiftDemo", &vmin, 256, 0);
    cvCreateTrackbar("Vmax", "CamShiftDemo", &vmax, 256, 0);
    cvCreateTrackbar("Smin", "CamShiftDemo", &smin, 256, 0);

    for (;;)
    {
        int i, bin_w, c;

        frame = cvQueryFrame(capture);
        if (!frame)
            break;

        if (!image)
        {
            /* allocate all the buffers */
            image = cvCreateImage(cvGetSize(frame), 8, 3);
            image->origin = frame->origin;
            hsv = cvCreateImage(cvGetSize(frame), 8, 3);
            hue = cvCreateImage(cvGetSize(frame), 8, 1);
            mask = cvCreateImage(cvGetSize(frame), 8, 1);
            backproject = cvCreateImage(cvGetSize(frame), 8, 1);
            hist = cvCreateHist(1, &hdims, CV_HIST_ARRAY, &hranges, 1); // compute the histogram
            histimg = cvCreateImage(cvSize(320, 200), 8, 3);
            cvZero(histimg);
        }

        cvCopy(frame, image, 0);
        cvCvtColor(image, hsv, CV_BGR2HSV); // color space conversion BGR to HSV

        if (track_object)
        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS(hsv, cvScalar(0, smin, MIN(_vmin, _vmax), 0),
                       cvScalar(180, 256, MAX(_vmin, _vmax), 0), mask); // obtain the binary MASK
            cvSplit(hsv, hue, 0, 0, 0); // extract only the HUE plane

            if (track_object < 0)
            {
                float max_val = 0.f;
                cvSetImageROI(hue, selection); // selected region as ROI
                cvSetImageROI(mask, selection); // selected region for the mask
                cvCalcHist(&hue, hist, 0, mask); // compute the histogram
                cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0); // only the max value is needed
                cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0); // scale the bins into [0,255]
                cvResetImageROI(hue); // remove ROI
                cvResetImageROI(mask);
                track_window = selection;
                track_object = 1;

                cvZero(histimg);
                bin_w = histimg->width / hdims; // hdims: number of bars, so bin_w is the bar width
                // draw the histogram
                for (i = 0; i < hdims; i++)
                {
                    int val = cvRound(cvGetReal1D(hist->bins, i) * histimg->height / 255);
                    CvScalar color = hsv2rgb(i * 180.f / hdims);
                    cvRectangle(histimg, cvPoint(i*bin_w, histimg->height),
                                cvPoint((i+1)*bin_w, histimg->height - val),
                                color, -1, 8, 0);
                }
            }

            cvCalcBackProject(&hue, backproject, hist); // use the back projection method
            cvAnd(backproject, mask, backproject, 0);
            // calling the CAMSHIFT algorithm module
            cvCamShift(backproject, track_window,
                       cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
                       &track_comp, &track_box);
            track_window = track_comp.rect;

            if (backproject_mode)
                cvCvtColor(backproject, image, CV_GRAY2BGR); // show the backproject gray image
            if (image->origin)
                track_box.angle = -track_box.angle;
            cvEllipseBox(image, track_box, CV_RGB(255, 0, 0), 3, CV_AA, 0);
        }

        if (select_object && selection.width > 0 && selection.height > 0)
        {
            cvSetImageROI(image, selection);
            cvXorS(image, cvScalarAll(255), image, 0);
            cvResetImageROI(image);
        }

        cvShowImage("CamShiftDemo", image);
        cvShowImage("Histogram", histimg);

        c = cvWaitKey(10);
        if (c == 27)
            break; // exit from for-loop
        switch (c)
        {
        case 'b':
            backproject_mode ^= 1;
            break;
        case 'c':
            track_object = 0;
            cvZero(histimg);
            break;
        case 'h':
            show_hist ^= 1;
            if (!show_hist)
                cvDestroyWindow("Histogram");
            else
                cvNamedWindow("Histogram", 1);
            break;
        default:
            ;
        }
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("CamShiftDemo");
    return 0;
}

#ifdef _EiC
main(1, "camshiftdemo.c");
#endif


Appendix: fingertip detection source using contours, the convex hull, and convexity defects (OpenCV 2.1):

#include <Windows.h>

#include <cv.h>
#include <cxcore.h>
#include <highgui.h>

#include <fstream>
#include <iostream>
#include <iomanip>
#include <algorithm>
using namespace std;

#pragma comment(lib, "cv210.lib")
#pragma comment(lib, "cxcore210.lib")
#pragma comment(lib, "highgui210.lib")

void ErrorHandler(char* message)
{
    cout << message << endl;
    exit(0);
}
#undef UNICODE

void fingerTip(char* imgname);

int main()
{
    WIN32_FIND_DATA FileData;
    HANDLE hSearch;
    BOOL fFinished = FALSE;

    if (!SetCurrentDirectory("images")) {
        cout << "failed to change work directory" << endl;
        exit(0);
    }

    hSearch = FindFirstFile("*.bmp", &FileData);
    if (hSearch == INVALID_HANDLE_VALUE) {
        ErrorHandler("No .bmp files found.");
    }
    while (!fFinished) {
        fingerTip(FileData.cFileName);
        if (!FindNextFile(hSearch, &FileData)) {
            if (GetLastError() == ERROR_NO_MORE_FILES) {
                fFinished = TRUE;
            } else {
                ErrorHandler("Couldn't find next file.");
            }
        }
        cvWaitKey(0);
    }

    // Close the search handle.
    if (!FindClose(hSearch)) {
        ErrorHandler("Couldn't close search handle.");
    }

    return 0;
}

void fingerTip(char* imgname)
{
    IplImage* pImgColor = NULL;
    IplImage* pImgGray = NULL;
    IplImage* pImgContourAll = NULL;
    IplImage* pImgContourAppr = NULL;
    IplImage* pImgHull = NULL;
    IplImage* pImgDefects = NULL;

    pImgColor = cvLoadImage(imgname, CV_LOAD_IMAGE_COLOR);
    if (!pImgColor) {
        cout << "failed to load image" << endl;
        exit(0);
    }

    pImgGray = cvCreateImage(cvGetSize(pImgColor), 8, 1);
    cvCvtColor(pImgColor, pImgGray, CV_RGB2GRAY);
    pImgContourAppr = cvCreateImage(cvGetSize(pImgGray), 8, 3);
    pImgContourAll = cvCreateImage(cvGetSize(pImgGray), 8, 3);
    pImgHull = cvCreateImage(cvGetSize(pImgGray), 8, 3);
    pImgDefects = cvCreateImage(cvGetSize(pImgGray), 8, 3);
    cvZero(pImgContourAppr);
    cvZero(pImgContourAll);
    cvZero(pImgHull);
    cvZero(pImgDefects);

    // canny
    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* contourSeqAll = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);
    cvCanny(pImgGray, pImgGray, 10, 30, 5);
    cvFindContours(pImgGray, storage, &contourSeqAll, sizeof(CvContour), CV_RETR_LIST, CV_LINK_RUNS);

    // original contours
    CvSeq* tseq = contourSeqAll;
    for (; contourSeqAll; contourSeqAll = contourSeqAll->h_next) {
        cvDrawContours(pImgContourAll, contourSeqAll, cvScalar(255,0,0), cvScalar(0,0,255), 0, 2);
    }
    contourSeqAll = tseq;

    CvMemStorage* storageAppr = cvCreateMemStorage();
    CvSeq* contourAppr = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storageAppr);
    contourAppr = cvApproxPoly(contourSeqAll, sizeof(CvContour), storageAppr, CV_POLY_APPROX_DP, 5, 1);

    // approximated contours
    tseq = contourAppr;
    for (; contourAppr; contourAppr = contourAppr->h_next) {
        cvDrawContours(pImgContourAppr, contourAppr, cvScalar(255,0,0), cvScalar(0,0,255), 0, 2);
    }
    contourAppr = tseq;

    // print contours
    /*cout << "contours:" << endl;
    for (int i = 0; i < contourAppr->total; i++) {
        CvPoint* p = (CvPoint*)CV_GET_SEQ_ELEM(CvPoint, contourAppr, i);
        cout << p->x << "," << p->y << endl;
        cvCircle(pImgHull, *p, 3, cvScalar(0,255,255));
        cvShowImage("hull", pImgHull);
        cvWaitKey(0);
    }*/

    //// convex hull
    CvSeq* hull = cvConvexHull2(contourAppr);
    // convexity defects
    CvSeq* defectSeq = cvConvexityDefects(contourAppr, hull);
    // rearrange the defectSeq in linked sequence

    for (int i = 0; i < defectSeq->total; i++) {
        CvConvexityDefect* dp = (CvConvexityDefect*)CV_GET_SEQ_ELEM(CvConvexityDefect, defectSeq, i);

        cvLine(pImgDefects, *(dp->start), *(dp->end), cvScalar(0,0,255));
        cvLine(pImgDefects, *(dp->start), *(dp->depth_point), cvScalar(0x00,0x99,0xff));
        cvLine(pImgDefects, *(dp->depth_point), *(dp->end), cvScalar(0xff,0x99,0x00));
        cvCircle(pImgDefects, *(dp->depth_point), 2, cvScalar(0xff,0x99,0x00));
        cout << i << " :(" << dp->start->x << "," << dp->start->y << ")" << endl;
    }

    cvShowImage("original", pImgColor);
    cvShowImage("canny", pImgGray);
    cvShowImage("contour all", pImgContourAll);
    cvShowImage("contour appr", pImgContourAppr);
    cvShowImage("ConvexityDefects", pImgDefects);
    // cvShowImage("hull", pImgHull);

    cvWaitKey(0);

    cvDestroyAllWindows();

    cvReleaseImage(&pImgColor);
    cvReleaseImage(&pImgGray);
    cvReleaseImage(&pImgContourAll);
    cvReleaseImage(&pImgContourAppr);
    cvReleaseImage(&pImgDefects);
    cvReleaseImage(&pImgHull);

    cvReleaseMemStorage(&storage);
    cvReleaseMemStorage(&storageAppr);
}
