开发环境:Ubuntu 18.04 LTS + ROS Melodic + ViSP 3.3.1
文章内容主要参考ViSP官方教学文档:https://visp-doc.inria.fr/doxygen/visp-daily/tutorial_mainpage.html
本文主要介绍了如何使用ViSP实现关键点跟踪,实现过程使用了OpenCV中的KLT跟踪算法,主要流程是在第一帧图像中选择出(手动或自动)待跟踪的目标关键点,然后在后面的每一帧中都搜索对应的目标关键点实现关键点跟踪。本文主要参考了 keypoint中的 tutorial-klt-tracker.cpp例程。首先要获取这个例程文件并编译它
svn export https://github.com/lagadic/visp.git/trunk/tutorial/tracking/keypoint
cd keypoint/
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Release -DVISP_DIR=$VISP_WS/visp-build
make
执行例程,查看效果
./tutorial-klt-tracker
此时是自动选择关键点的模式,程序会采用Harris关键点检测算法按照默认参数在图像中寻找关键点
./tutorial-klt-tracker --init-by-click
此时是手动选择关键点模式,可以通过鼠标左键单击从图像中选择需要跟踪的关键点
点击右键开始跟踪
下面介绍一下代码实现过程
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include <visp3/core/vpImageConvert.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/klt/vpKltOpencv.h>
//! [Include]
#ifdef VISP_HAVE_OPENCV
//! [Select by click helper]
// Let the user pick the keypoints to track: each left click adds a point
// (drawn as a green cross), a right click validates the selection and
// (re)initializes the KLT tracker with the chosen features.
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
static void initTrackingByClick(vpKltOpencv &tracker, vpImage<unsigned char> &I, IplImage *cvI)
#else
static void initTrackingByClick(vpKltOpencv &tracker, vpImage<unsigned char> &I, cv::Mat &cvI)
#endif
{
  vpMouseButton::vpMouseButtonType button = vpMouseButton::button1;
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
  std::vector<CvPoint2D32f> feature; // Feature container in the OpenCV C API format
#else
  std::vector<cv::Point2f> feature;
#endif
  vpImagePoint ip;
  do {
    vpDisplay::displayText(I, 10, 10, "Left click to select a point, right to start tracking", vpColor::red);
    if (vpDisplay::getClick(I, ip, button, false)) {
      if (button == vpMouseButton::button1) {
        // Store the clicked pixel as a feature to track
        feature.push_back(cv::Point2f((float)ip.get_u(), (float)ip.get_v()));
        vpDisplay::displayCross(I, ip, 12, vpColor::green);
      }
    }
    vpDisplay::flush(I);
    vpTime::wait(20); // Idle a little between click polls
  } while (button != vpMouseButton::button3);
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
  if (!feature.empty()) // &feature[0] on an empty vector is undefined behavior
    tracker.initTracking(cvI, &feature[0], feature.size());
#else
  tracker.initTracking(cvI, feature);
#endif
}
//! [Select by click helper]
#endif

// Track keypoints through a video with the KLT algorithm. Keypoints are
// either detected automatically (Harris, default) or selected with the
// mouse (--init-by-click). Options: --videoname <file>, --subsample <n>,
// --init-by-click, --help/-h.
int main(int argc, const char *argv[])
{
//! [Check 3rd party]
#ifdef VISP_HAVE_OPENCV
//! [Check 3rd party]
  try {
    std::string opt_videoname = "video-postcard.mp4";
    bool opt_init_by_click = false;
    unsigned int opt_subsample = 1;
    // Parse the command line. Start at 1 to skip the program name, and
    // check i + 1 < argc before consuming an option value so that a
    // trailing "--videoname"/"--subsample" cannot read past argv.
    for (int i = 1; i < argc; i++) {
      if (std::string(argv[i]) == "--videoname" && i + 1 < argc)
        opt_videoname = std::string(argv[++i]);
      else if (std::string(argv[i]) == "--init-by-click")
        opt_init_by_click = true;
      else if (std::string(argv[i]) == "--subsample" && i + 1 < argc) {
        int scale = std::atoi(argv[++i]);
        if (scale > 0) // reject 0/negative/non-numeric scale factors
          opt_subsample = static_cast<unsigned int>(scale);
      } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout << "Usage: " << argv[0]
                  << " [--videoname <video name>] [--subsample <scale factor>] [--init-by-click]"
                  << " [--help] [-h]" << std::endl;
        return 0;
      }
    }
    //! [Create reader]
    vpVideoReader reader;              // Video stream reader
    reader.setFileName(opt_videoname); // Video file (or image sequence) to read
    //! [Create reader]
    //! [Acquire]
    vpImage<unsigned char> I, Iacq;
    reader.acquire(Iacq); // Open the stream and grab the first frame
    Iacq.subsample(opt_subsample, opt_subsample, I); // Optionally downscale the frame into I
    //! [Acquire]
    //! [Convert to OpenCV image]
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
    IplImage *cvI = NULL;
#else
    cv::Mat cvI;
#endif
    vpImageConvert::convert(I, cvI); // The KLT tracker works on the OpenCV image format
    //! [Convert to OpenCV image]
    //! [Init display]
    vpDisplayOpenCV d(I, 0, 0, "Klt tracking");
    vpDisplay::display(I);
    vpDisplay::flush(I);
    //! [Init display]
    //! [Create tracker]
    // Tune the Harris corner detector used to find the initial keypoints
    // in automatic mode
    vpKltOpencv tracker;
    tracker.setMaxFeatures(200);
    tracker.setWindowSize(10);
    //! [Quality]
    tracker.setQuality(0.01); // Lower this threshold to detect more keypoints
    //! [Quality]
    tracker.setMinDistance(15);
    tracker.setHarrisFreeParameter(0.04);
    tracker.setBlockSize(9);
    tracker.setUseHarris(1);
    tracker.setPyramidLevels(3);
    //! [Create tracker]
    // Initialise the tracking, either from mouse clicks or automatically
    if (opt_init_by_click) {
      initTrackingByClick(tracker, I, cvI);
    } else {
      //! [Init tracker]
      tracker.initTracking(cvI); // Detect the keypoints automatically
      //! [Init tracker]
    }
    //! [How many features]
    std::cout << "Tracker initialized with " << tracker.getNbFeatures() << " features" << std::endl;
    //! [How many features]
    //! [While loop]
    // Track the keypoints across every remaining frame of the video
    while (!reader.end()) {
      double t = vpTime::measureTimeMs();
      reader.acquire(Iacq);
      Iacq.subsample(opt_subsample, opt_subsample, I);
      vpDisplay::display(I);
      vpImageConvert::convert(I, cvI);
      // In click-init mode, ask the user to select the keypoints again
      // 20 frames after the first one, then restart the tracking
      if (opt_init_by_click && reader.getFrameIndex() == reader.getFirstFrameIndex() + 20) {
        initTrackingByClick(tracker, I, cvI);
      }
      tracker.track(cvI);
      tracker.display(I, vpColor::red);
      vpDisplay::displayText(I, 10, 10, "Click to quit", vpColor::red);
      if (vpDisplay::getClick(I, false))
        break;
      vpDisplay::flush(I);
      if (!reader.isVideoFormat()) {
        // Image sequences carry no intrinsic frame rate: pace to ~25 fps
        vpTime::wait(t, 40);
      }
    }
    //! [While loop]
    //! [Wait click]
    vpDisplay::getClick(I);
    //! [Wait click]
    //! [Release IplImage]
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
    cvReleaseImage(&cvI);
#endif
    //! [Release IplImage]
    return 0;
  } catch (const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return 1; // signal failure to the shell instead of falling through with 0
  }
#else
  (void)argc;
  (void)argv;
#endif
}
如果大家对于深度学习与计算机视觉领域感兴趣,希望获得更多的知识分享与最新的论文解读,欢迎关注我的个人公众号“深视”。
评论(0)
您还未登录,请登录后发表或查看评论