使用Kinect V2录制视频
发布时间
阅读量:
阅读量
使用Kinect 2.0 SDK 与 OpenCV 录制深度与彩色视频的示例代码。
#include <iostream>
#include <string>

#include <Kinect.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "videoGet\videoprocessor.h"
using namespace std;
using namespace cv;
int main(void)
{
IKinectSensor * mySensor = nullptr;
GetDefaultKinectSensor(&mySensor); //获取感应器
mySensor->Open(); //打开感应器
double time0 = static_cast<double>(getTickCount());
IDepthFrameSource * mySource = nullptr; //取得深度数据
mySensor->get_DepthFrameSource(&mySource);
int height = 0, width = 0;
IFrameDescription * myDescription = nullptr; //取得深度数据的分辨率
mySource->get_FrameDescription(&myDescription);
myDescription->get_Height(&height);
myDescription->get_Width(&width);
myDescription->Release();
IDepthFrameReader * myReader = nullptr;
mySource->OpenReader(&myReader); //打开深度数据的Reader
IDepthFrame * myFrame = nullptr;
Mat temp(height, width, CV_16UC1); //建立图像矩阵
Mat img(height, width, CV_8UC1);
IKinectSensor * mySensor1 = nullptr; //第1步打开Sensor
GetDefaultKinectSensor(&mySensor1);
mySensor1->Open();
IColorFrameSource * mySource1 = nullptr; //第2步获取Source
mySensor1->get_ColorFrameSource(&mySource1);
int height1 = 0, width1 = 0; //取得宽和高等下用
IFrameDescription * myDescription1 = nullptr;
mySource1->get_FrameDescription(&myDescription1);
myDescription1->get_Height(&height1);
myDescription1->get_Width(&width1);
IColorFrameReader * myReader1 = nullptr; //第3步打开Reader
mySource1->OpenReader(&myReader1);
Mat img1(height1, width1, CV_8UC4);
IColorFrame * myFrame1 = nullptr;
double rate = 30;
std::cout << "Frame rate: " << rate << "fps" << std::endl;
int delay = 1000 / rate;
long long i = 0;
std::string b = "depth";
std::string ext = ".bmp";
std::string b1 = "color";
cv::VideoWriter write;
write.open("color.avi", CV_FOURCC('M', 'J', 'P', 'G'), 15.0, Size(512, 424));
// VideoProcessor processor;
while (1)
{
// double time0 = static_cast<double>(getTickCount());
//第4步获取Frame
if ((myReader->AcquireLatestFrame(&myFrame) == S_OK) && (myReader1->AcquireLatestFrame(&myFrame1) == S_OK))//通过Reader尝试获取最新的一帧深度数据,放入深度帧中,并判断是否成功获取
{
myFrame->CopyFrameDataToArray(height * width, (UINT16 *)temp.data); //先把数据存入16位的图像矩阵中
temp.convertTo(img, CV_8UC1, 255.0 / 4500); //再把16位转换为8位
imshow("TEST", img);
UINT size1 = 0;
myFrame1->CopyConvertedFrameDataToArray(width1 * height1 * 4, (BYTE *)img1.data, ColorImageFormat_Bgra);
imshow("TEST", img1);
std::string name(b);
std::ostringstream ss; ss << i; name += ss.str(); //i++;
name += ext;
std::cout << name << std::endl;
std::string name1(b1);
std::ostringstream ss1; ss1 << i; name1 += ss1.str(); i++;
name1 += ext;
std::cout << name1 << std::endl;
// cv::imwrite(name, img);
if (cv::waitKey(delay) >= 0)
return 0;
write << img1;
/*
VideoProcessor processor;
processor.setDelay(1000. / 30.0);
processor.setOutput("depth.avi", -1, 30);
// cv::Mat frame = img.clone();
116. processor.run();
*/
write.open("depth.avi", CV_FOURCC('M', 'J', 'P', 'G'), 30.0,Size(512,424));
//write.write(img);
write << img;
myFrame->Release();
myFrame1->Release();
}
else
{
// cout << "It cannot get a new image!!!" << "\n" << endl;
}
if (waitKey(30) == VK_ESCAPE)
break;
}
myReader->Release(); //释放不用的变量并且关闭感应器
mySource->Release();
mySensor->Close();
mySensor->Release();
myReader1->Release(); //释放不用的变量并且关闭感应器
mySource1->Release();
mySensor1->Close();
mySensor1->Release();
全部评论 (0)
还没有任何评论哟~
