本文整理汇总了C++中cvCreateImageHeader函数的典型用法代码示例。如果您正苦于以下问题:C++ cvCreateImageHeader函数的具体用法?C++ cvCreateImageHeader怎么用?C++ cvCreateImageHeader使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvCreateImageHeader函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: printf
/**
 * Apply the configured OpenCV smoothing kernel to every band of the input
 * tile, writing the smoothed pixels into theTile.
 *
 * @param tile Source tile; each band is processed independently.
 */
void rspfOpenCVSmoothFilter::runUcharTransformation(rspfImageData* tile)
{
   const int bandCount = tile->getNumberOfBands();
   const CvSize tileSize = cvSize(tile->getWidth(), tile->getHeight());
   for (int band = 0; band < bandCount; band++)
   {
      printf("Channel %d\n", band);
      // Wrap the source and destination band buffers in IplImage headers;
      // no pixel data is copied, the headers merely describe the buffers.
      IplImage* srcHeader = cvCreateImageHeader(tileSize, 8, 1);
      srcHeader->imageData = static_cast<char*>(tile->getBuf(band));
      IplImage* dstHeader = cvCreateImageHeader(tileSize, 8, 1);
      dstHeader->imageData = static_cast<char*>(theTile->getBuf(band));
      cvSmooth(srcHeader, dstHeader, theSmoothType, theParam1, theParam2, theParam3, theParam4);
      // Release only the headers; the pixel buffers belong to the tiles.
      cvReleaseImageHeader(&srcHeader);
      cvReleaseImageHeader(&dstHeader);
   }
   theTile->validate();
}
开发者ID:vapd-radi,项目名称:rspf_v2.0,代码行数:27,代码来源:rspfOpenCVSmoothFilter.cpp
示例2: printf
/**
 * Run a Sobel edge detector over every band of the input tile and store the
 * scaled 8-bit result in theTile.
 *
 * @param tile Source tile; each band is processed independently.
 */
void rspfOpenCVSobelFilter::runUcharTransformation(rspfImageData* tile) {
   const int bandCount = tile->getNumberOfBands();
   const CvSize tileSize = cvSize(tile->getWidth(), tile->getHeight());
   for (int band = 0; band < bandCount; band++)
   {
      printf("Channel %d\n", band);
      // Headers only -- pixel data is borrowed from the input/output tiles.
      IplImage* srcHeader = cvCreateImageHeader(tileSize, 8, 1);
      srcHeader->imageData = static_cast<char*>(tile->getBuf(band));
      IplImage* dstHeader = cvCreateImageHeader(tileSize, 8, 1);
      dstHeader->imageData = static_cast<char*>(theTile->getBuf(band));
      // cvSobel needs a signed 16-bit destination to hold negative gradients;
      // cvConvertScale then saturates it back down to unsigned 8-bit.
      IplImage* gradient = cvCreateImage(tileSize, IPL_DEPTH_16S, 1);
      cvSobel(srcHeader, gradient, theXOrder, theYOrder, theApertureSize);
      cvConvertScale(gradient, dstHeader);
      cvReleaseImageHeader(&srcHeader);
      cvReleaseImageHeader(&dstHeader);
      cvReleaseImage(&gradient);
   }
   theTile->validate();
}
开发者ID:vapd-radi,项目名称:rspf_v2.0,代码行数:28,代码来源:rspfOpenCVSobelFilter.cpp
示例3: radial_sample
/*
 * Sample the raw slice image along RADIAL_SAMPLES evenly spaced rays cast
 * outward from the global centre (cx, cy). For each ray, Sobel edge responses
 * above SOBEL_THRESH mark layer boundaries; the intensity at each layer's
 * strongest edge pixel is written into row `slice` of `unwrapped`.
 *
 * width/height/data describe the raw 8-bit single-channel slice buffer.
 */
void radial_sample(int width, int height, char* data, IplImage *unwrapped, int slice)
{
    // Wrap the raw slice buffer in an IplImage header (no pixel copy).
    IplImage *cvcast = cvCreateImageHeader(cvSize(width, height),
        IPL_DEPTH_8U, 1);
    cvcast->imageData = data;
    // cvSaveImage("slice.png",cvcast);
    CvPoint center = cvPoint(cx,cy);
    unsigned char* linebuf;
    for(int sample = 0; sample < RADIAL_SAMPLES; sample++) {
        float theta = ((float)sample)*((2.0*PI)/(float)RADIAL_SAMPLES);
        CvPoint outer = calc_ray_outer(theta, center);
        // printf("%g:\t%d,%d\n", theta*(180.0/PI), outer.x, outer.y);
        // FIX: the second argument had been corrupted to "&cent;er" by HTML
        // escaping; clip the outer->center ray against the image rectangle.
        cvClipLine(cvSize(width, height), &outer, &center);
        // Worst-case pixel count cvSampleLine can write for 4-connectivity.
        int linesize = abs(center.x-outer.x)+abs(center.y-outer.y)+1;
        linebuf = (unsigned char*)malloc(linesize);
        cvSampleLine(cvcast,outer,center,linebuf,4);
        // View the sampled ray as a 1-pixel-tall image so cvSobel can run on it.
        IplImage *castline = cvCreateImageHeader(cvSize(linesize,1), IPL_DEPTH_8U, 1);
        castline->imageData = (char*)linebuf;
        IplImage *sobel = cvCreateImage(cvSize(linesize,1), IPL_DEPTH_8U, 1);
        cvSobel(castline, sobel, 1, 0, 3);
        int layer = 0;
        for(int i = 0; (i < linesize) && (layer < MAX_LAYERS); i++) {
            // printf(" %d,", (int)cvGetReal1D(sobel,i));
            if((int)cvGetReal1D(sobel,i) > SOBEL_THRESH) {
                // Walk to the end of this edge response, remembering its peak.
                int max = 0, max_i = 0;
                for(; i < linesize; i++) {
                    int curval = (int)cvGetReal1D(sobel,i);
                    if(curval == 0) break;
                    if(curval > max) {
                        max = curval;
                        max_i = i;
                    }
                }
                // Record the intensity at the strongest edge pixel of this layer.
                cvSetReal2D(unwrapped,slice,(layer*RADIAL_SAMPLES)+sample,cvGetReal1D(castline,max_i));
                // printf("%d\t",max);
                layer++;
            }
        }
        // printf("\n");
        /*
        char filename[] = "line000.png";
        sprintf(filename,"line%03d.png",(int)(theta*(180.0/PI)));
        cvSaveImage(filename,sobel);
        */
        cvReleaseImageHeader(&castline);
        cvReleaseImage(&sobel);
        free(linebuf);
    }
    // FIX: the slice wrapper header was previously leaked on every call.
    cvReleaseImageHeader(&cvcast);
}
开发者ID:viscenter,项目名称:educe,代码行数:60,代码来源:autoseg.cpp
示例4: printf
/**
 * Erode each band of the input tile and write the result into theTile.
 *
 * @param tile Source tile; each band is processed independently.
 */
void rspfOpenCVErodeFilter::runUcharTransformation(rspfImageData* tile)
{
   const int bandCount = tile->getNumberOfBands();
   const CvSize tileSize = cvSize(tile->getWidth(), tile->getHeight());
   for (int band = 0; band < bandCount; band++)
   {
      printf("Channel %d\n", band);
      // Wrap source/destination band buffers; headers only, no pixel copies.
      IplImage* srcHeader = cvCreateImageHeader(tileSize, 8, 1);
      srcHeader->imageData = static_cast<char*>(tile->getBuf(band));
      IplImage* dstHeader = cvCreateImageHeader(tileSize, 8, 1);
      dstHeader->imageData = static_cast<char*>(theTile->getBuf(band));
      // A NULL kernel means a 3x3 rectangular structuring element is used.
      cvErode(srcHeader, dstHeader, NULL, theIterations);
      cvReleaseImageHeader(&srcHeader);
      cvReleaseImageHeader(&dstHeader);
   }
   theTile->validate();
}
开发者ID:vapd-radi,项目名称:rspf_v2.0,代码行数:25,代码来源:rspfOpenCVErodeFilter.cpp
示例5: while
// Capture thread main loop: repeatedly grabs the target window/screen region,
// converts the BGRA screenshot to a BGR24 BleImage, and publishes it in
// m_image under m_modifyMutex. Runs until m_stop is set.
void BleWindowsCaptureSource::run()
{
    // TODO make could select screen
    // QGuiApplication::screens();
    while (!m_stop) {
        QElapsedTimer elapsedTimer;
        elapsedTimer.start();
        QScreen *screen = QGuiApplication::primaryScreen();
        if (screen) {
            QPixmap pixmap = screen->grabWindow(m_wid, m_x, m_y, m_width, m_height);
#if 1
            // TODO to draw cursor to image
            // grabWindow does not capture the cursor; paint it in manually
            // when it lies inside the captured desktop.
            QRect desktopRect = QRect(QPoint(0, 0), screen->size());
            if (desktopRect.contains(QCursor::pos())) {
                drawCursor(&pixmap);
            }
#endif
            QImage image = pixmap.toImage();
            m_modifyMutex.lock(); // Start lock
            BleImage be;
            be.width = image.width();
            be.height = image.height();
            // 3 bytes per pixel for the BGR24 output frame.
            int imageSize = be.width * be.height * 3;
            // NOTE(review): a fresh buffer is allocated for every frame and
            // handed to m_image; presumably BleImage owns and frees it --
            // verify, otherwise this leaks one frame per iteration.
            be.data = new char[imageSize];
            // Wrap the QImage (BGRA) and the output buffer in IplImage headers,
            // then convert 4-channel BGRA -> 3-channel BGR without extra copies.
            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 4);
            cvSetData(oriImage, image.bits(), image.bytesPerLine());
            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, be.data, be.width * 3);
            cvCvtColor(oriImage, dstImage, CV_BGRA2BGR);
            be.dataSize = imageSize;
            be.format = BleImage_Format_BGR24;
            m_image = be;
            // Release only the headers; be.data lives on inside m_image.
            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);
            m_modifyMutex.unlock(); // End unlock
        }
        // Sleep for the remainder of the capture interval; clamp to 0 if the
        // grab/convert work overran the frame budget.
        int elapsedMs = elapsedTimer.elapsed();
        int needSleepMs = m_interval - elapsedMs;
        if (needSleepMs < 0) {
            needSleepMs = 0;
        }
        msleep(needSleepMs);
    }
    log_trace("BleWindowsCaptureSource exit normally.");
}
开发者ID:JaydenChou,项目名称:Bull-Live-Encoder,代码行数:59,代码来源:BleWindowsCaptureSource.cpp
示例6: Q_UNUSED
// Composite every registered capture source onto the widget, then outline the
// active source with a dotted selection rectangle and red resize handles.
void BleImageProcess::paintEvent(QPaintEvent *event)
{
    Q_UNUSED(event);
    QPainter p(this);
    p.setRenderHint(QPainter::SmoothPixmapTransform);
    // back ground
    p.fillRect(rect(), QBrush(QColor(48, 48, 48)));
    // element draw
    for (int i = 0; i < m_sources.size(); ++i) {
        const SourcePair & pair = m_sources.at(i);
        BleSourceAbstract *s = pair.source;
        // TODO image data may be used by other thread
        BleImage image = s->getImage();
        if (image.dataSize <= 0) continue;
        QImage qimage;
        if (image.format == BleImage_Format_BGR24) {
            // In-place BGR -> RGB channel swap: both headers alias image.data.
            // NOTE(review): if getImage() returns a shallow copy, this mutates
            // the source's own buffer, and a subsequent paint would swap the
            // channels back to BGR -- verify ownership semantics of BleImage.
            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(oriImage, image.data, image.width*3);
            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, image.data, image.width*3);
            cvCvtColor(oriImage, dstImage, CV_BGR2RGB);
            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);
        }
        // Wrap the (now RGB) bytes without copying and scale into the
        // source's layout rectangle.
        qimage = QImage((uchar*)image.data, image.width, image.height, QImage::Format_RGB888);
        p.drawPixmap(pair.rect, QPixmap::fromImage(qimage));
        // p.drawImage(pair.rect, qimage);
    }
    if (m_activePair && m_activePair->rect.isValid()) {
        // Dotted white outline marks the currently selected source.
        QPen pen(Qt::SolidLine);
        pen.setColor(Qt::white);
        pen.setWidth(2);
        pen.setStyle(Qt::DotLine);
        p.setPen(pen);
        p.drawRect(m_activePair->rect);
        // Small red drag handles at the top-left and bottom-right corners.
        QRect topLeftRect(m_activePair->rect.x(), m_activePair->rect.y(), 8, 8);
        p.fillRect(topLeftRect, QBrush(Qt::red));
        QRect bottomRightRect(m_activePair->rect.bottomRight().x(), m_activePair->rect.bottomRight().y(), -8, -8);
        p.fillRect(bottomRightRect, QBrush(Qt::red));
    }
}
开发者ID:JaydenChou,项目名称:Bull-Live-Encoder,代码行数:56,代码来源:BleImageProcess.cpp
示例7: gst_opencv_video_filter_set_caps
/* Caps negotiation: translate the agreed input/output video formats into
 * IplImage parameters, give the subclass a chance to react, and (re)allocate
 * the image headers used by the transform. Returns FALSE on any failure. */
static gboolean
gst_opencv_video_filter_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstOpencvVideoFilter *transform = GST_OPENCV_VIDEO_FILTER (trans);
  GstOpencvVideoFilterClass *klass =
      GST_OPENCV_VIDEO_FILTER_GET_CLASS (transform);
  gint in_width, in_height, in_depth, in_channels;
  gint out_width, out_height, out_depth, out_channels;
  GError *parse_err = NULL;

  /* Input side: extract width/height/depth/channels from the caps. */
  if (!gst_opencv_parse_iplimage_params_from_caps (incaps, &in_width,
          &in_height, &in_depth, &in_channels, &parse_err)) {
    GST_WARNING_OBJECT (transform, "Failed to parse input caps: %s",
        parse_err->message);
    g_error_free (parse_err);
    return FALSE;
  }

  /* Output side. */
  if (!gst_opencv_parse_iplimage_params_from_caps (outcaps, &out_width,
          &out_height, &out_depth, &out_channels, &parse_err)) {
    GST_WARNING_OBJECT (transform, "Failed to parse output caps: %s",
        parse_err->message);
    g_error_free (parse_err);
    return FALSE;
  }

  /* Let the subclass veto or react to the negotiated formats. */
  if (klass->cv_set_caps &&
      !klass->cv_set_caps (transform, in_width, in_height, in_depth,
          in_channels, out_width, out_height, out_depth, out_channels))
    return FALSE;

  /* Drop images sized for the previous format before allocating new ones. */
  if (transform->cvImage)
    cvReleaseImage (&transform->cvImage);
  if (transform->out_cvImage)
    cvReleaseImage (&transform->out_cvImage);

  transform->cvImage =
      cvCreateImageHeader (cvSize (in_width, in_height), in_depth, in_channels);
  transform->out_cvImage =
      cvCreateImageHeader (cvSize (out_width, out_height), out_depth,
      out_channels);

  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (transform),
      transform->in_place);
  return TRUE;
}
开发者ID:Distrotech,项目名称:gst-plugins-bad,代码行数:53,代码来源:gstopencvvideofilter.c
示例8: haarwrapper_flip
/* Mirror image `im` horizontally (around the y axis) into `im2`.
 * Both wrapper structs must describe 8-bit 3-channel frames of the same size.
 * Always returns 0. */
guint32 haarwrapper_flip(t_haarwrapper *hc, t_haarwrapper_image* im, t_haarwrapper_image* im2)
{
  /* Wrap both frames in IplImage headers; no pixel data is copied. */
  IplImage *img = cvCreateImageHeader(cvSize(im->width,im->height), IPL_DEPTH_8U, 3);
  img->widthStep = im->rowbytes;
  img->imageData = (char*)im->data[0];
  IplImage *img2 = cvCreateImageHeader(cvSize(im2->width,im2->height), IPL_DEPTH_8U, 3);
  img2->widthStep = im2->rowbytes;
  img2->imageData = (char*)im2->data[0];
  cvFlip(img, img2, 1);
  /* FIX: release the headers -- they were leaked on every call. Only the
   * headers go away; the pixel buffers still belong to the callers. */
  cvReleaseImageHeader(&img);
  cvReleaseImageHeader(&img2);
  return(0);
}
开发者ID:miguelao,项目名称:gst_plugins_tsunami,代码行数:14,代码来源:haarwrapper.c
示例9: main
int main (int argc,char* argv[]){
if (argc != 2 && argc != 3){
printf("usage:\n %s /path/to/recoding/filename.oni\n",argv[0]);
return 0;
}
Xn_sensor sensor(WIDTH,HEIGHT);
sensor.play(argv[1],false);
cvNamedWindow( "Model Extractor Viewer", 1 );
IplImage* rgb_image = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
IplImage* test = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
IplImage* gray = cvCreateImage(cvSize(WIDTH,HEIGHT), 8, 1);
Mat img;
pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr model (new pcl::PointCloud<pcl::PointXYZRGB>);
//pcl::visualization::CloudViewer viewer("Model Extractor Viewer");
//Read Fiducial from file
Fiducial fiducial("fiducial.yml");
Pose pose;
while(/*!viewer.wasStopped() && */!sensor.endPlaying()){
//Get the frame
sensor.update();
sensor.getPCL(cloud);
cvSetData(rgb_image,sensor.rgb,rgb_image->widthStep);
//Estimate Camera Pose from fiducial
cvCvtColor(rgb_image,gray,CV_BGR2GRAY);
if (fiducial.find(gray,true)){
pose.estimate(gray,fiducial);
//fiducial.draw(rgb_image);
}
if (pose.isFound()){
printf("Rotation");
printMat<double>(pose.getR());
printf("Translation");
printMat<double>(pose.getT());
//Segment volume around the fiducial
boxFilter(cloud,pose);
//Create 3D model
buildModel(cloud,model);
}
//viewer.showCloud (model);
}
pcl::io::savePCDFileBinary ("model.pcd", *model);
sensor.shutdown();
return 0;
}
开发者ID:alantrrs,项目名称:augmented_dev,代码行数:49,代码来源:model_extractor.cpp
示例10: main
int main(int argc, char* argv[])
{
printf("DUOLib Version: v%s\n", GetLibVersion());
// Open DUO camera and start capturing
if(!OpenDUOCamera(WIDTH, HEIGHT, FPS))
{
printf("Could not open DUO camera\n");
return 0;
}
// Create OpenCV windows
cvNamedWindow("Left");
cvNamedWindow("Right");
// Set exposure and LED brightness
SetExposure(50);
SetLed(25);
// Create image headers for left & right frames
IplImage *left = cvCreateImageHeader(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
IplImage *right = cvCreateImageHeader(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
// Run capture loop until <Esc> key is pressed
while((cvWaitKey(1) & 0xff) != 27)
{
// Capture DUO frame
PDUOFrame pFrameData = GetDUOFrame();
if(pFrameData == NULL) continue;
// Set the image data
left->imageData = (char*)pFrameData->leftData;
right->imageData = (char*)pFrameData->rightData;
// Process images here (optional)
// Display images
cvShowImage("Left", left);
cvShowImage("Right", right);
}
// Release image headers
cvReleaseImageHeader(&left);
cvReleaseImageHeader(&right);
// Close DUO camera
CloseDUOCamera();
return 0;
}
开发者ID:l0g1x,项目名称:samples,代码行数:48,代码来源:Sample.cpp
示例11: gst_buffer_get_caps
//
// decode buffer
//
// Wrap the most recently grabbed GStreamer buffer in the cached IplImage
// header and return it. Returns NULL when no buffer has been grabbed yet or
// when the buffer caps carry no usable width/height.
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
    if(!buffer)
        return 0; // FIX: was 'return false' in a pointer-returning function

    if(!frame) {
        // First frame: read the negotiated size from the buffer caps and
        // allocate a header; its data pointer is filled in below (no copy).
        gint height, width;
        GstCaps *buff_caps = gst_buffer_get_caps(buffer);
        assert(gst_caps_get_size(buff_caps) == 1);
        GstStructure* structure = gst_caps_get_structure(buff_caps, 0);
        if(!gst_structure_get_int(structure, "width", &width) ||
                !gst_structure_get_int(structure, "height", &height)) {
            gst_caps_unref(buff_caps); // FIX: caps ref was leaked on this path
            return 0; // FIX: was 'return false'
        }
        frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
        gst_caps_unref(buff_caps);
    }

    // no need to memcpy, just use gstreamer's buffer :-)
    frame->imageData = (char *)GST_BUFFER_DATA(buffer);
    //memcpy (frame->imageData, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE (buffer));
    //gst_buffer_unref(buffer);
    //buffer = 0;
    return frame;
}
开发者ID:hksonngan,项目名称:neocortex,代码行数:29,代码来源:cap_gstreamer.cpp
示例12: lk
// Wait (under mutex_) for a fresh camera frame, then extract its luma plane
// into final_. Returns false when the captured byte count does not match the
// expected packed-YUYV frame size (2 bytes per pixel), true on success.
bool HiwrCameraControllerNodelet::copyRead(){
    std::unique_lock<std::mutex> lk(mutex_);
    // Defensive re-lock: unique_lock(mutex_) should already own the lock;
    // this guards against a failed acquisition without crashing the nodelet.
    if(!lk.owns_lock()){
        try {
            lk.lock();
        } catch(const std::system_error& e) {
            std::cout << "coin2 Caught system_error with code " << e.code()
                      << " meaning " << e.what() << '\n';
        }
    }
    // Block until the capture thread signals that a new frame has arrived.
    if(!new_frame_){
        waiter_.wait(lk);
    }
    // NOTE(review): a new header is created on every call; presumably
    // dealMemory() disposes of the previous one -- verify, else this leaks.
    image_ipl_ = cvCreateImageHeader(cvSize(config_width_ ,config_height_), 8, 1);
    dealMemory();
    // Expected size of a packed YUYV frame: width * height * 2 bytes.
    const int total = config_width_*config_height_*2;
    if(total!=bytes_used_)
        return false; // lock is released by the unique_lock destructor
    // Copy every second byte -- the Y (luma) samples -- into final_.
    int j=0;
    for(int i=0; i< total; j++){
        final_[j]=frame_[i];
        i+=2;
    };
    new_frame_=false;
    lk.unlock();
    return true;
}
开发者ID:Hiwr,项目名称:hiwr_camera_controller,代码行数:32,代码来源:hiwr_camera_controller.cpp
示例13: cvCreateImageHeader
/**
 * Encode a NUimage (YCbCr) as a JPEG file on disk.
 *
 * The image is first converted pixel-by-pixel into a packed BGR byte buffer,
 * which is then wrapped in an IplImage header and handed to cvSaveImage.
 *
 * @param image     Source image in YCbCr colour space.
 * @param pFileName Destination file path.
 * @return Always true.
 */
bool JpegSaver::saveNUimageAsJpeg(NUimage* image, const std::string& pFileName)
{
    const int width = image->width();
    const int height = image->height();
    char* bgrBuffer = new char[width * height * 3];
    int idx = 0;
    unsigned char r, g, b;
    pixels::Pixel pixel;
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            pixel = image->image[y][x];
            ColorModelConversions::fromYCbCrToRGB(pixel.y, pixel.cb, pixel.cr, r, g, b);
            // OpenCV expects B, G, R byte order.
            bgrBuffer[idx++] = b;
            bgrBuffer[idx++] = g;
            bgrBuffer[idx++] = r;
        }
    }
    // Wrap the buffer in a header (no copy) and let OpenCV encode the JPEG.
    IplImage* fIplImageHeader = cvCreateImageHeader(cvSize(width, height), 8, 3);
    fIplImageHeader->imageData = bgrBuffer;
    cvSaveImage(pFileName.c_str(), fIplImageHeader);
    if (fIplImageHeader)
    {
        cvReleaseImageHeader(&fIplImageHeader);
    }
    delete [] bgrBuffer;
    return true;
}
开发者ID:BuddenD,项目名称:robocup,代码行数:29,代码来源:JpegSaver.cpp
示例14: gst_motiondetect_set_caps
/* Caps negotiation: validate the incoming video format, (re)allocate the
 * current/reference images used for motion detection, and reset the state
 * machine so a fresh reference frame is acquired. Returns FALSE on any
 * unsupported format. */
static gboolean
gst_motiondetect_set_caps (GstBaseTransform *trans, GstCaps *incaps,
    GstCaps *outcaps)
{
  gint width, height, depth, ipldepth, channels;
  GError *err = NULL;
  GstStructure *structure = gst_caps_get_structure (incaps, 0);
  StbtMotionDetect *filter = GST_MOTIONDETECT (trans);

  if (!filter) {
    return FALSE;
  }

  if (!gst_structure_get_int (structure, "width", &width) ||
      !gst_structure_get_int (structure, "height", &height) ||
      !gst_structure_get_int (structure, "depth", &depth)) {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "No width/height/depth in caps");
    g_error_free (err); /* FIX: the GError was leaked on this failure path */
    return FALSE;
  }

  /* Map the caps media type to an OpenCV channel count. */
  if (gst_structure_has_name (structure, "video/x-raw-rgb")) {
    channels = 3;
  } else if (gst_structure_has_name (structure, "video/x-raw-gray")) {
    channels = 1;
  } else {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported caps %s", gst_structure_get_name (structure));
    g_error_free (err); /* FIX: leak */
    return FALSE;
  }

  /* Only 8 bits per channel is supported. */
  if (depth / channels == 8) {
    ipldepth = IPL_DEPTH_8U;
  } else {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported depth/channels %d/%d", depth, channels);
    g_error_free (err); /* FIX: leak */
    return FALSE;
  }

  /* Drop images sized for the previous caps. */
  if (filter->cvCurrentImage) {
    cvReleaseImageHeader (&filter->cvCurrentImage);
    filter->cvCurrentImage = NULL;
  }
  if (filter->cvReferenceImageGray) {
    cvReleaseImage (&filter->cvReferenceImageGray);
    filter->cvReferenceImageGray = NULL;
  }
  if (filter->cvCurrentImageGray) {
    cvReleaseImage (&filter->cvCurrentImageGray);
    filter->cvCurrentImageGray = NULL;
  }

  /* cvCurrentImage is only a header: it points at incoming buffers. */
  filter->cvCurrentImage =
      cvCreateImageHeader (cvSize (width, height), ipldepth, channels);
  filter->cvReferenceImageGray = cvCreateImage(
      cvSize (width, height), IPL_DEPTH_8U, 1);
  filter->cvCurrentImageGray = cvCreateImage(
      cvSize (width, height), IPL_DEPTH_8U, 1);
  filter->state = MOTION_DETECT_STATE_ACQUIRING_REFERENCE_IMAGE;

  return gst_motiondetect_check_mask_compability(filter);
}
开发者ID:ekelly30,项目名称:stb-tester,代码行数:60,代码来源:gstmotiondetect.c
示例15: memcpy
// --------------------------------------------------------------------------
//! @brief Get an image from the AR.Drone's camera.
//! @return An OpenCV image data (IplImage or cv::Mat)
//! @retval NULL Failure
// --------------------------------------------------------------------------
ARDRONE_IMAGE ARDrone::getImage(void)
{
    // There is no image
    if (!img) return ARDRONE_IMAGE(NULL);
    // Enable mutex lock
    if (mutexVideo) pthread_mutex_lock(mutexVideo);
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Copy current frame to an IplImage
        // NOTE(review): a decode height of 368 is truncated to 360 rows --
        // presumably the codec pads the height to a multiple of 16; confirm
        // against the decoder configuration.
        memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
    }
    // AR.Drone 1.0
    else {
        // If the sizes of the buffer and the IplImage are different
        if (pCodecCtx->width != img->width || pCodecCtx->height != img->height) {
            // Resize the image to 320x240
            // Wrap the decoder buffer in a temporary header (no pixel copy),
            // scale it into img, then release only the header.
            IplImage *small_img = cvCreateImageHeader(cvSize(pCodecCtx->width, pCodecCtx->height), IPL_DEPTH_8U, 3);
            small_img->imageData = (char*)bufferBGR;
            cvResize(small_img, img, CV_INTER_CUBIC);
            cvReleaseImageHeader(&small_img);
        }
        // For 320x240 image, just copy it
        else memcpy(img->imageData, bufferBGR, pCodecCtx->width * pCodecCtx->height * sizeof(uint8_t) * 3);
    }
    // The latest image has been read, so change newImage accordingly
    newImage = false;
    // Disable mutex lock
    if (mutexVideo) pthread_mutex_unlock(mutexVideo);
    return ARDRONE_IMAGE(img);
}
开发者ID:Riseley,项目名称:Drone,代码行数:40,代码来源:video.cpp
示例16: gst_template_match_handle_sink_event
/* Sink-pad event handler: on a CAPS event, reallocate the cached cvImage
 * header to match the newly negotiated frame size (8-bit, 3 channels).
 * All events are then forwarded to the default pad handler. */
static gboolean
gst_template_match_handle_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event)
{
  GstTemplateMatch *filter = GST_TEMPLATE_MATCH (parent);

  if (GST_EVENT_TYPE (event) == GST_EVENT_CAPS) {
    GstCaps *caps;
    GstVideoInfo info;

    gst_event_parse_caps (event, &caps);
    gst_video_info_from_caps (&info, caps);

    /* Drop the header sized for the previous caps, then make a new one. */
    if (filter->cvImage)
      cvReleaseImageHeader (&filter->cvImage);
    filter->cvImage =
        cvCreateImageHeader (cvSize (info.width, info.height), IPL_DEPTH_8U,
        3);
  }

  return gst_pad_event_default (pad, parent, event);
}
开发者ID:ndufresne,项目名称:gst-plugins-bad,代码行数:35,代码来源:gsttemplatematch.cpp
示例17: createIplImageFromImageInfo
/* Function: createIplImageFromImageInfo
 *
 * Description: Populates an IplImage with image information and data stored in
 * an ImageInfo struct.
 *
 * Parameters:
 * image: empty IplImage to populate using values from ImageInfo
 * imageInfo: ImageInfo struct populated with image info from MATLAB input
 *
 * Returns: 0 on success, error code on error (*image is NULL on error)
 */
int createIplImageFromImageInfo(
    _InOut_ IplImage **image,
    _In_ ImageInfo imageInfo)
{
    // create single-channel IplImage with same height, width, and depth as
    // field values in ImageInfo struct
    *image = cvCreateImageHeader(cvSize(imageInfo.imageWidth, imageInfo.imageHeight), (imageInfo.imageDepth == UINT16) ? IPL_DEPTH_16U : IPL_DEPTH_8U, 1);

    // allocate memory for IplImage image data
    (*image)->imageData = (char *)malloc(imageInfo.imageWidthStep * imageInfo.imageHeight * sizeof(char));
    if ((*image)->imageData == NULL)
    {
        // FIX: release the header (which also NULLs *image) so a failed
        // allocation neither leaks it nor hands the caller a half-built image
        cvReleaseImageHeader(image);
        return OUT_OF_MEMORY_ERROR;
    }

    // copy image data into IplImage
    memcpy((*image)->imageData, imageInfo.imageData, imageInfo.imageWidthStep * imageInfo.imageHeight);

    // set additional IplImage info
    (*image)->widthStep = imageInfo.imageWidthStep;
    (*image)->imageDataOrigin = (*image)->imageData;

    return 0;
}
开发者ID:minghuig,项目名称:wing_project,代码行数:36,代码来源:videos.cpp
示例18: cvCreateImageHeader
/******************将图片缩小至640*512方便显示***********************/
void CMainFrame::Resize()
{
CvSize cvSize;
cvSize.width = Width;
cvSize.height = Height;
//生成支持OPENCV的IPLIMAGE数据结构,并使用相机采集的图像数据初始化
IplImage *iplImage = cvCreateImageHeader(cvSize,IPL_DEPTH_8U,3);
cvSetData(iplImage,m_pImageBuffer,Width*3);
CvSize my_cvSize; //新图片尺寸
my_cvSize.width = Width/2;
my_cvSize.height = Height/2;
IplImage *iplgraytemp = cvCreateImage(my_cvSize,IPL_DEPTH_8U,3); //新图片矩阵
cvResize(iplImage,iplgraytemp,CV_INTER_NN);
//从LPLIMAGE数据结构中提取图像数据
memcpy(DisplayBuffer,(BYTE*)iplgraytemp->imageData,Height/2*Width/2*3);
//释放申请的图象空间
cvReleaseImage(&iplgraytemp);
}
开发者ID:lzhang57,项目名称:3D_scanner,代码行数:27,代码来源:MainFrm.cpp
示例19: cvCreateImageHeader
// Wrap a boolean array as an 8-bit single-channel image and scale its 0/1
// values up to 0/255 in place.
// NOTE: the returned header aliases (and mutates) the caller's buffer;
// release it with cvReleaseImageHeader, not cvReleaseImage.
IplImage *boolarr2img(bool* barr, CvSize s) {
    IplImage *wrapped = cvCreateImageHeader(s, IPL_DEPTH_8U, 1);
    wrapped->imageData = (char *)barr;
    cvConvertScale(wrapped, wrapped, 255);
    return wrapped;
}
开发者ID:Nyangawa,项目名称:structured_light,代码行数:7,代码来源:ThreeStepTest.cpp
示例20: cvCreateImageHeader
/** Initialize the video writer.
 *
 * PRECONDITION
 * REQUIRE(isColor = 0 / 1)
 *
 * WARNING
 * EXAMPLES
 *
 * @param filename the name of the output video file
 * @param codeType FOURCC codec identifier passed to cvCreateVideoWriter
 * @param fps frame per second
 * @param height image height
 * @param width image width
 * @param isColor whether the image is color (non-zero => 3 channels)
 *
 * @return void
 */
void CLHVideoWriter::Init(const char* filename, int codeType, float fps, int height, int width, int isColor)
{
    mHeight = height;
    mWidth = width;
    // Reusable frame header: 3 channels for colour output, 1 for grayscale.
    const int channels = (isColor != 0) ? 3 : 1;
    mpFrameImage = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, channels);
    // Alternative fixed codecs, kept for reference:
    //   CV_FOURCC('D','I','V','X')  /  CV_FOURCC('X','V','I','D')
    mpVideoWriter = cvCreateVideoWriter(filename, codeType, fps, cvSize(width, height), isColor);
}
开发者ID:jcchen1987,项目名称:FunctionCode,代码行数:32,代码来源:LHVideoWriter.cpp
注:本文中的cvCreateImageHeader函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论