本文整理汇总了C++中VideoFormat类的典型用法代码示例。如果您正苦于以下问题:C++ VideoFormat类的具体用法?C++ VideoFormat怎么用?C++ VideoFormat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VideoFormat类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: print_active_format
// Print the device's currently active video format (fourcc description,
// resolution and framerate) to stdout.
void print_active_format (const VideoFormat& format)
{
    const auto size = format.get_size();
    std::cout << "Active format:\n"
              << "Format: \t" << fourcc_to_description(format.get_fourcc()) << "\n"
              << "Resolution: \t" << size.width << "x" << size.height << "\n"
              << "Framerate: \t" << format.get_framerate() << "\n" << std::endl;
}
开发者ID:TheImagingSource,项目名称:tiscamera,代码行数:7,代码来源:formats.cpp
示例2: fmt
// Upload the per-frame GL uniforms for this material: one sampler uniform per
// plane, the color conversion matrix and the bits-per-pixel value. Also
// refreshes the shader's cached video format, since a single shader instance
// may be reused for frames of different formats.
void VideoShader::update(VideoMaterial *material)
{
if (!material->bind())
return;
const VideoFormat fmt(material->currentFormat());
//format is out of date because we may use the same shader for different formats
setVideoFormat(fmt);
// uniforms begin
program()->bind(); //glUseProgram(id). for glUniform
// all texture ids should be binded when renderering even for packed plane!
const int nb_planes = fmt.planeCount(); //number of texture id
for (int i = 0; i < nb_planes; ++i) {
// use glUniform1i to swap planes. swap uv: i => (3-i)%3
// TODO: in shader, use uniform sample2D u_Texture[], and use glUniform1iv(u_Texture, 3, {...})
program()->setUniformValue(textureLocation(i), (GLint)i);
}
// The shader may declare more sampler uniforms than the frame has planes;
// point the surplus samplers at the last real plane so sampling stays valid.
if (nb_planes < textureLocationCount()) {
for (int i = nb_planes; i < textureLocationCount(); ++i) {
program()->setUniformValue(textureLocation(i), (GLint)(nb_planes - 1));
}
}
//qDebug() << "color mat " << material->colorMatrix();
program()->setUniformValue(colorMatrixLocation(), material->colorMatrix());
program()->setUniformValue(bppLocation(), (GLfloat)material->bpp());
//program()->setUniformValue(matrixLocation(), material->matrix()); //what about sgnode? state.combindMatrix()?
// uniform end. attribute begins
}
开发者ID:aichunyu,项目名称:QtAV,代码行数:28,代码来源:VideoShader.cpp
示例3: DPTR_D
void VideoMaterial::setCurrentFrame(const VideoFrame &frame)
{
DPTR_D(VideoMaterial);
d.update_texure = true;
d.bpp = frame.format().bitsPerPixel(0);
d.width = frame.width();
d.height = frame.height();
const VideoFormat fmt(frame.format());
// http://forum.doom9.org/archive/index.php/t-160211.html
ColorTransform::ColorSpace cs = ColorTransform::RGB;
if (fmt.isRGB()) {
if (fmt.isPlanar())
cs = ColorTransform::GBR;
} else {
if (frame.width() >= 1280 || frame.height() > 576) //values from mpv
cs = ColorTransform::BT709;
else
cs = ColorTransform::BT601;
}
d.colorTransform.setInputColorSpace(cs);
d.frame = frame;
if (fmt != d.video_format) {
qDebug("pixel format changed: %s => %s", qPrintable(d.video_format.name()), qPrintable(fmt.name()));
d.video_format = fmt;
}
}
开发者ID:aichunyu,项目名称:QtAV,代码行数:26,代码来源:VideoShader.cpp
示例4:
// Stream a human-readable one-line summary of a VideoFormat to QDebug.
QDebug operator<< ( QDebug os, const VideoFormat & videoFormat ){
    os << "w: " << videoFormat.frameWidth();
    os << ", h: " << videoFormat.frameHeight();
    os << ", fps: " << videoFormat.framesPerSecond();
    os << ", count: " << videoFormat.frameCount();
    os << ", type: " << videoFormat.type();
    return os;
}
开发者ID:ElePhontitis,项目名称:Gesture2Midi,代码行数:8,代码来源:videoformat.cpp
示例5: DPTR_D
// Fetch the current decoded frame. Zero-copy mode wraps the D3D surface in an
// interop object for the GL renderer; otherwise the surface is locked and its
// planes are copied into a host-memory VideoFrame via copyToFrame().
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    //qDebug("frame size: %dx%d", d.frame->width, d.frame->height);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.frame->width <= 0 || d.frame->height <= 0 || !d.codec_ctx)
        return VideoFrame();
    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    if (copyMode() == ZeroCopy && d.interop_res) {
        // Zero-copy path: no pixels are copied here; the renderer maps the
        // surface later through the interop object stored in the metadata.
        dxva::SurfaceInteropDXVA *interop = new dxva::SurfaceInteropDXVA(d.interop_res);
        interop->setSurface(d3d, width(), height());
        VideoFrame f(width(), height(), VideoFormat::Format_RGB32); //p->width()
        f.setBytesPerLine(d.width * 4); //used by gl to compute texture size
        f.setMetaData(QStringLiteral("surface_interop"), QVariant::fromValue(VideoSurfaceInteropPtr(interop)));
        f.setTimestamp(d.frame->pkt_pts/1000.0);
        f.setDisplayAspectRatio(d.getDAR(d.frame));
        return f;
    }
    // RAII guard: locks the surface read-only in the ctor, unlocks in the dtor.
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };
    D3DLOCKED_RECT lock;
    // BUG FIX: this was `ScopedD3DLock(d3d, &lock);`, which constructs a
    // *temporary* that is destroyed at the end of the statement — the surface
    // was unlocked before lock.pBits was read below. Bind the guard to a named
    // object so the lock is held until the copy completes.
    ScopedD3DLock d3d_lock(d3d, &lock);
    if (lock.Pitch == 0) {
        return VideoFrame();
    }
    //picth >= desc.Width
    D3DSURFACE_DESC desc;
    d3d->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromD3D(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return VideoFrame();
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = desc.Format == MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
开发者ID:151706061,项目名称:QtAV,代码行数:57,代码来源:VideoDecoderDXVA.cpp
示例6: DPTR_D
// Assemble a host-memory VideoFrame from plane data copied out of a hardware
// surface. Chroma plane addresses/pitches are derived from the luma plane
// when the caller leaves them unset (<= 0 / null).
//
//   fmt       - target pixel format (assumed valid; caller checks)
//   surface_h - padded surface height (>= visible height)
//   src       - per-plane source pointers; src[0] must be set
//   pitch     - per-plane strides in bytes; pitch[0] must be > 0
//   swapUV    - swap planes 1 and 2 (e.g. IMC3 stores chroma in V,U order)
VideoFrame VideoDecoderFFmpegHW::copyToFrame(const VideoFormat& fmt, int surface_h, quint8 *src[], int pitch[], bool swapUV)
{
DPTR_D(VideoDecoderFFmpegHW);
Q_ASSERT_X(src[0] && pitch[0] > 0, "VideoDecoderFFmpegHW::copyToFrame", "src[0] and pitch[0] must be set");
const int nb_planes = fmt.planeCount();
const int chroma_pitch = nb_planes > 1 ? fmt.bytesPerLine(pitch[0], 1) : 0;
const int chroma_h = fmt.chromaHeight(surface_h);
int h[] = { surface_h, 0, 0};
// Fill in missing chroma plane geometry: each unset plane starts right after
// the previous plane in memory and uses the format's chroma pitch/height.
for (int i = 1; i < nb_planes; ++i) {
h[i] = chroma_h;
// set chroma address and pitch if not set
if (pitch[i] <= 0)
pitch[i] = chroma_pitch;
if (!src[i])
src[i] = src[i-1] + pitch[i-1]*h[i-1];
}
if (swapUV) {
std::swap(src[1], src[2]);
std::swap(pitch[1], pitch[2]);
}
VideoFrame frame;
if (copyMode() == VideoDecoderFFmpegHW::OptimizedCopy && d.gpu_mem.isReady()) {
// Optimized path: copy all planes into one 16-byte-aligned buffer using
// the GPU memory copier (USWC-aware copy).
int yuv_size = 0;
for (int i = 0; i < nb_planes; ++i) {
yuv_size += pitch[i]*h[i];
}
// additional 15 bytes to ensure 16 bytes aligned
QByteArray buf(15 + yuv_size, 0);
const int offset_16 = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
// plane 1, 2... is aligned?
uchar* plane_ptr = (uchar*)buf.data() + offset_16;
QVector<uchar*> dst(nb_planes, 0);
for (int i = 0; i < nb_planes; ++i) {
dst[i] = plane_ptr;
// TODO: add VideoFormat::planeWidth/Height() ?
// pitch instead of surface_width
plane_ptr += pitch[i] * h[i];
d.gpu_mem.copyFrame(src[i], dst[i], pitch[i], h[i], pitch[i]);
}
frame = VideoFrame(buf, width(), height(), fmt);
frame.setBits(dst);
frame.setBytesPerLine(pitch);
} else {
// Fallback: wrap the source pointers and deep-copy via clone().
frame = VideoFrame(width(), height(), fmt);
frame.setBits(src);
frame.setBytesPerLine(pitch);
// TODO: why clone is faster()?
// TODO: buffer pool and create VideoFrame when needed to avoid copy? also for other va
frame = frame.clone();
}
// NOTE(review): pkt_pts is scaled by 1/1000 — presumably milliseconds to
// seconds; confirm against the demuxer's timestamp units.
frame.setTimestamp(double(d.frame->pkt_pts)/1000.0);
frame.setDisplayAspectRatio(d.getDAR(d.frame));
d.updateColorDetails(&frame);
return frame;
}
开发者ID:151706061,项目名称:QtAV,代码行数:55,代码来源:VideoDecoderFFmpegHW.cpp
示例7: to
// Convert this frame to the given format/size. Hardware frames (no host bits)
// are first mapped to host memory through the "surface_interop" metadata
// object; software frames go through libswscale (ImageConverterSWS).
// Returns an invalid VideoFrame on failure. `roi` is accepted but cropping is
// not implemented yet (see TODO below).
VideoFrame VideoFrame::to(const VideoFormat &fmt, const QSize& dstSize, const QRectF& roi) const
{
if (!isValid() || !constBits(0)) {// hw surface. map to host. only supports rgb packed formats now
Q_D(const VideoFrame);
const QVariant v = d->metadata.value(QStringLiteral("surface_interop"));
if (!v.isValid())
return VideoFrame();
VideoSurfaceInteropPtr si = v.value<VideoSurfaceInteropPtr>();
if (!si)
return VideoFrame();
VideoFrame f;
f.setDisplayAspectRatio(displayAspectRatio());
f.setTimestamp(timestamp());
if (si->map(HostMemorySurface, fmt, &f)) {
// Mapped frame already matches the request: return it directly;
// otherwise recurse once on the now-host-memory frame.
if ((!dstSize.isValid() ||dstSize == QSize(width(), height())) && (!roi.isValid() || roi == QRectF(0, 0, width(), height()))) //roi is not supported now
return f;
return f.to(fmt, dstSize, roi);
}
return VideoFrame();
}
// Non-positive target dimensions mean "keep the source dimension".
const int w = dstSize.width() > 0 ? dstSize.width() : width();
const int h = dstSize.height() > 0 ? dstSize.height() : height();
if (fmt.pixelFormatFFmpeg() == pixelFormatFFmpeg()
&& w == width() && h == height()
// TODO: roi check.
)
return *this;
Q_D(const VideoFrame);
ImageConverterSWS conv;
conv.setInFormat(pixelFormatFFmpeg());
conv.setOutFormat(fmt.pixelFormatFFmpeg());
conv.setInSize(width(), height());
conv.setOutSize(w, h);
conv.setInRange(colorRange());
if (!conv.convert(d->planes.constData(), d->line_sizes.constData())) {
qWarning() << "VideoFrame::to error: " << format() << "=>" << fmt;
return VideoFrame();
}
VideoFrame f(w, h, fmt, conv.outData());
f.setBits(conv.outPlanes());
f.setBytesPerLine(conv.outLineSizes());
// Planar RGB is stored G,B,R; packed RGB keeps the RGB color space.
if (fmt.isRGB()) {
f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
} else {
f.setColorSpace(ColorSpace_Unknown);
}
// TODO: color range
f.setTimestamp(timestamp());
f.setDisplayAspectRatio(displayAspectRatio());
f.d_ptr->metadata = d->metadata; // need metadata?
return f;
}
开发者ID:SvetZari,项目名称:QtAV,代码行数:52,代码来源:VideoFrame.cpp
示例8: channelMap
// Build a 4x4 channel-reordering ("swizzle") matrix for packed pixel formats,
// used by the GLSL shader to rearrange sampled texel channels into the order
// the rest of the pipeline expects. Planar formats get the identity matrix
// (their planes are bound as separate textures). Formats not handled by the
// switch fall back to the gl_channel_maps lookup table; unknown formats also
// yield the identity matrix.
// NOTE(review): the matrix rows below are transcribed constants — verify
// against the packed 4:2:2 byte layouts (UYVY/YUYV/VYUY/YVYU) before editing.
static QMatrix4x4 channelMap(const VideoFormat& fmt)
{
if (fmt.isPlanar()) //currently only for planar
return QMatrix4x4();
switch (fmt.pixelFormat()) {
case VideoFormat::Format_UYVY:
return QMatrix4x4(0.0f, 0.5f, 0.0f, 0.5f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_YUYV:
return QMatrix4x4(0.5f, 0.0f, 0.5f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_VYUY:
return QMatrix4x4(0.0f, 0.5f, 0.0f, 0.5f,
0.0f, 0.0f, 1.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_YVYU:
return QMatrix4x4(0.5f, 0.0f, 0.5f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_VYU:
return QMatrix4x4(0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
default:
break;
}
// Fallback: look up a per-channel permutation in the static table and turn
// it into a permutation matrix.
const quint8 *channels = NULL;//{ 0, 1, 2, 3};
for (int i = 0; gl_channel_maps[i].pixfmt != VideoFormat::Format_Invalid; ++i) {
if (gl_channel_maps[i].pixfmt == fmt.pixelFormat()) {
channels = gl_channel_maps[i].channels;
break;
}
}
QMatrix4x4 m;
if (!channels)
return m;
m.fill(0);
for (int i = 0; i < 4; ++i) {
m(i, channels[i]) = 1;
}
qDebug() << m;
return m;
}
开发者ID:cometdlut,项目名称:QtAV,代码行数:51,代码来源:OpenGLHelper.cpp
示例9: DPTR_D
// Fetch the current decoded frame by locking the D3D9 surface and copying its
// planes into a host-memory VideoFrame via copyToFrame().
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame();
    // RAII guard: locks the surface read-only in the ctor, unlocks in the dtor.
    class ScopedD3DLock {
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect)
            : mpD3D(d3d)
        {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    private:
        IDirect3DSurface9 *mpD3D;
    };
    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    //picth >= desc.Width
    //D3DSURFACE_DESC desc;
    //d3d->GetDesc(&desc);
    D3DLOCKED_RECT lock;
    // BUG FIX: this was `ScopedD3DLock(d3d, &lock);`, which constructs a
    // *temporary* destroyed at the end of the statement — the surface was
    // unlocked before lock.pBits was read below. Bind the guard to a named
    // object so the lock is held until the copy completes.
    ScopedD3DLock surface_lock(d3d, &lock);
    if (lock.Pitch == 0) {
        return VideoFrame();
    }
    const VideoFormat fmt = VideoFormat((int)D3dFindFormat(d.render)->avpixfmt);
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", d.render);
        return VideoFrame();
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = d.render == MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
开发者ID:lipy77,项目名称:QtAV,代码行数:50,代码来源:VideoDecoderDXVA.cpp
示例10: VideoFrame
// Convert a frame to the given FFmpeg pixel format using the lazily-created
// libswscale converter, applying the cached brightness/contrast/saturation
// equalizer values. Hardware frames (no host bits) are delegated to
// VideoFrame::to(), which maps them to host memory first. Returns an invalid
// VideoFrame on failure.
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
return VideoFrame();
if (!frame.constBits(0)) // hw surface
return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
const VideoFormat format(frame.format());
//if (fffmt == format.pixelFormatFFmpeg())
// return *this;
if (!m_cvt) {
m_cvt = new ImageConverterSWS();
}
m_cvt->setBrightness(m_eq[0]);
m_cvt->setContrast(m_eq[1]);
m_cvt->setSaturation(m_eq[2]);
m_cvt->setInFormat(format.pixelFormatFFmpeg());
m_cvt->setOutFormat(fffmt);
m_cvt->setInSize(frame.width(), frame.height());
m_cvt->setOutSize(frame.width(), frame.height());
m_cvt->setInRange(frame.colorRange());
// Palettized formats carry the palette as an extra pseudo-plane.
const int pal = format.hasPalette();
QVector<const uchar*> pitch(format.planeCount() + pal);
QVector<int> stride(format.planeCount() + pal);
for (int i = 0; i < format.planeCount(); ++i) {
pitch[i] = frame.constBits(i);
stride[i] = frame.bytesPerLine(i);
}
// NOTE(review): metadata key is spelled "pallete" — it must match whatever
// producer sets this key, so do not "fix" the spelling here alone.
const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
if (pal > 0) {
pitch[1] = (const uchar*)paldata.constData();
stride[1] = paldata.size();
}
if (!m_cvt->convert(pitch.constData(), stride.constData())) {
return VideoFrame();
}
const VideoFormat fmt(fffmt);
VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
f.setBits(m_cvt->outPlanes());
f.setBytesPerLine(m_cvt->outLineSizes());
f.setTimestamp(frame.timestamp());
f.setDisplayAspectRatio(frame.displayAspectRatio());
// metadata?
// Planar RGB is stored G,B,R; packed RGB keeps the RGB color space.
if (fmt.isRGB()) {
f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
} else {
f.setColorSpace(ColorSpace_Unknown);
}
// TODO: color range
return f;
}
开发者ID:SvetZari,项目名称:QtAV,代码行数:50,代码来源:VideoFrame.cpp
示例11: Q_UNUSED
// Map the wrapped D3D9 surface into host memory: lock it, copy the planes
// into a VideoFrame (SSE fast path with fallback), convert to the requested
// format if needed, and store the result into the VideoFrame that `handle`
// points at. Returns `handle` on success, NULL on failure.
void* SurfaceInteropDXVA::mapToHost(const VideoFormat &format, void *handle, int plane)
{
    Q_UNUSED(plane);
    // RAII guard: locks the surface read-only in the ctor, unlocks in the dtor.
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };
    D3DLOCKED_RECT lock;
    // BUG FIX: this was `ScopedD3DLock(m_surface, &lock);`, which constructs a
    // *temporary* destroyed at the end of the statement — the surface was
    // unlocked before lock.pBits was read below. Bind the guard to a named
    // object so the lock is held until the copy completes.
    ScopedD3DLock surface_lock(m_surface, &lock);
    if (lock.Pitch == 0)
        return NULL;
    //picth >= desc.Width
    D3DSURFACE_DESC desc;
    m_surface->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromFourcc(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return NULL;
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    quint8 *src[] = { (quint8*)lock.pBits, 0, 0}; //compute chroma later
    Q_ASSERT(src[0] && pitch[0] > 0);
    const bool swap_uv = desc.Format == MAKEFOURCC('I','M','C','3');
    // try to use SSE. fallback to normal copy if SSE is not supported
    VideoFrame frame(VideoFrame::fromGPU(fmt, frame_width, frame_height, desc.Height, src, pitch, true, swap_uv));
    // TODO: check rgb32 because d3d can use hw to convert
    if (format != fmt)
        frame = frame.to(format);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    *f = frame;
    return f;
}
开发者ID:SindenZhang,项目名称:QtAV,代码行数:49,代码来源:SurfaceInteropDXVA.cpp
示例12: fmt
void GLWidgetRendererPrivate::updateShaderIfNeeded()
{
const VideoFormat& fmt(video_frame.format());
if (fmt != video_format) {
qDebug("pixel format changed: %s => %s", qPrintable(video_format.name()), qPrintable(fmt.name()));
}
VideoMaterialType *newType = materialType(fmt);
if (material_type == newType)
return;
material_type = newType;
// http://forum.doom9.org/archive/index.php/t-160211.html
ColorTransform::ColorSpace cs = ColorTransform::RGB;
if (fmt.isRGB()) {
if (fmt.isPlanar())
cs = ColorTransform::GBR;
} else {
if (video_frame.width() >= 1280 || video_frame.height() > 576) //values from mpv
cs = ColorTransform::BT709;
else
cs = ColorTransform::BT601;
}
if (!prepareShaderProgram(fmt, cs)) {
qWarning("shader program create error...");
return;
} else {
qDebug("shader program created!!!");
}
}
开发者ID:YsqEvilmax,项目名称:QtVideoPlayer,代码行数:28,代码来源:GLWidgetRenderer.cpp
示例13: set_active_format
bool set_active_format (std::shared_ptr<CaptureDevice> dev, const std::string& new_format)
{
VideoFormat v;
bool ret = v.from_string(new_format);
if (ret)
{
return dev->set_video_format(v);
}
else
{
std::cout << "Invalid string description!" << std::endl;
}
return false;
}
开发者ID:TheImagingSource,项目名称:tiscamera,代码行数:16,代码来源:formats.cpp
示例14: allocate
void VideoFrame::allocate(const VideoFormat &format) {
if (format.isEmpty() && d->buffer.isEmpty())
return;
if (!d->buffer.isEmpty() && d->format == format)
return;
d.detach();
d->format = format;
int len = 0;
int offsets[4] = {0};
for (int i=0; i<format.planes(); ++i) {
offsets[i] = len;
len += format.bytesPerPlain(i);
}
d->buffer.resize(len);
for (int i=0; i< format.planes(); ++i)
d->data[i] = (uchar*)d->buffer.data() + offsets[i];
}
开发者ID:akhilo,项目名称:cmplayer,代码行数:17,代码来源:videoframe.cpp
示例15: QPainter
// Prepare the QPainter/paint-device pair for this filter pass.
// With no frame (vframe == null): lazily create a painter, reuse its current
// device, and begin painting on it. With a frame: convert it to an
// RGB32/QImage-compatible format if needed, then wrap the frame's pixel data
// in a QImage and paint directly onto it (destroying any previous device).
void QPainterFilterContext::initializeOnFrame(VideoFrame *vframe)
{
if (!vframe) {
if (!painter) {
painter = new QPainter(); //warning: more than 1 painter on 1 device
}
if (!paint_device) {
paint_device = painter->device();
}
if (!paint_device && !painter->isActive()) {
qWarning("No paint device and painter is not active. No painting!");
return;
}
if (!painter->isActive())
painter->begin(paint_device);
return;
}
VideoFormat format = vframe->format();
if (!format.isValid()) {
qWarning("Not a valid format");
return;
}
// QImage cannot represent this pixel format: convert the frame to RGB32
// in place so it can back a QImage below.
if (format.imageFormat() == QImage::Format_Invalid) {
format.setPixelFormat(VideoFormat::Format_RGB32);
if (!cvt) {
cvt = new VideoFrameConverter();
}
*vframe = cvt->convert(*vframe, format);
}
if (paint_device) {
if (painter && painter->isActive()) {
painter->end(); //destroy a paint device that is being painted is not allowed!
}
delete paint_device;
paint_device = 0;
}
Q_ASSERT(video_width > 0 && video_height > 0);
// direct draw on frame data, so use VideoFrame::constBits()
paint_device = new QImage((uchar*)vframe->constBits(0), video_width, video_height, vframe->bytesPerLine(0), format.imageFormat());
if (!painter)
painter = new QPainter();
own_painter = true;
own_paint_device = true; //TODO: what about renderer is not a widget?
painter->begin((QImage*)paint_device);
}
开发者ID:Czhian,项目名称:QtAV,代码行数:45,代码来源:FilterContext.cpp
示例16: ProgressListenerWrapper
void MPTranscoder::start()
{
m_composer = IMediaComposer::create(ProgressListener(new ProgressListenerWrapper(*this)));
m_composer->addSourceFile(m_input.toStdString());
m_composer->setTargetFile(m_output.toStdString());
VideoFormat videoFormat = IVideoFormat::create(MIMETypeAVC, 640, 480);
videoFormat->setVideoBitRateInKBytes(1500);
videoFormat->setVideoFrameRate(25);
videoFormat->setVideoIFrameInterval(1);
m_composer->setTargetVideoFormat(videoFormat);
AudioFormat audioFormat = IAudioFormat::create(MIMETypeAAC, 48000, 2);
audioFormat->setAudioBitrateInBytes(96 * 1024);
m_composer->setTargetAudioFormat(audioFormat);
m_composer->start();
}
开发者ID:Amlana,项目名称:media-for-mobile,代码行数:19,代码来源:mptranscoder.cpp
示例17: convertTo
// Adopt the converter's output buffers as this frame's data for the new
// format, resizing plane arrays to the new plane count.
// NOTE(review): returns true only on the early "already matches" path and
// false otherwise (including the apparently-successful adoption path) —
// presumably the caller interprets false as "conversion state changed";
// confirm against call sites before relying on the return value.
bool convertTo(const VideoFormat& fmt, const QSizeF &dstSize, const QRectF &roi) {
// NOTE(review): compares a VideoFormat against an int FFmpeg pixel format —
// relies on an operator==/implicit conversion declared elsewhere; verify.
if (fmt == format.pixelFormatFFmpeg()
&& roi == QRectF(0, 0, width, height)
&& dstSize == roi.size())
return true;
if (!conv) {
// No converter available: invalidate the stored format to flag failure.
format.setPixelFormat(VideoFormat::Format_Invalid);
return false;
}
format = fmt;
data = conv->outData();
planes = conv->outPlanes();
line_sizes = conv->outLineSizes();
// Shrink/grow bookkeeping arrays to the new format's plane count.
planes.resize(fmt.planeCount());
line_sizes.resize(fmt.planeCount());
textures.resize(fmt.planeCount());
return false;
}
开发者ID:AlexSnet,项目名称:QtAV,代码行数:20,代码来源:VideoFrame.cpp
示例18: VideoFrame
// Convert a frame to the given FFmpeg pixel format using the lazily-created
// libswscale converter, applying the cached brightness/contrast/saturation
// equalizer values. Hardware frames (no host bits) are delegated to
// VideoFrame::to(), which maps them to host memory first. Returns an invalid
// VideoFrame on failure.
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
return VideoFrame();
if (!frame.bits(0)) // hw surface
return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
const VideoFormat format(frame.format());
//if (fffmt == format.pixelFormatFFmpeg())
// return *this;
if (!m_cvt) {
m_cvt = new ImageConverterSWS();
}
m_cvt->setBrightness(m_eq[0]);
m_cvt->setContrast(m_eq[1]);
m_cvt->setSaturation(m_eq[2]);
m_cvt->setInFormat(format.pixelFormatFFmpeg());
m_cvt->setOutFormat(fffmt);
m_cvt->setInSize(frame.width(), frame.height());
m_cvt->setOutSize(frame.width(), frame.height());
// Gather per-plane source pointers and strides for the converter.
QVector<const uchar*> pitch(format.planeCount());
QVector<int> stride(format.planeCount());
for (int i = 0; i < format.planeCount(); ++i) {
pitch[i] = frame.bits(i);
stride[i] = frame.bytesPerLine(i);
}
if (!m_cvt->convert(pitch.constData(), stride.constData())) {
return VideoFrame();
}
const VideoFormat fmt(fffmt);
VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
f.setBits(m_cvt->outPlanes());
f.setBytesPerLine(m_cvt->outLineSizes());
f.setTimestamp(frame.timestamp());
// metadata?
// Planar RGB is stored G,B,R; packed RGB keeps the RGB color space.
// (ColorSpace_Unknow is the project's enum spelling.)
if (fmt.isRGB()) {
f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
} else {
f.setColorSpace(ColorSpace_Unknow);
}
return f;
}
开发者ID:Jacksonicy,项目名称:QtAV,代码行数:41,代码来源:VideoFrame.cpp
示例19: updateTexturesIfNeeded
// Recompute per-plane texture sizes and effective widths when the frame's
// pixel format or plane geometry (padded linesize / height) changed since the
// last upload, then (re)initialize the GL textures. Texture widths are first
// stored in *bytes* (bytesPerLine) and converted to texel widths later, once
// the GL upload format's bytes-per-texel is known.
void GLWidgetRendererPrivate::updateTexturesIfNeeded()
{
const VideoFormat &fmt = video_frame.format();
bool update_textures = false;
if (fmt != video_format) {
update_textures = true; //FIXME
qDebug("updateTexturesIfNeeded pixel format changed: %s => %s", qPrintable(video_format.name()), qPrintable(fmt.name()));
}
// effective size may change even if plane size not changed
if (update_textures
|| video_frame.bytesPerLine(0) != plane0Size.width() || video_frame.height() != plane0Size.height()
|| (plane1_linesize > 0 && video_frame.bytesPerLine(1) != plane1_linesize)) { // no need to check height if plane 0 sizes are equal?
update_textures = true;
qDebug("---------------------update texture: %dx%d, %s", video_frame.width(), video_frame.height(), video_frame.format().name().toUtf8().constData());
const int nb_planes = fmt.planeCount();
texture_size.resize(nb_planes);
texture_upload_size.resize(nb_planes);
effective_tex_width.resize(nb_planes);
for (int i = 0; i < nb_planes; ++i) {
qDebug("plane linesize %d: padded = %d, effective = %d", i, video_frame.bytesPerLine(i), video_frame.effectiveBytesPerLine(i));
qDebug("plane width %d: effective = %d", video_frame.planeWidth(i), video_frame.effectivePlaneWidth(i));
qDebug("planeHeight %d = %d", i, video_frame.planeHeight(i));
// we have to consider size of opengl format. set bytesPerLine here and change to width later
texture_size[i] = QSize(video_frame.bytesPerLine(i), video_frame.planeHeight(i));
texture_upload_size[i] = texture_size[i];
effective_tex_width[i] = video_frame.effectiveBytesPerLine(i); //store bytes here, modify as width later
// TODO: ratio count the GL_UNPACK_ALIGN?
//effective_tex_width_ratio = qMin((qreal)1.0, (qreal)video_frame.effectiveBytesPerLine(i)/(qreal)video_frame.bytesPerLine(i));
}
plane1_linesize = 0;
if (nb_planes > 1) {
// Rescale plane 0's texture width so all planes share the same
// effective-width ratio (keeps texture coordinates consistent).
texture_size[0].setWidth(texture_size[1].width() * effective_tex_width[0]/effective_tex_width[1]);
// height? how about odd?
plane1_linesize = video_frame.bytesPerLine(1);
}
effective_tex_width_ratio = (qreal)video_frame.effectiveBytesPerLine(nb_planes-1)/(qreal)video_frame.bytesPerLine(nb_planes-1);
qDebug("effective_tex_width_ratio=%f", effective_tex_width_ratio);
plane0Size.setWidth(video_frame.bytesPerLine(0));
plane0Size.setHeight(video_frame.height());
}
if (update_textures) {
initTextures(fmt);
}
}
开发者ID:YsqEvilmax,项目名称:QtVideoPlayer,代码行数:44,代码来源:GLWidgetRenderer.cpp
示例20: videoFormatToGL
bool videoFormatToGL(const VideoFormat& fmt, GLint* internal_format, GLenum* data_format, GLenum* data_type)
{
struct fmt_entry {
VideoFormat::PixelFormat pixfmt;
GLint internal_format;
GLenum format;
GLenum type;
};
// Very special formats, for which OpenGL happens to have direct support
static const struct fmt_entry pixfmt_to_gl_formats[] = {
#ifdef QT_OPENGL_ES_2
{VideoFormat::Format_ARGB32, GL_BGRA, GL_BGRA, GL_UNSIGNED_BYTE },
{VideoFormat::Format_RGB32, GL_BGRA, GL_BGRA, GL_UNSIGNED_BYTE },
#else
{VideoFormat::Format_RGB32, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE },
{VideoFormat::Format_ARGB32, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE },
#endif
{VideoFormat::Format_RGB24, GL_RGB, GL_RGB, GL_UNSIGNED_BYTE },
#ifdef GL_UNSIGNED_SHORT_1_5_5_5_REV
{VideoFormat::Format_RGB555, GL_RGBA, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV},
#endif
{VideoFormat::Format_RGB565, GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, //GL_UNSIGNED_SHORT_5_6_5_REV?
//{VideoFormat::Format_BGRA32, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE },
//{VideoFormat::Format_BGR32, GL_BGRA, GL_BGRA, GL_UNSIGNED_BYTE },
{VideoFormat::Format_BGR24, GL_RGB, GL_BGR, GL_UNSIGNED_BYTE },
#ifdef GL_UNSIGNED_SHORT_1_5_5_5_REV
{VideoFormat::Format_BGR555, GL_RGBA, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV},
#endif
{VideoFormat::Format_BGR565, GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // need swap r b?
};
for (unsigned int i = 0; i < sizeof(pixfmt_to_gl_formats)/sizeof(pixfmt_to_gl_formats[0]); ++i) {
if (pixfmt_to_gl_formats[i].pixfmt == fmt.pixelFormat()) {
*internal_format = pixfmt_to_gl_formats[i].internal_format;
*data_format = pixfmt_to_gl_formats[i].format;
*data_type = pixfmt_to_gl_formats[i].type;
return true;
}
}
return false;
}
开发者ID:NickD2039,项目名称:QtAV,代码行数:41,代码来源:GLWidgetRenderer.cpp
注:本文中的VideoFormat类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论