本文整理汇总了C语言中bytestream2_get_bytes_left函数的典型用法代码示例。如果您正苦于以下问题:C bytestream2_get_bytes_left函数的具体用法?C bytestream2_get_bytes_left怎么用?C bytestream2_get_bytes_left使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bytestream2_get_bytes_left函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: read_rle_sgi
/**
* Read a run length encoded SGI image.
* @param out_buf output buffer
* @param s the current image state
* @return 0 if no error, else return error code.
*/
/**
 * Read a run length encoded SGI image.
 *
 * The pixel data is preceded by an offset table and a length table (one
 * 32-bit entry per scanline and channel); a private copy of the byte
 * reader walks the offset table while the main reader is seeked per row.
 *
 * @param out_buf output buffer; rows are written bottom-up by stepping
 *                backwards with s->linesize
 * @param s the current image state
 * @return 0 if no error, else return error code.
 */
static int read_rle_sgi(uint8_t *out_buf, SgiState *s)
{
    uint8_t *dest_row;
    unsigned int len = s->height * s->depth * 4;
    GetByteContext g_table = s->g;
    unsigned int y, z;
    unsigned int start_offset;
    /* bytes per output scanline — invariant, so computed once instead of
     * being recomputed in the inner loop */
    int linesize = s->width * s->depth * s->bytes_per_channel;
    int ret;

    /* size of RLE offset and length tables */
    if (len * 2 > bytestream2_get_bytes_left(&s->g)) {
        return AVERROR_INVALIDDATA;
    }
    for (z = 0; z < s->depth; z++) {
        dest_row = out_buf;
        for (y = 0; y < s->height; y++) {
            dest_row -= s->linesize;
            /* seek the pixel reader to this row's RLE data */
            start_offset = bytestream2_get_be32(&g_table);
            bytestream2_seek(&s->g, start_offset, SEEK_SET);
            if (s->bytes_per_channel == 1)
                ret = expand_rle_row8(s, dest_row + z, linesize, s->depth);
            else
                ret = expand_rle_row16(s, (uint16_t *)dest_row + z, linesize, s->depth);
            /* each RLE row must expand to exactly one full scanline */
            if (ret != s->width)
                return AVERROR_INVALIDDATA;
        }
    }
    return 0;
}
开发者ID:0Soul,项目名称:FFmpeg,代码行数:37,代码来源:sgidec.c
示例2: expand_rle_row
/**
 * Expand one RLE-coded row into a single channel of the output.
 * @param s the current image state
 * @param out_buf Points to one line after the output buffer.
 * @param out_end end of line in output buffer
 * @param pixelstride pixel stride of input buffer
 * @return size of output in bytes, -1 if buffer overflows
 */
static int expand_rle_row(SgiState *s, uint8_t *out_buf,
                          uint8_t *out_end, int pixelstride)
{
    uint8_t *start = out_buf;

    for (;;) {
        unsigned char opcode, run;

        if (bytestream2_get_bytes_left(&s->g) < 1)
            return AVERROR_INVALIDDATA;
        opcode = bytestream2_get_byteu(&s->g);
        run    = opcode & 0x7f;

        /* a zero run length terminates the row */
        if (run == 0)
            return (out_buf - start) / pixelstride;

        /* Check for buffer overflow. */
        if (out_buf + pixelstride * run >= out_end)
            return -1;

        if (opcode & 0x80) {
            /* literal run: copy 'run' bytes straight from the input */
            do {
                *out_buf = bytestream2_get_byte(&s->g);
                out_buf += pixelstride;
            } while (--run);
        } else {
            /* replicate run: repeat the next input byte 'run' times */
            unsigned char fill = bytestream2_get_byte(&s->g);
            do {
                *out_buf = fill;
                out_buf += pixelstride;
            } while (--run);
        }
    }
}
开发者ID:AVbin,项目名称:libav,代码行数:40,代码来源:sgidec.c
示例3: gif_parse_next_image
/**
 * Walk GIF block labels until an image separator, the trailer, or the
 * end of the input is reached.
 */
static int gif_parse_next_image(GifState *s, AVFrame *frame)
{
    for (;;) {
        int ret, code;

        if (bytestream2_get_bytes_left(&s->gb) <= 0)
            break;

        code = bytestream2_get_byte(&s->gb);
        av_log(s->avctx, AV_LOG_DEBUG, "code=%02x '%c'\n", code, code);

        if (code == GIF_IMAGE_SEPARATOR)
            return gif_read_image(s, frame);

        if (code == GIF_EXTENSION_INTRODUCER) {
            if ((ret = gif_read_extension(s)) < 0)
                return ret;
            continue;
        }

        if (code == GIF_TRAILER)
            /* end of image */
            return AVERROR_EOF;

        /* erroneous block label */
        return AVERROR_INVALIDDATA;
    }
    return AVERROR_EOF;
}
开发者ID:markjreed,项目名称:vice-emu,代码行数:25,代码来源:gifdec.c
示例4: read_rle_sgi
/**
 * Read a run length encoded SGI image.
 * @param out_buf output buffer
 * @param s the current image state
 * @return 0 if no error, else return error number.
 */
static int read_rle_sgi(uint8_t *out_buf, SgiState *s)
{
    GetByteContext offset_table = s->g;
    unsigned int table_len = s->height * s->depth * 4;
    unsigned int chan, row;

    /* size of RLE offset and length tables */
    if (table_len * 2 > bytestream2_get_bytes_left(&s->g))
        return AVERROR_INVALIDDATA;

    for (chan = 0; chan < s->depth; chan++) {
        uint8_t *dest_row = out_buf;

        for (row = 0; row < s->height; row++) {
            unsigned int start;

            dest_row -= s->linesize;
            /* fetch this row's data offset and seek the pixel reader there */
            start = bytestream2_get_be32(&offset_table);
            bytestream2_seek(&s->g, start, SEEK_SET);
            if (expand_rle_row(s, dest_row + chan,
                               dest_row + FFABS(s->linesize),
                               s->depth) != s->width)
                return AVERROR_INVALIDDATA;
        }
    }
    return 0;
}
开发者ID:AVbin,项目名称:libav,代码行数:33,代码来源:sgidec.c
示例5: get_cod
/** get coding parameters for a particular tile or whole image*/
static int get_cod(J2kDecoderContext *s, J2kCodingStyle *c, uint8_t *properties)
{
    J2kCodingStyle tmp;
    int compno;

    /* need Scod(1) + progression order(1) + nlayers(2) + MCT(1) = 5 bytes */
    if (bytestream2_get_bytes_left(&s->g) < 5)
        return AVERROR(EINVAL);

    /* default precinct size exponents (maximum) */
    tmp.log2_prec_width =
    tmp.log2_prec_height = 15;
    tmp.csty = bytestream2_get_byteu(&s->g);
    if (bytestream2_get_byteu(&s->g)) { // progression level
        av_log(s->avctx, AV_LOG_ERROR, "only LRCP progression supported\n");
        return -1;
    }
    tmp.nlayers = bytestream2_get_be16u(&s->g);
    tmp.mct = bytestream2_get_byteu(&s->g); // multiple component transformation
    /* NOTE(review): the return value of get_cox() is ignored here — confirm
     * whether COX parse failures should propagate to the caller. */
    get_cox(s, &tmp);
    /* apply to every component not already overridden by a COC marker */
    for (compno = 0; compno < s->ncomponents; compno++) {
        if (!(properties[compno] & HAD_COC))
            memcpy(c + compno, &tmp, sizeof(J2kCodingStyle));
    }
    return 0;
}
开发者ID:gaoxiong,项目名称:VideoReverse,代码行数:29,代码来源:j2kdec.c
示例6: tgq_decode_mb
/**
 * Decode a single macroblock.
 * @return <0 on error
 */
static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x){
    int mode;
    int i;
    int8_t dc[6];

    mode = bytestream2_get_byte(&s->gb);
    if (mode>12) {
        /* mode > 12: 'mode' is the byte length of a bitstream segment
         * containing six fully coded blocks */
        GetBitContext gb;
        init_get_bits(&gb, s->gb.buffer, FFMIN(bytestream2_get_bytes_left(&s->gb), mode) * 8);
        for(i=0; i<6; i++)
            tgq_decode_block(s, s->block[i], &gb);
        tgq_idct_put_mb(s, s->block, mb_x, mb_y);
        bytestream2_skip(&s->gb, mode);
    }else{
        /* DC-only modes: fill dc[0..5] (presumably 4 luma + 2 chroma —
         * TODO confirm against the block layout) */
        if (mode==3) {
            /* one shared DC for the first four blocks, two explicit DCs */
            memset(dc, bytestream2_get_byte(&s->gb), 4);
            dc[4] = bytestream2_get_byte(&s->gb);
            dc[5] = bytestream2_get_byte(&s->gb);
        }else if (mode==6) {
            /* six explicit DC values */
            bytestream2_get_buffer(&s->gb, dc, 6);
        }else if (mode==12) {
            /* six DC values, each followed by one padding byte */
            for (i = 0; i < 6; i++) {
                dc[i] = bytestream2_get_byte(&s->gb);
                bytestream2_skip(&s->gb, 1);
            }
        }else{
            av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
            return -1;
        }
        tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
    }
    return 0;
}
开发者ID:NullEmpty,项目名称:rk_ffmpeg_android,代码行数:36,代码来源:eatgq.c
示例7: read_uncompressed_sgi
/**
 * Read an uncompressed SGI image.
 * @param out_buf output buffer
 * @param s the current image state
 * @return 0 if read success, else return error code.
 */
static int read_uncompressed_sgi(unsigned char *out_buf, SgiState *s)
{
    int x, y, z;
    unsigned int offset = s->height * s->width * s->bytes_per_channel;
    GetByteContext gp[4];
    uint8_t *out_end;

    /* gp[] holds at most 4 plane readers; reject deeper images instead of
     * overrunning the array in the loops below */
    if (s->depth > 4)
        return AVERROR_INVALIDDATA;

    /* Test buffer size. */
    if (offset * s->depth > bytestream2_get_bytes_left(&s->g))
        return AVERROR_INVALIDDATA;

    /* Create a reader for each plane */
    for (z = 0; z < s->depth; z++) {
        gp[z] = s->g;
        bytestream2_skip(&gp[z], z * offset);
    }

    /* write rows from the bottom of the output up, interleaving one sample
     * from each plane reader per pixel */
    for (y = s->height - 1; y >= 0; y--) {
        out_end = out_buf + (y * s->linesize);
        if (s->bytes_per_channel == 1) {
            for (x = s->width; x > 0; x--)
                for (z = 0; z < s->depth; z++)
                    *out_end++ = bytestream2_get_byteu(&gp[z]);
        } else {
            /* 16-bit samples, read in native endianness */
            uint16_t *out16 = (uint16_t *)out_end;
            for (x = s->width; x > 0; x--)
                for (z = 0; z < s->depth; z++)
                    *out16++ = bytestream2_get_ne16u(&gp[z]);
        }
    }
    return 0;
}
开发者ID:0Soul,项目名称:FFmpeg,代码行数:38,代码来源:sgidec.c
示例8: op
/**
 * Perform decode operation
 * @param dst pointer to destination image buffer
 * @param dst_end pointer to end of destination image buffer
 * @param gb GetByteContext (optional, see below)
 * @param pixel Fill color (optional, see below)
 * @param count Pixel count
 * @param x Pointer to x-axis counter
 * @param width Image width
 * @param linesize Destination image buffer linesize
 * @return non-zero if destination buffer is exhausted
 *
 * a copy operation is achieved when 'gb' is set
 * a fill operation is achieved when 'gb' is null and pixel is >= 0
 * a skip operation is achieved when 'gb' is null and pixel is < 0
 */
static inline int op(uint8_t **dst, const uint8_t *dst_end,
                     GetByteContext *gb,
                     int pixel, int count,
                     int *x, int width, int linesize)
{
    int remaining = width - *x;
    while(count > 0) {
        /* process at most up to the end of the current row */
        int striplen = FFMIN(count, remaining);
        if (gb) {
            /* copy: take striplen bytes from the input */
            if (bytestream2_get_bytes_left(gb) < striplen)
                goto exhausted;
            bytestream2_get_bufferu(gb, *dst, striplen);
        } else if (pixel >= 0)
            /* fill with constant color; pixel < 0 means skip (advance only) */
            memset(*dst, pixel, striplen);
        *dst += striplen;
        remaining -= striplen;
        count -= striplen;
        if (remaining <= 0) {
            /* row complete: step to the start of the next row */
            *dst += linesize - width;
            remaining = width;
        }
        /* linesize can be negative (the two comparisons below handle both
         * directions of row traversal) */
        if (linesize > 0) {
            if (*dst >= dst_end) goto exhausted;
        } else {
            if (*dst <= dst_end) goto exhausted;
        }
    }
    /* report how far into the current row we stopped */
    *x = width - remaining;
    return 0;

exhausted:
    *x = width - remaining;
    return 1;
}
开发者ID:Vadiza,项目名称:sage-3.5b,代码行数:50,代码来源:anm.c
示例9: mm_decode_inter
/*
 * Decode an inter (delta) frame: a control stream of (length, x) pairs
 * selects which pixels are replaced from a pixel-data stream that starts
 * at data_off.
 *
 * @param half_horiz Half horizontal resolution (0 or 1)
 * @param half_vert Half vertical resolution (0 or 1)
 * @return 0 on success, negative AVERROR on invalid data
 */
static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
{
    /* BUG FIX: 'y' was uninitialized but is read below ("y += x" and the
     * height bounds check) — undefined behavior; it must start at 0. */
    int data_off = bytestream2_get_le16(&s->gb), y = 0;
    GetByteContext data_ptr;

    if (bytestream2_get_bytes_left(&s->gb) < data_off)
        return AVERROR_INVALIDDATA;

    /* the packet is split: control codes up front, pixel data at data_off */
    bytestream2_init(&data_ptr, s->gb.buffer + data_off, bytestream2_get_bytes_left(&s->gb) - data_off);

    while (s->gb.buffer < data_ptr.buffer_start) {
        int i, j;
        int length = bytestream2_get_byte(&s->gb);
        int x = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1);
        length &= 0x7F;

        if (length==0) {
            /* zero length: 'x' is a vertical skip instead */
            y += x;
            continue;
        }

        if (y + half_vert >= s->avctx->height)
            return 0;

        for(i=0; i<length; i++) {
            /* each control byte is a bitmask of 8 pixels to replace */
            int replace_array = bytestream2_get_byte(&s->gb);
            for(j=0; j<8; j++) {
                int replace = (replace_array >> (7-j)) & 1;
                if (replace) {
                    int color = bytestream2_get_byte(&data_ptr);
                    s->frame.data[0][y*s->frame.linesize[0] + x] = color;
                    if (half_horiz)
                        s->frame.data[0][y*s->frame.linesize[0] + x + 1] = color;
                    if (half_vert) {
                        s->frame.data[0][(y+1)*s->frame.linesize[0] + x] = color;
                        if (half_horiz)
                            s->frame.data[0][(y+1)*s->frame.linesize[0] + x + 1] = color;
                    }
                }
                /* NOTE(review): 'x' is not bounds-checked against the frame
                 * width here — confirm whether a check is needed. */
                x += 1 + half_horiz;
            }
        }
        y += 1 + half_vert;
    }
    return 0;
}
开发者ID:shanewfx,项目名称:FFmpeg,代码行数:51,代码来源:mmvideo.c
示例10: decode_extradata_ps_mp4
/* There are (invalid) samples in the wild with mp4-style extradata, where the
 * parameter sets are stored unescaped (i.e. as RBSP).
 * This function catches the parameter set decoding failure and tries again
 * after escaping it */
static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
                                   int err_recognition, void *logctx)
{
    int ret;

    ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
    if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
        GetByteContext gbc;
        PutByteContext pbc;
        uint8_t *escaped_buf;
        int escaped_buf_size;

        av_log(logctx, AV_LOG_WARNING,
               "SPS decoding failure, trying again after escaping the NAL\n");

        /* worst-case escaping grows the buffer by 3/2; the resulting size is
         * also written into a 16-bit field below, hence the INT16_MAX bound */
        if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
            return AVERROR(ERANGE);
        escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
        escaped_buf = av_mallocz(escaped_buf_size);
        if (!escaped_buf)
            return AVERROR(ENOMEM);

        bytestream2_init(&gbc, buf, buf_size);
        bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);

        while (bytestream2_get_bytes_left(&gbc)) {
            /* escape: a 24-bit value <= 3 (0x000000..0x000003) is replaced
             * by 0x000003 and two input bytes are consumed, inserting the
             * emulation-prevention byte */
            if (bytestream2_get_bytes_left(&gbc) >= 3 &&
                bytestream2_peek_be24(&gbc) <= 3) {
                bytestream2_put_be24(&pbc, 3);
                bytestream2_skip(&gbc, 2);
            } else
                bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
        }

        escaped_buf_size = bytestream2_tell_p(&pbc);
        /* the first two bytes hold the size of the escaped parameter set */
        AV_WB16(escaped_buf, escaped_buf_size - 2);

        ret = decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
        av_freep(&escaped_buf);
        if (ret < 0)
            return ret;
    }
    return 0;
}
开发者ID:411697643,项目名称:FFmpeg,代码行数:49,代码来源:h264_parse.c
示例11: gif_read_header1
/**
 * Parse the GIF signature and logical screen descriptor, plus the global
 * palette if one is present.
 */
static int gif_read_header1(GifState *s)
{
    uint8_t sig[6];
    int flags, ncolors;
    int has_global_palette;

    /* signature (6) + screen descriptor (7) = 13 bytes minimum */
    if (bytestream2_get_bytes_left(&s->gb) < 13)
        return AVERROR_INVALIDDATA;

    /* read gif signature */
    bytestream2_get_buffer(&s->gb, sig, 6);
    if (memcmp(sig, gif87a_sig, 6) &&
        memcmp(sig, gif89a_sig, 6))
        return AVERROR_INVALIDDATA;

    /* read screen header */
    s->transparent_color_index = -1;
    s->screen_width  = bytestream2_get_le16(&s->gb);
    s->screen_height = bytestream2_get_le16(&s->gb);
    if ((unsigned)s->screen_width  > 32767 ||
        (unsigned)s->screen_height > 32767) {
        av_log(NULL, AV_LOG_ERROR, "picture size too large\n");
        return AVERROR_INVALIDDATA;
    }

    flags = bytestream2_get_byte(&s->gb);
    s->color_resolution = ((flags & 0x70) >> 4) + 1;
    has_global_palette  = (flags & 0x80);
    s->bits_per_pixel   = (flags & 0x07) + 1;
    s->background_color_index = bytestream2_get_byte(&s->gb);
    bytestream2_get_byte(&s->gb); /* ignored */

    av_dlog(s->avctx, "gif: screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
            s->screen_width, s->screen_height, s->bits_per_pixel,
            has_global_palette);

    if (has_global_palette) {
        ncolors = 1 << s->bits_per_pixel;
        if (bytestream2_get_bytes_left(&s->gb) < ncolors * 3)
            return AVERROR_INVALIDDATA;
        bytestream2_get_buffer(&s->gb, s->global_palette, ncolors * 3);
    }
    return 0;
}
开发者ID:mgorny,项目名称:libav,代码行数:44,代码来源:gifdec.c
示例12: get_qcx
/** get common part for QCD and QCC segments */
static int get_qcx(J2kDecoderContext *s, int n, J2kQuantStyle *q)
{
int i, x;
if (bytestream2_get_bytes_left(&s->g) < 1)
return AVERROR(EINVAL);
x = bytestream2_get_byteu(&s->g); // Sqcd
q->nguardbits = x >> 5;
q->quantsty = x & 0x1f;
if (q->quantsty == J2K_QSTY_NONE){
n -= 3;
if (bytestream2_get_bytes_left(&s->g) < n || 32*3 < n)
return AVERROR(EINVAL);
for (i = 0; i < n; i++)
q->expn[i] = bytestream2_get_byteu(&s->g) >> 3;
} else if (q->quantsty == J2K_QSTY_SI){
开发者ID:0x0B501E7E,项目名称:ffmpeg,代码行数:20,代码来源:j2kdec.c
示例13: hq_hqa_decode_frame
static int hq_hqa_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
HQContext *ctx = avctx->priv_data;
AVFrame *pic = data;
uint32_t info_tag;
unsigned int data_size;
int ret;
unsigned tag;
bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);
if (bytestream2_get_bytes_left(&ctx->gbc) < 4 + 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", avpkt->size);
return AVERROR_INVALIDDATA;
}
info_tag = bytestream2_peek_le32(&ctx->gbc);
if (info_tag == MKTAG('I', 'N', 'F', 'O')) {
int info_size;
bytestream2_skip(&ctx->gbc, 4);
info_size = bytestream2_get_le32(&ctx->gbc);
if (bytestream2_get_bytes_left(&ctx->gbc) < info_size) {
av_log(avctx, AV_LOG_ERROR, "Invalid INFO size (%d).\n", info_size);
return AVERROR_INVALIDDATA;
}
ff_canopus_parse_info_tag(avctx, ctx->gbc.buffer, info_size);
bytestream2_skip(&ctx->gbc, info_size);
}
data_size = bytestream2_get_bytes_left(&ctx->gbc);
if (data_size < 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", data_size);
return AVERROR_INVALIDDATA;
}
/* HQ defines dimensions and number of slices, and thus slice traversal
* order. HQA has no size constraint and a fixed number of slices, so it
* needs a separate scheme for it. */
tag = bytestream2_get_le32(&ctx->gbc);
if ((tag & 0x00FFFFFF) == (MKTAG('U', 'V', 'C', ' ') & 0x00FFFFFF)) {
ret = hq_decode_frame(ctx, pic, tag >> 24, data_size);
} else if (tag == MKTAG('H', 'Q', 'A', '1')) {
开发者ID:Diagonactic,项目名称:plex-new-transcoder,代码行数:43,代码来源:hq_hqa.c
示例14: apng_probe
/*
 * To be a valid APNG file, we mandate, in this order:
 * PNGSIG
 * IHDR
 * ...
 * acTL
 * ...
 * IDAT
 */
static int apng_probe(AVProbeData *p)
{
    GetByteContext gb;
    int state = 0;
    uint32_t len, tag;

    bytestream2_init(&gb, p->buf, p->buf_size);

    if (bytestream2_get_be64(&gb) != PNGSIG)
        return 0;

    for (;;) {
        /* BUG FIX: once the probe buffer is exhausted, the checked
         * bytestream2_get_* readers keep returning 0 without advancing,
         * so len == 0 / tag == 0 would fall into the default branch and
         * skip 0 bytes forever. Bail out when a full chunk header
         * (4-byte length + 4-byte tag) can no longer be read. */
        if (bytestream2_get_bytes_left(&gb) < 8)
            return 0;

        len = bytestream2_get_be32(&gb);
        if (len > 0x7fffffff)
            return 0;

        tag = bytestream2_get_le32(&gb);
        /* we don't check IDAT size, as this is the last tag
         * we check, and it may be larger than the probe buffer */
        if (tag != MKTAG('I', 'D', 'A', 'T') &&
            len > bytestream2_get_bytes_left(&gb))
            return 0;

        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if (len != 13)
                return 0;
            if (av_image_check_size(bytestream2_get_be32(&gb), bytestream2_get_be32(&gb), 0, NULL))
                return 0;
            /* skip the remaining 5 IHDR payload bytes + 4-byte CRC */
            bytestream2_skip(&gb, 9);
            state++;
            break;
        case MKTAG('a', 'c', 'T', 'L'):
            if (state != 1 ||
                len != 8 ||
                bytestream2_get_be32(&gb) == 0) /* 0 is not a valid value for number of frames */
                return 0;
            /* skip the remaining 4 payload bytes + 4-byte CRC */
            bytestream2_skip(&gb, 8);
            state++;
            break;
        case MKTAG('I', 'D', 'A', 'T'):
            if (state != 2)
                return 0;
            goto end;
        default:
            /* skip other tags (payload + CRC) */
            bytestream2_skip(&gb, len + 4);
            break;
        }
    }

end:
    return AVPROBE_SCORE_MAX;
}
开发者ID:alikuro,项目名称:FFmpeg,代码行数:63,代码来源:apngdec.c
示例15: parse_section_header
/* The first three bytes are the size of the section past the header, or zero
 * if the length is stored in the next long word. The fourth byte in the first
 * long word indicates the type of the current section. */
static int parse_section_header(GetByteContext *gbc, int *section_size,
                                enum HapSectionType *section_type)
{
    if (bytestream2_get_bytes_left(gbc) < 4)
        return AVERROR_INVALIDDATA;

    *section_size = bytestream2_get_le24(gbc);
    *section_type = bytestream2_get_byte(gbc);

    /* zero means the real size follows as a full 32-bit little-endian word */
    if (!*section_size) {
        if (bytestream2_get_bytes_left(gbc) < 4)
            return AVERROR_INVALIDDATA;
        *section_size = bytestream2_get_le32(gbc);
    }

    /* the section must fit in the remaining input and be non-negative */
    if (*section_size < 0 || *section_size > bytestream2_get_bytes_left(gbc))
        return AVERROR_INVALIDDATA;

    return 0;
}
开发者ID:Bilibili,项目名称:FFmpeg,代码行数:24,代码来源:hapdec.c
示例16: decode
/* Range-coder decode step: consume the coded symbol's interval and
 * renormalize by shifting in input bytes while the range is below TOP. */
static int decode(GetByteContext *gb, RangeCoder *rc, unsigned cumFreq, unsigned freq, unsigned total_freq)
{
    rc->code  -= cumFreq * rc->range;
    rc->range *= freq;

    while (rc->range < TOP) {
        if (bytestream2_get_bytes_left(gb) <= 0)
            break;
        rc->code   = (rc->code << 8) | bytestream2_get_byte(gb);
        rc->range <<= 8;
    }
    return 0;
}
开发者ID:Diagonactic,项目名称:plex-new-transcoder,代码行数:13,代码来源:scpr.c
示例17: decode_rle
/*
 * Decode run-length-coded lines into the frame: a control byte with the
 * high bit set means "repeat the next byte 257 - code times", otherwise
 * "copy code + 1 literal bytes". Output samples are written with stride
 * 'step', wrapping to the next interleaved component at each row end.
 */
static int decode_rle(AVCodecContext *avctx, AVFrame *p, GetByteContext *gbc,
                      int step)
{
    int i, j;
    /* bytes in one full interleaved output row */
    int offset = avctx->width * step;
    uint8_t *outdata = p->data[0];

    for (i = 0; i < avctx->height; i++) {
        int size, left, code, pix;
        uint8_t *out = outdata;
        int pos = 0;

        /* size of packed line */
        size = left = bytestream2_get_be16(gbc);
        if (bytestream2_get_bytes_left(gbc) < size)
            return AVERROR_INVALIDDATA;

        /* decode line */
        while (left > 0) {
            code = bytestream2_get_byte(gbc);
            if (code & 0x80 ) { /* run */
                pix = bytestream2_get_byte(gbc);
                /* run length is 257 - code (code 0x80..0xff -> 2..129) */
                for (j = 0; j < 257 - code; j++) {
                    out[pos] = pix;
                    pos += step;
                    /* 'pos' walks one component with stride 'step'; at the
                     * row end, wrap back and advance to the next component */
                    if (pos >= offset) {
                        pos -= offset;
                        pos++;
                    }
                    /* a second overshoot means the input overfills the row */
                    if (pos >= offset)
                        return AVERROR_INVALIDDATA;
                }
                left -= 2;
            } else { /* copy */
                /* literal: code + 1 bytes straight from the input */
                for (j = 0; j < code + 1; j++) {
                    out[pos] = bytestream2_get_byte(gbc);
                    pos += step;
                    if (pos >= offset) {
                        pos -= offset;
                        pos++;
                    }
                    if (pos >= offset)
                        return AVERROR_INVALIDDATA;
                }
                left -= 2 + code;
            }
        }
        outdata += p->linesize[0];
    }
    return 0;
}
开发者ID:309746069,项目名称:FFmpeg,代码行数:51,代码来源:qdrw.c
示例18: set_palette
/* Read a 256-entry palette of big-endian 24-bit values into the frame's
 * palette plane, scaling each entry by 4. */
static int set_palette(BethsoftvidContext *ctx)
{
    uint32_t *palette = (uint32_t *)ctx->frame.data[1];
    int i;

    if (bytestream2_get_bytes_left(&ctx->g) < 256*3)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < 256; i++)
        palette[i] = bytestream2_get_be24u(&ctx->g) * 4;

    ctx->frame.palette_has_changed = 1;
    return 0;
}
开发者ID:simock85,项目名称:libav,代码行数:14,代码来源:bethsoftvideo.c
示例19: parse_section_header
/* The first three bytes are the size of the section past the header, or zero
 * if the length is stored in the next long word. The fourth byte in the first
 * long word indicates the type of the current section. */
static int parse_section_header(AVCodecContext *avctx)
{
    HapContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    int size;

    if (bytestream2_get_bytes_left(gbc) < 4)
        return AVERROR_INVALIDDATA;

    size = bytestream2_get_le24(gbc);
    ctx->section_type = bytestream2_get_byte(gbc);

    /* zero means the real size follows as a full 32-bit little-endian word */
    if (!size) {
        if (bytestream2_get_bytes_left(gbc) < 4)
            return AVERROR_INVALIDDATA;
        size = bytestream2_get_le32(gbc);
    }

    /* a valid section is non-empty and fits in the remaining input */
    if (!size || size > bytestream2_get_bytes_left(gbc))
        return AVERROR_INVALIDDATA;

    return size;
}
开发者ID:hai046,项目名称:MediaPlayer,代码行数:27,代码来源:hapdec.c
示例20: get_coc
/** get coding parameters for a component in the whole image on a particular tile */
static int get_coc(J2kDecoderContext *s, J2kCodingStyle *c, uint8_t *properties)
{
    int compno;

    /* need the component index byte plus at least the Scoc byte */
    if (bytestream2_get_bytes_left(&s->g) < 2)
        return AVERROR(EINVAL);

    compno = bytestream2_get_byteu(&s->g);
    /* BUG FIX: 'c' and 'properties' are indexed by compno and only have
     * s->ncomponents entries — an unchecked stream value would read and
     * write out of bounds */
    if (compno >= s->ncomponents)
        return AVERROR(EINVAL);

    c += compno;
    c->csty = bytestream2_get_byte(&s->g);
    get_cox(s, c);

    properties[compno] |= HAD_COC;
    return 0;
}
开发者ID:0x0B501E7E,项目名称:ffmpeg,代码行数:17,代码来源:j2kdec.c
注:本文中的bytestream2_get_bytes_left函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论