本文整理汇总了Python中snappy.decompress函数的典型用法代码示例。如果您正苦于以下问题:Python decompress函数的具体用法?Python decompress怎么用?Python decompress使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了decompress函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: snappy_decode
def snappy_decode(payload):
    """Decompress a snappy payload, with or without xerial framing.

    A payload that begins with the xerial magic header is a sequence of
    size-prefixed snappy blocks; anything else is a single plain snappy
    stream.

    :param payload: bytes-like compressed input.
    :returns: decompressed bytes.
    :raises NotImplementedError: when the snappy codec is unavailable.
    """
    if not has_snappy():
        raise NotImplementedError("Snappy codec is not available")
    if not payload.startswith(_XERIAL_HEADER):
        # Plain (non-xerial) snappy stream.
        return snappy.decompress(payload)
    # TODO ? Should become a fileobj ?
    view = memoryview(payload)
    total = len(payload)
    pos = 16  # skip past the 16-byte xerial magic header
    chunks = []
    while pos < total:
        # Each block: big-endian int32 size, then that many compressed bytes.
        (block_size,) = struct.unpack_from('!i', view, pos)
        pos += 4
        stop = pos + block_size
        # XXX snappy requires a bytes-like object but doesn't accept
        # a memoryview, so we must copy.
        chunks.append(snappy.decompress(view[pos:stop].tobytes()))
        pos = stop
    # See https://atleastfornow.net/blog/not-all-bytes/
    return b''.join(chunks)
开发者ID:ciena,项目名称:afkak,代码行数:25,代码来源:codec.py
示例2: decode_snappy
def decode_snappy(buff):
    """Decode a buffer using Snappy

    If xerial is found to be in use, the buffer is decoded in a fashion
    compatible with the xerial snappy library.

    Adapted from kafka-python
    https://github.com/mumrah/kafka-python/pull/127/files
    """
    if snappy is None:
        raise ImportError("Please install python-snappy")
    if not _detect_xerial_stream(buff):
        # No xerial framing: the whole buffer is one snappy stream.
        return snappy.decompress(buff)
    # Skip the 16-byte xerial header, then walk the size-prefixed blocks.
    # NOTE: `buffer` is the Python 2 builtin, as in the original.
    body = buffer(buff[16:])
    total = len(body)
    pos = 0
    chunks = []
    while pos < total:
        (block_size,) = struct.unpack_from('!i', body[pos:])
        pos += 4
        stop = pos + block_size
        chunks.append(snappy.decompress(body[pos:stop]))
        pos = stop
    # Joining the chunks is equivalent to writing them into a StringIO
    # and reading it back, as the original did.
    return ''.join(chunks)
开发者ID:EricLau2018,项目名称:pykafka,代码行数:26,代码来源:compression.py
示例3: snappy_unpack_blob
def snappy_unpack_blob(blob, sep=SEP):
    """Decode a snappy-packed blob back into a numpy array.

    Returns None for an empty blob.  The first byte is a tag: 'S' marks a
    string array (decompressed text split on *sep*); any other tag is a
    key into the module-level ``lookup`` dtype table.
    """
    if len(blob) == 0:
        return None
    tag = blob[0]
    if tag == 'S':
        # String payload: split the decompressed text into an object array.
        return np.array(snappy.decompress(blob[1:]).split(sep))
    arr = np.frombuffer(snappy.decompress(blob[1:]), dtype=lookup[tag])
    # hack since arrays arent writable from buffer and we need this for
    # comp_het phasing.
    if tag == '?':
        arr.setflags(write=True)
    return arr
开发者ID:arq5x,项目名称:gemini,代码行数:11,代码来源:compression.py
示例4: _get_data
def _get_data(cls, stream, encoding='utf-8', compress_option=None):
    """Read *stream* fully and undo its transport-level compression.

    *stream* may be text, bytes, or a file-like object; text is encoded
    with *encoding*.  Raw and zlib payloads are returned as-is (requests
    already inflates zlib); snappy payloads are decompressed here.

    :raises IOError: for an unrecognized compression algorithm.
    """
    if isinstance(stream, six.text_type):
        data = stream.encode(encoding)
    elif isinstance(stream, six.binary_type):
        data = stream
    else:
        # due to the restriction of protobuf api, just read the data all
        data = stream.read()
        stream.close()  # directly close the stream
        if isinstance(data, six.text_type):
            data = data.encode(encoding)

    algo = None if compress_option is None else compress_option.algorithm
    if algo is None or algo == CompressOption.CompressAlgorithm.ODPS_RAW:
        return data
    if algo == CompressOption.CompressAlgorithm.ODPS_ZLIB:
        # because requests do the unzip automatically, thanks to them O.O
        return data
    if algo == CompressOption.CompressAlgorithm.ODPS_SNAPPY:
        try:
            import snappy
        except ImportError:
            raise errors.DependencyNotInstalledError(
                'python-snappy library is required for snappy support')
        return snappy.decompress(data)
    raise IOError('invalid compression option.')
开发者ID:tradaniel,项目名称:aliyun-odps-python-sdk,代码行数:26,代码来源:io.py
示例5: _ReadChunkFromBevy
def _ReadChunkFromBevy(self, chunk_id, bevy, bevy_index, index_size):
    """Seek to one chunk inside an open bevy segment and decompress it.

    :param chunk_id: global chunk number; reduced modulo chunks_per_segment.
    :param bevy: open bevy segment (Seek/Read/Size/Tell interface).
    :param bevy_index: offsets of each chunk within the bevy.
    :param index_size: number of valid entries in bevy_index.
    :returns: the uncompressed chunk bytes.
    :raises IOError: when the index is empty or too short for chunk_id.
    :raises RuntimeError: for an unsupported compression method.
    """
    local_id = chunk_id % self.chunks_per_segment
    if index_size == 0:
        LOGGER.error("Index empty in %s: %s", self.urn, chunk_id)
        raise IOError("Index empty in %s: %s" % (self.urn, chunk_id))

    # The segment is not completely full.
    if local_id >= index_size:
        LOGGER.error("Bevy index too short in %s: %s",
                     self.urn, chunk_id)
        raise IOError("Bevy index too short in %s: %s" % (
            self.urn, chunk_id))

    if local_id == index_size - 1:
        # For the last chunk in the bevy, consume to the end of the bevy
        # segment.
        compressed_size = bevy.Size() - bevy.Tell()
    else:
        compressed_size = bevy_index[local_id + 1] - bevy_index[local_id]

    bevy.Seek(bevy_index[local_id], 0)
    cbuffer = bevy.Read(compressed_size)

    if self.compression == lexicon.AFF4_IMAGE_COMPRESSION_ZLIB:
        return zlib.decompress(cbuffer)
    if snappy and self.compression == lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY:
        return snappy.decompress(cbuffer)
    if self.compression == lexicon.AFF4_IMAGE_COMPRESSION_STORED:
        return cbuffer
    raise RuntimeError(
        "Unable to process compression %s" % self.compression)
开发者ID:BwRy,项目名称:aff4,代码行数:34,代码来源:aff4_image.py
示例6: u_slug
def u_slug(username, slug):
    """Render one of the current user's posts, looked up by slug.

    Builds short "previous"/"next" navigation lists (4 posts total,
    ideally 2 per side), decrypts and decompresses the post body when
    present, and aborts with 404 when no post matches.

    NOTE(review): the ``username`` route argument is unused — the post is
    resolved against ``current_user`` instead; confirm this is intended.
    """
    user = current_user
    post = user.posts.filter_by(slug=slug).first()
    if post:
        # Candidate navigation posts: up to 4 on each side of this post's
        # creation timestamp.
        _prev = user.posts.filter(Post.created_timestamp < post.created_timestamp).slice(0, 4)
        _next = Post.query.filter(User.username==user.username,Post.created_timestamp > post.created_timestamp).order_by(Post.created_timestamp).slice(0, 4)
        _prev_count = _prev.count()
        _next_count = _next.count()
        # Balance the lists: when one side is short, the other fills the
        # remaining slots (4 total); otherwise take 2 from each side.
        if _prev_count < 2:
            _next = _next.slice(0, 4 - _prev_count)
        elif _next_count < 2:
            _prev = _prev.slice(0, 4 - _next_count)
        else:
            _prev = _prev.slice(0, 2)
            _next = _next.slice(0, 2)
        if post.content:
            # Decrypt: half of the user key lives in the session, the other
            # half is the app MASTER_KEY; XOR recombines them, then the body
            # is AES-decrypted and snappy-decompressed.
            half_key = session[generate_hash(user.user_key_salt)]
            key = xor_keys(half_key, app.config['MASTER_KEY'])
            content = AES_decrypt(key, post.content)
            content = snappy.decompress(content)
            return render_template("post.html", content=content, user=user, post=post, next=_next, prev=_prev)
        # Post exists but has no body.
        return render_template("post.html", content='', user=user, post=post, next=_next, prev=_prev)
    abort(404)
开发者ID:kevinisaac,项目名称:journal,代码行数:26,代码来源:views.py
示例7: UnpackState
def UnpackState(packed_state):
  """Convert a packed State binary string into a StateStruct object.

  Dispatches on the one-byte format marker at the start of the string:
  STATE_MARK_ZIP and STATE_MARK_SNAPPY carry a StateMeta header, an
  adler32 checksum, and a compressed pickle; STATE_MARK_LIGHT carries a
  StateMeta header and an uncompressed pickle.

  Args:
    packed_state - Binary string of the type produces by PackState.

  Returns:
    Populated StateStruct object, or None for empty input.

  Raises:
    ValueError - on checksum mismatch or an unrecognized format marker.
  """
  if not packed_state:
    return None

  mark = ord(packed_state[0])

  if mark == STATE_MARK_ZIP:
    # Verify and decompress a zlib-packed State.
    return pickle.loads(zlib.decompress(_VerifiedPayload(packed_state)))
  elif mark == STATE_MARK_SNAPPY:
    # Verify and decompress a snappy-packed State.
    return pickle.loads(snappy.decompress(_VerifiedPayload(packed_state)))
  elif mark == STATE_MARK_LIGHT:
    # Extract the meta-data Struct from the packed data.
    meta = StateMeta()
    meta.Deserialize(packed_state)
    # Extract the State buffer from the packed data.
    state_buffer = packed_state[meta.Size():]
    # BUG FIX: the original called pickle.load(), which expects a file
    # object; state_buffer is a byte string, so loads() is correct.
    return pickle.loads(state_buffer)
  else:
    # Unsupported format.
    raise ValueError('Unrecognized State serialization format')


def _VerifiedPayload(packed_state):
  """Strip the StateMeta header and verify the payload's adler32 checksum."""
  meta = StateMeta()
  meta.Deserialize(packed_state)
  # Extract the compressed State from the packed data.
  payload = packed_state[meta.Size():]
  # Compute the checksum and make sure it matches the metadata.
  if zlib.adler32(payload) != meta.checksum:
    raise ValueError('Compressed State Checksum Error')
  return payload
开发者ID:dgouldin,项目名称:taba,代码行数:60,代码来源:taba_state.py
示例8: _read_page
def _read_page(fo, page_header, column_metadata):
    """Internal function to read the data page from the given file-object
    and convert it to raw, uncompressed bytes (if necessary)."""
    compressed = fo.read(page_header.compressed_page_size)
    codec = column_metadata.codec
    if codec is None or codec == CompressionCodec.UNCOMPRESSED:
        raw_bytes = compressed
    elif codec == CompressionCodec.SNAPPY:
        raw_bytes = snappy.decompress(compressed)
    elif codec == CompressionCodec.GZIP:
        # NOTE: StringIO here is the Python 2 module, as in the original.
        with gzip.GzipFile(fileobj=StringIO.StringIO(compressed), mode='rb') as f:
            raw_bytes = f.read()
    else:
        raise ParquetFormatException(
            "Unsupported Codec: {0}".format(codec))
    logger.debug(
        "Read page with compression type {0}. Bytes {1} -> {2}".format(
            _get_name(CompressionCodec, codec),
            page_header.compressed_page_size,
            page_header.uncompressed_page_size))
    # Sanity check: decompression must yield exactly the advertised size.
    assert len(raw_bytes) == page_header.uncompressed_page_size, \
        "found {0} raw bytes (expected {1})".format(
            len(raw_bytes),
            page_header.uncompressed_page_size)
    return raw_bytes
开发者ID:arahuja,项目名称:parquet-python,代码行数:27,代码来源:__init__.py
示例9: get_subvolume
def get_subvolume(self, box_zyx, scale=0):
    """
    Fetch a subvolume from the remote BrainMaps volume.

    Args:
        box_zyx: (start, stop) tuple, in ZYX order.
        scale: Which scale to fetch the subvolume from.

    Returns:
        volume (ndarray), where volume.shape = (stop - start)
    """
    box_zyx = np.asarray(box_zyx)
    shape_zyx = box_zyx[1] - box_zyx[0]
    # The service expects XYZ ordering, so reverse the coordinates.
    corner_xyz = box_zyx[0][::-1]
    shape_xyz = shape_zyx[::-1]
    compressed = fetch_subvol_data(self.http,
                                   self.project,
                                   self.dataset,
                                   self.volume_id,
                                   corner_xyz,
                                   shape_xyz,
                                   scale,
                                   self.change_stack_id,
                                   self.use_gzip)
    raw = snappy.decompress(compressed)
    return np.frombuffer(raw, dtype=self.dtype).reshape(shape_zyx)
开发者ID:janelia-flyem,项目名称:DVIDSparkServices,代码行数:31,代码来源:brainmaps.py
示例10: decompress
def decompress(x):
    """Inflate *x*, auto-detecting zlib vs. snappy compression.

    Luckily 0x78 0x9c is an invalid preamble for Snappy: if the block
    were 120 bytes the preamble would be 0x78 0x00, and the first byte
    cannot be 0x78 in any other case — so those prefixes mean zlib.

    BUG FIX: the original compared ``x[0] == '\\x78'``, which only works
    on Python 2 str (on Python 3 bytes, ``x[0]`` is an int) and raises
    IndexError for 1-byte input.  Slicing (``x[:1]``, ``x[1:2]``) works
    identically on Python 2 str and Python 3 bytes, and degrades to the
    snappy branch for short input instead of raising.
    """
    if x[:1] == b'\x78' and x[1:2] in (b'\x9c', b'\xda', b'\x01'):
        return zlib.decompress(x)
    return snappy.decompress(x)
开发者ID:bitdeli,项目名称:bitdeli-py,代码行数:8,代码来源:bencode.py
示例11: decode_ins_ops
def decode_ins_ops(self, event):
    """Parses the data field of a MicroEventLog event and returns
    a sequence of instruction ops (micro ops, grouped by instruction)."""
    assert event.HasField('micro_event_log')
    raw = snappy.decompress(event.micro_event_log.data)
    stream = StringIO.StringIO(raw)
    with contextlib.closing(stream):
        io = self._io_for_arch()(stream)
        for ins_op in InstructionOpsDecoder(io).decode_stream():
            yield ins_op
开发者ID:bls,项目名称:ztracer,代码行数:9,代码来源:trace_file.py
示例12: decode_micro_ops
def decode_micro_ops(self, event):
    """Parses the data field of a MicroEventLog event and returns
    a sequence of micro ops. """
    assert event.HasField('micro_event_log')
    raw = snappy.decompress(event.micro_event_log.data)
    stream = StringIO.StringIO(raw)
    with contextlib.closing(stream):
        io = self._io_for_arch()(stream)
        for op in MicroOpDecoder(io).decode_stream():
            yield op
开发者ID:bls,项目名称:ztracer,代码行数:9,代码来源:trace_file.py
示例13: untransform
def untransform(self, buf):
    """Undo each read-side transform on *buf*, and remember every id seen
    so the write side applies the same transforms symmetrically."""
    for trans_id in self.__read_transforms:
        # Pick the decoder lazily so snappy is only touched when needed.
        if trans_id == self.ZLIB_TRANSFORM:
            decode = zlib.decompress
        elif trans_id == self.SNAPPY_TRANSFORM:
            decode = snappy.decompress
        else:
            decode = None
        if decode is not None:
            buf = decode(buf)
        if trans_id not in self.__write_transforms:
            self.__write_transforms.append(trans_id)
    return buf
开发者ID:nemith,项目名称:fbthrift,代码行数:9,代码来源:THeaderTransport.py
示例14: loads
def loads(classifier_ser):
    """Rebuild a classifier from its snappy-compressed pickle.

    For the 'plslinearsvmxval' pipeline a decision_function is attached
    that runs every intermediate transform step before delegating to the
    final estimator's decision_function.
    """
    payload = pickle.loads(snappy.decompress(classifier_ser))
    clf = payload['classifier']
    if payload['classifier_name'] == 'plslinearsvmxval':
        def decision_function(x):
            # Push x through all but the last pipeline step first.
            for _, step in clf.steps[:-1]:
                x = step.transform(x)
            return clf.steps[-1][1].decision_function(x)
        clf.decision_function = decision_function
    return clf
开发者ID:objects-in-space-and-time,项目名称:picarus,代码行数:9,代码来源:_classifiers.py
示例15: _get
def _get(key, callback, args):
    """Memoize callback(*args) in the Mongo cache table, snappy-compressed.

    On a hit the stored blob is decompressed and returned; on a miss the
    callback runs, its result is compressed and stored, and the fresh
    (uncompressed) result is returned.
    """
    cached = cache_table.find_one({'_id': key})
    if cached:
        return snappy.decompress(cached['data'])
    content = callback(*args)
    blob = bson.binary.Binary(snappy.compress(content))
    cache_table.insert_one({'_id': key, 'data': blob})
    return content
开发者ID:dirtysalt,项目名称:dirtysalt.github.io,代码行数:10,代码来源:tunein_email.py
示例16: snappy_read_block
def snappy_read_block(stream, buffer):
    """Read a block of data with the 'snappy' codec."""
    block_len = read_long(stream)
    raw = stream.read(block_len)
    # Trim off last 4 bytes which hold the CRC32.
    inflated = snappy.decompress(raw[:-4])
    # Reset the scratch buffer, fill it, and rewind for the caller.
    buffer.truncate(0)
    buffer.seek(0)
    buffer.write(inflated)
    buffer.seek(0)
开发者ID:e-heller,项目名称:fastavro,代码行数:10,代码来源:reader.py
示例17: untransform
def untransform(self, buf):
    """Decode *buf* by undoing each configured read-side transform
    (zlib, snappy, or zstd), recording every id so writes mirror them."""
    for tid in self.__read_transforms:
        if tid == TRANSFORM.ZLIB:
            buf = zlib.decompress(buf)
        elif tid == TRANSFORM.SNAPPY:
            buf = snappy.decompress(buf)
        elif tid == TRANSFORM.ZSTD:
            buf = zstd.ZstdDecompressor().decompress(buf)
        if tid not in self.__write_transforms:
            self.__write_transforms.append(tid)
    return buf
开发者ID:facebook,项目名称:fbthrift,代码行数:11,代码来源:THeaderTransport.py
示例18: read
def read(self):
    """Receive one length-prefixed, snappy-compressed message.

    Returns the decompressed payload, or False if the peer closed the
    connection before a full message arrived.
    """
    header = safe_recv(self._sock, self.HEADER_LEN)
    if not header:
        return False
    (remaining,) = struct.unpack(self.HEADER_STRUCT, header)
    parts = []
    while remaining:
        piece = safe_recv(self._sock, remaining)
        if not piece:
            return False
        parts.append(piece)
        remaining -= len(piece)
    return snappy.decompress("".join(parts))
开发者ID:Tefx,项目名称:Thinkpol,代码行数:11,代码来源:port.py
示例19: Decompress
def Decompress(Input):
    """Decompress the snappy file *Input* into '<Input>.unsnappy'.

    The whole file is read into memory, so this is only suitable for
    inputs that fit in RAM.

    Args:
      Input: path of the snappy-compressed file to unpack.
    """
    Output = Input + '.unsnappy'
    # FIX: the Python 2-only file() builtin is replaced by open(), and
    # 'with' guarantees the handles close even when decompression fails.
    with open(Input, "rb") as file_in:
        c_data = file_in.read()
    # Decompress before opening the output, so a failure here no longer
    # leaves behind an empty/truncated .unsnappy file.
    data = snappy.decompress(c_data)
    with open(Output, "wb") as file_out:
        file_out.write(data)
开发者ID:xianglei,项目名称:arm-cubietruck,代码行数:11,代码来源:snzip.py
示例20: fget
def fget(self , inst):
    """Property getter: lazily materialize a numpy array stored in a
    NumpyArrayTable row (optionally compressed), cache it on the
    instance, and return it.

    Returns None when the row, its shape, or its dtype is missing.
    NOTE(review): an unrecognized ``nprow.compress`` value would leave
    ``buf`` unbound and raise NameError — confirm only the listed codecs
    ('blosc', 'zlib', 'lz4', 'snappy', None) ever appear.
    """
    # Fast path: a previous access already attached the decoded array.
    if hasattr(inst, self.name+'_array') :
        return getattr(inst, self.name+'_array')
    nprow = getattr(inst, 'NumpyArrayTable__'+self.name)
    #~ print 'fget',self.name, nprow, inst.id
    if nprow is None or nprow.shape is None or nprow.dtype is None:
        return None
    # Shape is stored as a comma-separated string; '' means scalar/0-d.
    if nprow.shape =='':
        shape = ()
    else:
        shape = tuple([ int(v) for v in nprow.shape.split(',') ])
    dt = np.dtype(nprow.dtype)
    # Undo whatever compression was recorded for the blob.
    if nprow.compress == 'blosc':
        buf = blosc.decompress(nprow.blob)
    elif nprow.compress == 'zlib':
        buf = zlib.decompress(nprow.blob)
    elif nprow.compress == 'lz4':
        buf = lz4.decompress(nprow.blob)
    elif nprow.compress == 'snappy':
        buf = snappy.decompress(nprow.blob)
    elif nprow.compress is None:
        buf = nprow.blob
    if np.prod(shape)==0:
        # Zero-sized array: a non-empty buffer still decodes via
        # frombuffer; otherwise build an empty array of the right dtype.
        if len(buf) != 0:
            arr = np.frombuffer( buf , dtype = dt)
        else:
            arr= np.empty( shape, dtype = dt )
    else:
        arr = np.frombuffer( buf , dtype = dt)
        # hack: frombuffer arrays are read-only; downstream code mutates
        # them, so force writability before reshaping.
        arr.flags.writeable = True
        arr = arr.reshape(shape)
    # Wrap with physical units when the mapped column is a Quantity.
    if self.arraytype == pq.Quantity:
        arr = pq.Quantity(arr, units = nprow.units, copy =False)
    # next access will be direct
    setattr(inst, self.name+'_array', arr)
    #~ delattr(inst, 'NumpyArrayTable__'+self.name)
    return arr
开发者ID:OpenElectrophy,项目名称:OpenElectrophy,代码行数:52,代码来源:sqlmapper.py
注:本文中的snappy.decompress函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论