• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python zlib.decompressobj函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中zlib.decompressobj函数的典型用法代码示例。如果您正苦于以下问题:Python decompressobj函数的具体用法?Python decompressobj怎么用?Python decompressobj使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了decompressobj函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: startDecompressMessage

 def startDecompressMessage(self):
     """Prepare the per-message decompressor, honouring no-context-takeover."""
     # Pick the direction-specific settings: client-to-server when we are
     # the server, server-to-client otherwise.
     if self._isServer:
         no_takeover = self.c2s_no_context_takeover
         window_bits = self.c2s_max_window_bits
     else:
         no_takeover = self.s2c_no_context_takeover
         window_bits = self.s2c_max_window_bits
     # Negative wbits selects a raw deflate stream (no zlib header/trailer).
     if self._decompressor is None or no_takeover:
         self._decompressor = zlib.decompressobj(-window_bits)
开发者ID:rshravan,项目名称:AutobahnPython,代码行数:7,代码来源:compress_deflate.py


示例2: _parse_headers

    def _parse_headers(self, data):
        """Parse the raw HTTP header block contained in `data`.

        Returns False when the terminating blank line has not arrived yet.
        Otherwise stores the headers, mirrors them into the WSGI environ,
        primes body-reading state (content length / chunked flag /
        decompressor) and returns the number of leftover body bytes.
        """
        idx = data.find(b("\r\n\r\n"))
        if idx < 0:  # we don't have all headers
            return False

        # Split lines on \r\n keeping the \r\n on each line
        lines = [bytes_to_str(line) + "\r\n"
                 for line in data[:idx].split(b("\r\n"))]

        # Parse headers into key/value pairs paying attention
        # to continuation lines.
        while len(lines):
            # Parse initial header name : value pair.
            curr = lines.pop(0)
            if curr.find(":") < 0:
                raise InvalidHeader("invalid line %s" % curr.strip())
            name, value = curr.split(":", 1)
            name = name.rstrip(" \t").upper()
            if HEADER_RE.search(name):
                raise InvalidHeader("invalid header name %s" % name)
            name, value = name.strip(), [value.lstrip()]

            # Consume value continuation lines
            while len(lines) and lines[0].startswith((" ", "\t")):
                value.append(lines.pop(0))
            value = ''.join(value).rstrip()

            # store new header value
            self._headers.add_header(name, value)

            # update WSGI environ
            key = 'HTTP_%s' % name.upper().replace('-', '_')
            self._environ[key] = value

        # detect now if body is sent by chunks.
        clen = self._headers.get('content-length')
        te = self._headers.get('transfer-encoding', '').lower()

        if clen is not None:
            try:
                self._clen_rest = self._clen = int(clen)
            except ValueError:
                # Malformed Content-Length: ignore it rather than failing
                # the whole message.
                pass
        else:
            self._chunked = (te == 'chunked')
            if not self._chunked:
                # No length and not chunked: read until the peer closes.
                self._clen_rest = MAXSIZE

        # detect encoding and set decompress object
        encoding = self._headers.get('content-encoding')
        if self.decompress:
            if encoding == "gzip":
                # 16 + MAX_WBITS tells zlib to expect a gzip wrapper.
                self.__decompress_obj = zlib.decompressobj(16+zlib.MAX_WBITS)
            elif encoding == "deflate":
                self.__decompress_obj = zlib.decompressobj()

        # Everything past the blank line is the start of the body.
        rest = data[idx+4:]
        self._buf = [rest]
        self.__on_headers_complete = True
        return len(rest)
开发者ID:carriercomm,项目名称:circuits,代码行数:60,代码来源:http.py


示例3: recoverFile

def recoverFile(filename, output_file):
    """Decompress the zlib data yielded by readFile(filename) into output_file.

    The input may contain several concatenated zlib streams; whenever one
    stream ends, leftover bytes are fed to a fresh decompressor.
    """
    decompressor = zlib.decompressobj()
    unused = ""
    # `with` guarantees the output handle is closed even on the early-return
    # error path (the original leaked it).
    with open(output_file, "wb") as output:
        for response in readFile(filename):
            if not response:
                break
            to_decompress = decompressor.unconsumed_tail + unused + response
            unused = ""
            while to_decompress:
                try:
                    decompressed = decompressor.decompress(to_decompress)
                except zlib.error:
                    # Narrowed from a bare `except:` so that real bugs and
                    # KeyboardInterrupt are no longer swallowed.
                    print("%s couldn't be decompressed" % filename)
                    return
                if decompressed:
                    output.write(decompressed)
                    to_decompress = decompressor.unconsumed_tail
                    if decompressor.unused_data:
                        # A new zlib stream starts here: flush the old one
                        # and continue with the remaining bytes.
                        unused = decompressor.unused_data
                        remainder = decompressor.flush()
                        output.write(remainder)
                        decompressor = zlib.decompressobj()
                else:
                    to_decompress = None
        remainder = decompressor.flush()
        if remainder:
            output.write(remainder)
开发者ID:pswheeler,项目名称:rescueRsrcData,代码行数:28,代码来源:recoverRsrcData.py


示例4: compute

    def compute(self, split):
        """Iterate the lines of one split of a block-compressed gzip file.

        Split 0 starts right after the gzip header; for later splits the
        start offset is located via find_block, and the tail of a previous
        compressed block is decompressed to recover the line that straddles
        the split boundary.
        """
        f = open(self.path, 'rb', 4096 * 1024)
        last_line = ''
        if split.index == 0:
            # _read_gzip_header positions the file just past the header,
            # leaving raw deflate data from f.tell() onwards.
            zf = gzip.GzipFile(fileobj=f)
            zf._read_gzip_header()
            start = f.tell()
        else:
            start = self.find_block(f, split.index * self.splitSize)
            if start >= split.index * self.splitSize + self.splitSize:
                return
            # Walk backwards block by block until a previous block
            # decompresses, to recover the partial first line of this split.
            for i in xrange(1, 100):
                if start - i * self.BLOCK_SIZE <= 4:
                    break
                last_block = self.find_block(f, start - i * self.BLOCK_SIZE)
                if last_block < start:
                    f.seek(last_block)
                    d = f.read(start - last_block)
                    # -MAX_WBITS: raw deflate stream, no zlib header.
                    dz = zlib.decompressobj(-zlib.MAX_WBITS)
                    last_line = dz.decompress(d).split('\n')[-1]
                    break

        end = self.find_block(f, split.index * self.splitSize + self.splitSize)
        f.seek(start)
        d = f.read(end - start)
        f.close()
        if not d: return

        dz = zlib.decompressobj(-zlib.MAX_WBITS)
        io = cStringIO.StringIO(dz.decompress(d))
        # The first (possibly partial) line is completed with the tail
        # recovered from the previous block above.
        yield last_line + io.readline()
        for line in io:
            if line.endswith('\n'): # drop last line
                yield line


示例5: _decode

 def _decode(self, body, encoding, max_length=0):
     """Decode an HTTP response body according to its Content-Encoding.

     When `max_length` is non-zero, decompression stops there and
     DecompressSizeError is raised if compressed input remains.
     """
     if encoding in ('gzip', 'x-gzip'):
         return gunzip(body, max_length)
     if encoding == 'deflate':
         def inflate(wbits):
             # Inflate `body` with the given window bits (None = default,
             # zlib-wrapped), enforcing max_length when it is set.
             if not max_length:
                 if wbits is None:
                     return zlib.decompress(body)
                 return zlib.decompress(body, wbits)
             dobj = zlib.decompressobj() if wbits is None else zlib.decompressobj(wbits)
             out = dobj.decompress(body, max_length)
             if dobj.unconsumed_tail:
                 raise DecompressSizeError(
                     'Response exceeded %s bytes' % max_length)
             return out
         try:
             body = inflate(None)
         except zlib.error:
             # ugly hack to work with raw deflate content that may
             # be sent by microsoft servers. For more information, see:
             # http://carsten.codimi.de/gzip.yaws/
             # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx
             # http://www.gzip.org/zlib/zlib_faq.html#faq38
             body = inflate(-15)
     return body
开发者ID:Mimino666,项目名称:crawlmi,代码行数:28,代码来源:http_compression.py


示例6: __next__

    def __next__(self):
        """Return the next (decoded) chunk of the response body.

        Transparently inflates gzip/deflate content; raises StopIteration
        once the underlying stream and the decoder are both drained, and
        ContentDecodingError on decode failures or unknown encodings.
        """
        chunk = self.read()
        if not chunk:
            if self._decoder:
                # Underlying stream ended: emit whatever the decoder
                # still buffers, exactly once.
                chunk = self._decoder.flush()
                self._decoder = None
                return chunk
            else:
                raise StopIteration
        else:
            ce = self._content_encoding
            if ce in ('gzip', 'deflate'):
                if not self._decoder:
                    import zlib
                    if ce == 'gzip':
                        # 16 + MAX_WBITS: expect a gzip wrapper.
                        self._decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
                    else:
                        self._decoder = zlib.decompressobj()
                        try:
                            return self._decoder.decompress(chunk)
                        except zlib.error:
                            # Raw deflate without a zlib header: swap in a
                            # headerless decoder and fall through to retry.
                            self._decoder = zlib.decompressobj(-zlib.MAX_WBITS)
                try:
                    return self._decoder.decompress(chunk)
                except (IOError, zlib.error) as e:
                    raise ContentDecodingError(e)

            if ce:
                raise ContentDecodingError('Unknown encoding: %s' % ce)
            return chunk
开发者ID:cuongnvth,项目名称:hieuhien.vn,代码行数:30,代码来源:urlfetch.py


示例7: format_body

def format_body(message, body_fp):
    """ return (is_compressed, body) """

    t_enc = message.get('Transfer-Encoding', '').strip().lower()
    c_enc = message.get('Content-Encoding', '').strip().lower()
    c_type = message.get('Content-Type', '').strip().lower()
    # NOTE(review): `charset` is computed but never used in this function;
    # kept so the RE_CHARSET match still runs as before.
    charset = 'latin1'
    m = RE_CHARSET.search(c_type)
    if m:
        charset = m.group(1)

    body = read_body(body_fp, t_enc == 'chunked')
    if c_enc in ('gzip', 'x-gzip', 'deflate'):
        try:
            if c_enc != 'deflate':
                # Skip the gzip header manually, then inflate the raw
                # deflate payload (-MAX_WBITS = headerless stream).
                buf = StringIO(body)
                read_gzip_header(buf)
                body = buf.read()
                do = zlib.decompressobj(-zlib.MAX_WBITS)
            else:
                do = zlib.decompressobj()
            decompressed = do.decompress(body)
            return (True, decompressed)
        except Exception:
            # Narrowed from a bare `except:` (which also trapped
            # KeyboardInterrupt/SystemExit). On failure, return the
            # documented 2-tuple instead of silently returning None.
            import traceback
            traceback.print_exc()
            return (False, body)
    else:
        return (False, body)
开发者ID:wil,项目名称:httpsniff,代码行数:29,代码来源:httpsniff.py


示例8: start_decompress_message

 def start_decompress_message(self):
     """(Re)create the decompressor at the start of a new message.

     With no-context-takeover, the sliding window must not be reused
     between messages, so a fresh decompressor is built every time.
     """
     if self._is_server:
         no_takeover = self.client_no_context_takeover
         wbits = self.client_max_window_bits
     else:
         no_takeover = self.server_no_context_takeover
         wbits = self.server_max_window_bits
     # Negative wbits => raw deflate stream without zlib header/trailer.
     if self._decompressor is None or no_takeover:
         self._decompressor = zlib.decompressobj(-wbits)
开发者ID:crossbario,项目名称:autobahn-python,代码行数:7,代码来源:compress_deflate.py


示例9: decode_deflate

def decode_deflate(chunks, z=None):
    """Yield decompressed output for an iterable of deflate-compressed chunks.

    When no decompressor is supplied, start with a zlib-wrapped stream and
    fall back once to a raw deflate stream on the first decode error (some
    servers send deflate bodies without the zlib header). A final flush()
    is always yielded.
    """
    may_retry = z is None
    if may_retry:
        z = zlib.decompressobj()

    for piece in chunks:
        # zlib decompressors expose unconsumed_tail, which must be
        # re-fed with the next chunk; other decompressors (e.g. brotli)
        # consume each chunk whole.
        if hasattr(z, 'unconsumed_tail'):
            pending = z.unconsumed_tail + piece
        else:
            pending = piece
        try:
            out = z.decompress(pending)
        except zlib.error:
            if not may_retry:
                raise
            may_retry = False
            z = zlib.decompressobj(-zlib.MAX_WBITS)
            out = z.decompress(pending)

        if out:
            yield out

    yield z.flush()
开发者ID:gilesbrown,项目名称:python-icapservice,代码行数:26,代码来源:content.py


示例10: parse_blob

 def parse_blob(self):
     """Unzip and parse the blob. Everything we get is big endian. Each block contains 16*16*16 nodes, a node is the ingame block size. """
     dec_o = zlib.decompressobj()
     # The first zlib stream (after the 4-byte prefix) holds the node data:
     # 8192 bytes of param0 plus 4096 each of param1/param2.
     (self.param0, self.param1, self.param2) = struct.unpack("8192s4096s4096s", dec_o.decompress(self.blob[4:]))
     self.param0 = array.array("H", self.param0)
     self.param0.byteswap()
     #import pdb;pdb.set_trace()
     tail = dec_o.unused_data
     dec_o = zlib.decompressobj() #Must make new obj or .unused_data will get messed up.
     blah = dec_o.decompress(tail) #throw away metadata

     # What follows the second stream is parsed field by field from
     # unused_data, tracking the offset in `ptr`.
     (static_version, static_count,) = struct.unpack(">BH", dec_o.unused_data[0:3])
     ptr=3
     if static_count:
         # Skip the static objects: each record is a 15-byte header
         # (type, x, y, z, data_size) followed by data_size bytes.
         for i in range(static_count):
             (object_type, pos_x_nodes, pos_y_nodes, pos_z_nodes, data_size) = struct.unpack(">BiiiH", dec_o.unused_data[ptr:ptr+15])
             ptr = ptr+15+data_size

     (self.timestamp,) = struct.unpack(">I", dec_o.unused_data[ptr:ptr+4])
     if self.timestamp == 0xffffffff: #This is defined as an unknown timestamp
         self.timestamp = None
     ptr=ptr+4
     (name_id_mapping_version, num_name_id_mappings) = struct.unpack(">BH", dec_o.unused_data[ptr:ptr+3])
     ptr=ptr+3
     start=ptr
     self.id_to_name = {}
     # Build the node-id -> node-name table: each entry is a 4-byte
     # (id, name_len) header followed by the UTF-8 encoded name.
     for i in range(0, num_name_id_mappings):
         (node_id, name_len) = struct.unpack(">HH", dec_o.unused_data[start:start+4])
         (name,) = struct.unpack(">{}s".format(name_len), dec_o.unused_data[start+4:start+4+name_len])
         self.id_to_name[node_id] = name.decode('utf8')
         start=start+4+name_len
开发者ID:mikaelfrykholm,项目名称:minetest-slippy,代码行数:31,代码来源:minetest-slippy.py


示例11: _initialize_decompressor

 def _initialize_decompressor(self):
   """Create the decompressor matching self._compression_type."""
   ctype = self._compression_type
   if ctype == CompressionTypes.BZIP2:
     self._decompressor = bz2.BZ2Decompressor()
   elif ctype == CompressionTypes.DEFLATE:
     self._decompressor = zlib.decompressobj()
   else:
     # Only three compression types exist; anything else is a bug.
     assert ctype == CompressionTypes.GZIP
     self._decompressor = zlib.decompressobj(self._gzip_mask)
开发者ID:eralmas7,项目名称:beam,代码行数:8,代码来源:filesystem.py


示例12: test_header_auto_detect

 def test_header_auto_detect(self):
     """autodetect zlib and gzip header"""
     # wbits = MAX_WBITS | 32 asks zlib to sniff the input for either a
     # zlib or a gzip header and decode accordingly.
     auto = zlib.MAX_WBITS | 32
     for payload in (self.gzip_data, self.zlib_data):
         do = zlib.decompressobj(auto)
         self.assertEqual(do.decompress(payload), self.text)
     # The one-shot API honours the same auto-detect flag.
     for payload in (self.gzip_data, self.zlib_data):
         self.assertEqual(zlib.decompress(payload, auto), self.text)
开发者ID:IronLanguages,项目名称:ironpython2,代码行数:8,代码来源:test_zlib.py


示例13: _read_headers

    def _read_headers(self, data):
        """
        Read the headers of an HTTP response from the socket, and the response
        body as well, into a new HTTPResponse instance. Then call the request
        handler.
        """
        do_close = False

        try:
            initial_line, data = data.split(CRLF, 1)
            try:
                try:
                    # Normal form: "HTTP/1.1 200 OK".
                    http_version, status, status_text = initial_line.split(' ', 2)
                    status = int(status)
                except ValueError:
                    # Some servers omit the reason phrase; look it up from
                    # the status code instead.
                    http_version, status = initial_line.split(' ')
                    status = int(status)
                    status_text = HTTP.get(status, '')
            except ValueError:
                raise BadRequest('Invalid HTTP status line %r.' % initial_line)

            # Parse the headers.
            headers = read_headers(data)

            # Construct an HTTPResponse object.
            self.current_response = response = HTTPResponse(self,
                self._requests[0], http_version, status, status_text, headers)

            # Do we have a Content-Encoding header?
            if 'Content-Encoding' in headers:
                encoding = headers['Content-Encoding']
                if encoding == 'gzip':
                    # 16 + MAX_WBITS: decode a gzip-wrapped stream.
                    response._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
                elif encoding == 'deflate':
                    # -MAX_WBITS: raw deflate stream without zlib header.
                    response._decompressor = zlib.decompressobj(-zlib.MAX_WBITS)

            # Do we have a Content-Length header?
            if 'Content-Length' in headers:
                self._stream.on_read = self._read_body
                self._stream.read_delimiter = int(headers['Content-Length'])

            elif 'Transfer-Encoding' in headers:
                if headers['Transfer-Encoding'] == 'chunked':
                    self._stream.on_read = self._read_chunk_head
                    self._stream.read_delimiter = CRLF
                else:
                    raise BadRequest("Unsupported Transfer-Encoding: %s" % headers['Transfer-Encoding'])

            # Is this a HEAD request? If so, then handle the request NOW.
            if response.method == 'HEAD':
                self._on_response()

        except BadRequest, e:
            log.info('Bad response from %r: %s',
                self._server, e)
            # NOTE(review): do_close is set but not consumed in this
            # excerpt — presumably handled further down; confirm.
            do_close = True
开发者ID:ixokai,项目名称:pants,代码行数:56,代码来源:client.py


示例14: zlib_gzin

 def zlib_gzin(self, compress = False, data = None):
     """Return the compressed or decompressed object with Zlib, string or file data"""
     # NOTE(review): despite the `compress` flag, only the decompress path
     # is implemented in this excerpt — confirm the compress branch exists
     # elsewhere.
     if not compress:
         try:
             # 'x\x9c' is a standard zlib stream header; prepending it lets
             # this inflate payloads whose header was stripped. Falls back
             # to self.data when no explicit payload is given.
             if data:
                 return zlib.decompressobj().decompress('x\x9c' + data)
             else:
                 return zlib.decompressobj().decompress('x\x9c' + self.data)
         except Exception, e:
             # Best-effort: report the failure as a string instead of raising.
             return '[!] Error Zlib inflate decompress: %s.' % e
开发者ID:SeTX,项目名称:utils,代码行数:10,代码来源:decoder.py


示例15: decompress

    def decompress(self, value):
        """Decompress `value`, lazily creating the decompressor on first use.

        The first chunk decides the stream flavour: a zlib-wrapped stream
        is tried first, with a fallback to raw deflate (no zlib header)
        when that fails.
        """
        if self.decompressobj:
            return self.decompressobj.decompress(value)
        try:
            self.decompressobj = zlib.decompressobj()
            return self.decompressobj.decompress(value)
        except zlib.error:
            self.decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
            return self.decompressobj.decompress(value)
开发者ID:Super-Rad,项目名称:wpull,代码行数:10,代码来源:decompression.py


示例16: decrypt_file

 def decrypt_file(self, file_content, filename):
     """Decompress and, when the header carries a key, decrypt one log file.

     NOTE(review): the decrypted content is only bound to a local variable
     in this excerpt — confirm how the caller retrieves it.
     """
     # each log file is built from a header section and a content section, the two are divided by a |==| mark
     file_split_content = file_content.split("|==|\n")
     # get the header section content
     file_header_content = file_split_content[0]
     # get the log section content
     file_log_content = file_split_content[1]
     # if the file is not encrypted - the "key" value in the file header is '-1'
     file_encryption_key = file_header_content.find("key:")
     if file_encryption_key == -1:
         # uncompress the log content
         uncompressed_and_decrypted_file_content = zlib.decompressobj().decompress(file_log_content)
     # if the file is encrypted
     else:
         content_encrypted_sym_key = file_header_content.split("key:")[1].splitlines()[0]
         # we expect to have a 'keys' folder that will have the stored private keys
         if not os.path.exists(os.path.join(self.config_path, "keys")):
             self.logger.error("No encryption keys directory was found and file %s is encrypted", filename)
             raise Exception("No encryption keys directory was found")
         # get the public key id from the log file header
         public_key_id = file_header_content.split("publicKeyId:")[1].splitlines()[0]
         # get the public key directory in the filesystem - each time we upload a new key this id is incremented
         public_key_directory = os.path.join(os.path.join(self.config_path, "keys"), public_key_id)
         # if the key directory does not exists
         if not os.path.exists(public_key_directory):
             self.logger.error(
                 "Failed to find a proper certificate for : %s who has the publicKeyId of %s",
                 filename,
                 public_key_id,
             )
             raise Exception("Failed to find a proper certificate")
         # get the checksum
         checksum = file_header_content.split("checksum:")[1].splitlines()[0]
         # get the private key
         private_key = open(os.path.join(public_key_directory, "Private.key"), "r").read()
         try:
             # RSA-decrypt the symmetric key, then AES-CBC-decrypt the log
             # body with it (zero IV) and inflate the result.
             rsa_private_key = M2Crypto.RSA.load_key_string(private_key)
             content_decrypted_sym_key = rsa_private_key.private_decrypt(
                 base64.b64decode(bytearray(content_encrypted_sym_key)), M2Crypto.RSA.pkcs1_padding
             )
             uncompressed_and_decrypted_file_content = zlib.decompressobj().decompress(
                 AES.new(base64.b64decode(bytearray(content_decrypted_sym_key)), AES.MODE_CBC, 16 * "\x00").decrypt(
                     file_log_content
                 )
             )
             # we check the content validity by checking the checksum
             content_is_valid = self.validate_checksum(checksum, uncompressed_and_decrypted_file_content)
             if not content_is_valid:
                 self.logger.error("Checksum verification failed for file %s", filename)
                 raise Exception("Checksum verification failed")
         except Exception, e:
             # NOTE(review): the extra arguments do not match the single %s
             # placeholder in this log format string — confirm intent.
             self.logger.error(
                 "Error while trying to decrypt the file %s", filename, e.message, traceback.format_exc()
             )
             raise Exception("Error while trying to decrypt the file" + filename)
开发者ID:Incapsula,项目名称:logs-downloader,代码行数:55,代码来源:LogsDownloader.py


示例17: _fetch_to_internal_buffer

  def _fetch_to_internal_buffer(self, num_bytes):
    """Fetch up to num_bytes into the internal buffer."""
    if (not self._read_eof and self._read_position > 0 and
        (self._read_buffer.tell() - self._read_position) < num_bytes):
      # There aren't enough number of bytes to accommodate a read, so we
      # prepare for a possibly large read by clearing up all internal buffers
      # but without dropping any previous held data.
      self._read_buffer.seek(self._read_position)
      data = self._read_buffer.read()
      self._clear_read_buffer()
      self._read_buffer.write(data)

    while not self._read_eof and (self._read_buffer.tell() - self._read_position
                                 ) < num_bytes:
      # Continue reading from the underlying file object until enough bytes are
      # available, or EOF is reached.
      buf = self._file.read(self._read_size)
      if buf:
        decompressed = self._decompressor.decompress(buf)
        del buf  # Free up some possibly large and no-longer-needed memory.
        self._read_buffer.write(decompressed)
      else:
        # EOF of current stream reached.
        #
        # Any uncompressed data at the end of the stream of a gzip or bzip2
        # file that is not corrupted points to a concatenated compressed
        # file. We read concatenated files by recursively creating decompressor
        # objects for the unused compressed data.
        if (self._compression_type == CompressionTypes.BZIP2 or
            self._compression_type == CompressionTypes.DEFLATE or
            self._compression_type == CompressionTypes.GZIP):
          if self._decompressor.unused_data != b'':
            # Leftover compressed bytes belong to the next concatenated
            # stream: start a fresh decompressor of the same type.
            buf = self._decompressor.unused_data

            if self._compression_type == CompressionTypes.BZIP2:
              self._decompressor = bz2.BZ2Decompressor()
            elif self._compression_type == CompressionTypes.DEFLATE:
              self._decompressor = zlib.decompressobj()
            else:
              self._decompressor = zlib.decompressobj(self._gzip_mask)

            decompressed = self._decompressor.decompress(buf)
            self._read_buffer.write(decompressed)
            continue
        else:
          # Deflate, Gzip and bzip2 formats do not require flushing
          # remaining data in the decompressor into the read buffer when
          # fully decompressing files.
          self._read_buffer.write(self._decompressor.flush())

        # Record that we have hit the end of file, so we won't unnecessarily
        # repeat the completeness verification step above.
        self._read_eof = True
开发者ID:eralmas7,项目名称:beam,代码行数:53,代码来源:filesystem.py


示例18: compute

    def compute(self, split):
        """Iterate the lines of one split of a block-compressed gzip file.

        Split 0 starts right after the gzip header; later splits locate a
        compressed-block boundary and reconstruct the line that straddles
        the previous block. Data is decompressed in bounded slices.
        """
        f = self.open_file()
        last_line = ''
        if split.index == 0:
            # Position the file just past the gzip header.
            zf = gzip.GzipFile(fileobj=f)
            zf._read_gzip_header()
            start = f.tell()
        else:
            start = self.find_block(f, split.index * self.splitSize)
            if start >= split.index * self.splitSize + self.splitSize:
                return
            # Walk backwards block by block until a previous block
            # decompresses, to recover the partial line entering this split.
            for i in xrange(1, 100):
                if start - i * self.BLOCK_SIZE <= 4:
                    break
                last_block = self.find_block(f, start - i * self.BLOCK_SIZE)
                if last_block < start:
                    f.seek(last_block)
                    d = f.read(start - last_block)
                    # -MAX_WBITS: raw deflate stream, no zlib header.
                    dz = zlib.decompressobj(-zlib.MAX_WBITS)
                    last_line = dz.decompress(d).split('\n')[-1]
                    if last_line.endswith('\n'):
                        last_line = ''
                    break

        end = self.find_block(f, split.index * self.splitSize + self.splitSize)
        # TODO: speed up
        f.seek(start)
        if self.fileinfo:
            f.length = end
        dz = zlib.decompressobj(-zlib.MAX_WBITS)
        while start < end:
            # Decompress in 64 KiB slices so memory stays bounded.
            d = f.read(min(64<<10, end-start))
            start += len(d)
            if not d: break

            io = cStringIO.StringIO(dz.decompress(d))

            last_line += io.readline()
            yield last_line
            last_line = ''

            ll = list(io)
            if not ll: continue

            # Hold back the possibly incomplete final line until the next
            # slice completes it, unless it already ends with a newline.
            last_line = ll.pop()
            for line in ll:
                yield line
            if last_line.endswith('\n'):
                yield last_line
                last_line = ''

        f.close()


示例19: deflate_decoder

def deflate_decoder(wbits=None):
    """Build a decoder callable `enc(data, final)` that inflates `data`.

    `wbits` is forwarded to zlib.decompressobj when given; None means the
    library default (a zlib-wrapped deflate stream).
    """
    obj = zlib.decompressobj() if wbits is None else zlib.decompressobj(wbits)

    def enc(data, final):
        # Flush the decompressor's buffered tail once the caller signals
        # end-of-stream.
        ret = obj.decompress(data)
        if final:
            ret += obj.flush()
        return ret

    return enc
开发者ID:hubo1016,项目名称:vlcp,代码行数:13,代码来源:encoders.py


示例20: get_title_html

def get_title_html(url, type, stream=None, **kwds):
    """Fetch `url` (unless an open `stream` is given) and extract its title.

    Returns {'title': ...} combining the <title> tag and any title <meta>
    tags, or None when neither is present. Raises PageURLError for
    unsupported content encodings.

    NOTE(review): the `type` parameter shadows the builtin and is unused
    here — confirm whether callers rely on it.
    """
    if stream is None:
        request = urllib2.Request(url)
        for header in default_headers:
            request.add_header(*header)
        stream = get_opener().open(
            request, timeout=TIMEOUT_S)

    with closing(stream):
        charset = stream.info().getparam('charset')
        content_enc = stream.info().dict.get('content-encoding', 'identity')
        if content_enc == 'identity':
            data = stream.read(READ_BYTES_MAX)
        elif content_enc == 'gzip':
            raw_data = stream.read(READ_BYTES_MAX)
            # 16 + MAX_WBITS tells zlib to expect a gzip wrapper.
            data = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(raw_data)
        elif content_enc == 'deflate':
            raw_data = stream.read(READ_BYTES_MAX)
            try:
                data = zlib.decompressobj().decompress(raw_data)
            except zlib.error:
                # Raw deflate stream without the zlib header.
                data = zlib.decompressobj(-zlib.MAX_WBITS).decompress(raw_data)
        else:
            raise PageURLError(
                'Unsupported content-encoding: "%s"' % content_enc)

    soup = BeautifulSoup(data, BS4_PARSER, from_encoding=charset)

    # The page title according to the <title> tag.
    title = soup.find('title')
    if title:
        title = ''.join(re.sub(r'\s+', ' ', s) for s in title.strings).strip()

    # The page title according to the <meta> tags.
    title_meta = soup.find('meta', attrs={'name': 'title'}) or \
                 soup.find('meta', attrs={'name': 'og:title'})
    if title_meta:
        title_meta = title_meta.attrs.get('content')

    if not title and not title_meta:
        return
    elif title and (not title_meta or title_meta in title):
        title_str = 'Title: %s' % format_title(title)
    elif title_meta and (not title or title in title_meta):
        title_str = 'Title: %s' % format_title(title_meta)
    else:
        title_str = 'Title (meta): %s -- Title (primary): %s' % (
            format_title(title_meta), format_title(title))
    return { 'title': title_str }
开发者ID:joodicator,项目名称:PageBot,代码行数:49,代码来源:url.py



注:本文中的zlib.decompressobj函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python ifunctionfactory_plugin.propartial函数代码示例发布时间:2022-05-26
下一篇:
Python zlib.decompress函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4 © 2001-2023 极客世界. | Sitemap