Python util.GzipDecompressor Class Code Examples


This article collects typical usage examples of the tornado.util.GzipDecompressor class in Python. If you have been wondering what GzipDecompressor is for and how it is used in practice, the curated class examples below should help.



Nine code examples of the GzipDecompressor class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
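
Before the framework-level examples, here is a minimal standalone sketch of what GzipDecompressor does: it incrementally decompresses gzip-format bytes through decompress(), unconsumed_tail and flush(). The payload and chunk size are arbitrary illustration values, not taken from any of the examples below.

# Minimal standalone sketch (illustration only, not one of the examples below):
# round-trip a payload through the standard-library gzip module and
# tornado.util.GzipDecompressor, feeding the compressed bytes in small pieces
# the way an HTTP client would receive them.
import gzip

from tornado.util import GzipDecompressor

payload = b"hello gzip " * 1000
compressed = gzip.compress(payload)

decompressor = GzipDecompressor()
chunks = []
for i in range(0, len(compressed), 256):
    chunks.append(decompressor.decompress(compressed[i:i + 256]))
# flush() returns any data still buffered; for complete input it is normally empty.
chunks.append(decompressor.flush())

assert b"".join(chunks) == payload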

Example 1: _GzipMessageDelegate

class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
    """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
    """

    def __init__(self, delegate: httputil.HTTPMessageDelegate, chunk_size: int) -> None:
        self._delegate = delegate
        self._chunk_size = chunk_size
        self._decompressor = None  # type: Optional[GzipDecompressor]

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        if headers.get("Content-Encoding") == "gzip":
            self._decompressor = GzipDecompressor()
            # Downstream delegates will only see uncompressed data,
            # so rename the content-encoding header.
            # (but note that curl_httpclient doesn't do this).
            headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
            del headers["Content-Encoding"]
        return self._delegate.headers_received(start_line, headers)

    async def data_received(self, chunk: bytes) -> None:
        if self._decompressor:
            compressed_data = chunk
            while compressed_data:
                decompressed = self._decompressor.decompress(
                    compressed_data, self._chunk_size
                )
                if decompressed:
                    ret = self._delegate.data_received(decompressed)
                    if ret is not None:
                        await ret
                compressed_data = self._decompressor.unconsumed_tail
        else:
            ret = self._delegate.data_received(chunk)
            if ret is not None:
                await ret

    def finish(self) -> None:
        if self._decompressor is not None:
            tail = self._decompressor.flush()
            if tail:
                # The tail should always be empty: decompress returned
                # all that it can in data_received and the only
                # purpose of the flush call is to detect errors such
                # as truncated input. If we did legitimately get a new
                # chunk at this point we'd need to change the
                # interface to make finish() a coroutine.
                raise ValueError(
                    "decompressor.flush returned data; possile truncated input"
                )
        return self._delegate.finish()

    def on_connection_close(self) -> None:
        return self._delegate.on_connection_close()
Developer ID: rgbkrk, Project: tornado, Lines of code: 57, Source: http1connection.py
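
The loop in data_received above is the part worth studying: decompress() is called with a max_length of chunk_size so a single compressed chunk cannot expand into an unbounded amount of memory, and unconsumed_tail holds whatever compressed input has not been processed yet. Below is a standalone sketch of that same pattern, with an arbitrary chunk size and payload chosen for illustration.

# Standalone sketch of the bounded decompression loop used in data_received.
# chunk_size and the payload are illustrative values only.
import gzip

from tornado.util import GzipDecompressor

chunk_size = 1024
compressed = gzip.compress(b"x" * 100000)

decompressor = GzipDecompressor()
output = []
data = compressed
while data:
    # At most chunk_size bytes of decompressed data are produced per call;
    # leftover compressed input is kept in unconsumed_tail for the next pass.
    decompressed = decompressor.decompress(data, chunk_size)
    if decompressed:
        output.append(decompressed)  # the wrapped delegate would receive this
    data = decompressor.unconsumed_tail

# Mirrors finish(): flush() normally returns nothing for well-formed input.
output.append(decompressor.flush())
assert b"".join(output) == b"x" * 100000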


Example 2: _GzipMessageDelegate

class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
    """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
    """
    def __init__(self, delegate, chunk_size):
        self._delegate = delegate
        self._chunk_size = chunk_size
        self._decompressor = None

    def headers_received(self, start_line, headers):
        if headers.get("Content-Encoding") == "gzip":
            self._decompressor = GzipDecompressor()
            # Downstream delegates will only see uncompressed data,
            # so rename the content-encoding header.
            # (but note that curl_httpclient doesn't do this).
            headers.add("X-Consumed-Content-Encoding",
                        headers["Content-Encoding"])
            del headers["Content-Encoding"]
        return self._delegate.headers_received(start_line, headers)

    @gen.coroutine
    def data_received(self, chunk):
        if self._decompressor:
            compressed_data = chunk
            while compressed_data:
                decompressed = self._decompressor.decompress(
                    compressed_data, self._chunk_size)
                if decompressed:
                    ret = self._delegate.data_received(decompressed)
                    if ret is not None:
                        yield ret
                compressed_data = self._decompressor.unconsumed_tail
        else:
            ret = self._delegate.data_received(chunk)
            if ret is not None:
                yield ret

    def finish(self):
        if self._decompressor is not None:
            tail = self._decompressor.flush()
            if tail:
                # I believe the tail will always be empty (i.e.
                # decompress will return all it can).  The purpose
                # of the flush call is to detect errors such
                # as truncated input.  But in case it ever returns
                # anything, treat it as an extra chunk
                self._delegate.data_received(tail)
        return self._delegate.finish()

    def on_connection_close(self):
        return self._delegate.on_connection_close()
Developer ID: eomsoft, Project: teleport, Lines of code: 50, Source: http1connection.py


Example 3: _on_headers

    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line)
        assert match
        code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)
        if 100 <= code < 200:
            self._handle_1xx(code)
            return
        else:
            self.code = code
            self.reason = match.group(2)

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b"")
            return
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
                    content_length not in (None, 0)):
                raise ValueError("Response with code %d should not have body" %
                                 self.code)
            self._on_body(b"")
            return

        if (self.request.use_gzip and
                self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b"\r\n", self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)
Developer ID: zhkzyth, Project: tornado-reading-notes, Lines of code: 60, Source: simple_httpclient.py


Example 4: headers_received

def headers_received(self, start_line, headers):
    if headers.get("Content-Encoding") == "gzip":
        self._decompressor = GzipDecompressor()
        # Downstream delegates will only see uncompressed data,
        # so rename the content-encoding header.
        # (but note that curl_httpclient doesn't do this).
        headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
        del headers["Content-Encoding"]
    return self._delegate.headers_received(start_line, headers)
Developer ID: EasyIME, Project: PIME, Lines of code: 9, Source: http1connection.py


Example 5: headers_received

def headers_received(
    self,
    start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
    headers: httputil.HTTPHeaders,
) -> Optional[Awaitable[None]]:
    if headers.get("Content-Encoding") == "gzip":
        self._decompressor = GzipDecompressor()
        # Downstream delegates will only see uncompressed data,
        # so rename the content-encoding header.
        # (but note that curl_httpclient doesn't do this).
        headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
        del headers["Content-Encoding"]
    return self._delegate.headers_received(start_line, headers)
Developer ID: rgbkrk, Project: tornado, Lines of code: 13, Source: http1connection.py


Example 6: headers_received

    def headers_received(self, first_line, headers):
        if self.request.decompress_response and headers.get("Content-Encoding") == "gzip":
            self._decompressor = GzipDecompressor()

            # Downstream delegates will only see uncompressed data,
            # so rename the content-encoding header.
            headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
            del headers["Content-Encoding"]

        self.headers = headers
        self.code = first_line.code
        self.reason = first_line.reason

        if self.request.header_callback is not None:
            # Reassemble the start line.
            self.request.header_callback("%s %s %s\r\n" % first_line)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback("\r\n")
Developer ID: mSOHU, Project: http2, Lines of code: 19, Source: tornado4.py


Example 7: _HTTPConnection


#......... some code omitted here .........
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b"")
            return
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
                    content_length not in (None, 0)):
                raise ValueError("Response with code %d should not have body" %
                                 self.code)
            self._on_body(b"")
            return

        if (self.request.use_gzip and
                self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b"\r\n", self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)

    def _on_body(self, data):
        self._remove_timeout()
        original_request = getattr(self.request, "original_request",
                                   self.request)
        if (self.request.follow_redirects and
            self.request.max_redirects > 0 and
                self.code in (301, 302, 303, 307)):
            assert isinstance(self.request, _RequestProxy)
            new_request = copy.copy(self.request.request)
            new_request.url = urlparse.urljoin(self.request.url,
                                               self.headers["Location"])
            new_request.max_redirects = self.request.max_redirects - 1
            del new_request.headers["Host"]
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # Client SHOULD make a GET request after a 303.
            # According to the spec, 302 should be followed by the same
            # method as the original request, but in practice browsers
            # treat 302 the same as 303, and many servers use 302 for
            # compatibility with pre-HTTP/1.1 user agents which don't
            # understand the 303 status.
            if self.code in (302, 303):
                new_request.method = "GET"
                new_request.body = None
                for h in ["Content-Length", "Content-Type",
Developer ID: zhkzyth, Project: tornado-reading-notes, Lines of code: 67, Source: simple_httpclient.py


Example 8: _HTTP2Stream


#......... some code omitted here .........
            password = request.auth_password or ""
        if username is not None:
            if request.auth_mode not in (None, "basic"):
                raise ValueError("unsupported auth_mode %s", request.auth_mode)
            auth = utf8(username) + b":" + utf8(password)
            request.headers["Authorization"] = b"Basic " + base64.b64encode(auth)
        if request.user_agent:
            request.headers["User-Agent"] = request.user_agent
        if not request.allow_nonstandard_methods:
            # Some HTTP methods nearly always have bodies while others
            # almost never do. Fail in this case unless the user has
            # opted out of sanity checks with allow_nonstandard_methods.
            body_expected = request.method in ("POST", "PATCH", "PUT")
            body_present = request.body is not None or request.body_producer is not None
            if (body_expected and not body_present) or (body_present and not body_expected):
                raise ValueError(
                    "Body must %sbe None for method %s (unless "
                    "allow_nonstandard_methods is true)" % ("not " if body_expected else "", request.method)
                )
        if request.body is not None:
            # When body_producer is used the caller is responsible for
            # setting Content-Length (or else chunked encoding will be used).
            request.headers["Content-Length"] = str(len(request.body))
        if request.method == "POST" and "Content-Type" not in request.headers:
            request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if request.decompress_response:
            request.headers["Accept-Encoding"] = "gzip"

        request.url = (parsed.path or "/") + (("?" + parsed.query) if parsed.query else "")
        return request

    def headers_received(self, first_line, headers):
        if self.request.decompress_response and headers.get("Content-Encoding") == "gzip":
            self._decompressor = GzipDecompressor()

            # Downstream delegates will only see uncompressed data,
            # so rename the content-encoding header.
            headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
            del headers["Content-Encoding"]

        self.headers = headers
        self.code = first_line.code
        self.reason = first_line.reason

        if self.request.header_callback is not None:
            # Reassemble the start line.
            self.request.header_callback("%s %s %s\r\n" % first_line)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback("\r\n")

    def _run_callback(self, response):
        if self._finalized:
            return

        if self.release_callback is not None:
            self.release_callback()

        with stack_context.NullContext():
            self.io_loop.add_callback(functools.partial(self.final_callback, response))
        self._finalized = True

    def handle_event(self, event):
        if isinstance(event, h2.events.ResponseReceived):
            headers = self.build_http_headers(event.headers)
            status_code = int(headers.pop(":status"))
Developer ID: mSOHU, Project: http2, Lines of code: 67, Source: tornado4.py
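
For completeness: application code rarely constructs GzipDecompressor directly. Asking Tornado's HTTP client to decompress responses (decompress_response=True, which recent Tornado versions already default to) advertises Accept-Encoding: gzip and routes the body through the delegates shown in the examples above. The sketch below assumes this standard client API; the URL is only a placeholder.

# Hedged usage sketch: transparent response decompression via the HTTP client.
# example.com is a placeholder URL, not taken from the examples above.
import asyncio

from tornado.httpclient import AsyncHTTPClient


async def main():
    client = AsyncHTTPClient()
    response = await client.fetch("https://example.com/", decompress_response=True)
    # If the server compressed the body, the original encoding is recorded
    # under X-Consumed-Content-Encoding by headers_received (see above).
    print(response.headers.get("X-Consumed-Content-Encoding"))
    print(len(response.body))


if __name__ == "__main__":
    asyncio.run(main())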


Example 9: _HTTPConnection


#......... some code omitted here .........
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))

        if self.request.method == "HEAD":
            # HEAD requests never have content, even though they may have
            # content-length headers
            self._on_body(b(""))
            return
        if 100 <= self.code < 200 or self.code in (204, 304):
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
                content_length not in (None, 0)):
                raise ValueError("Response with code %d should not have body" %
                                 self.code)
            self._on_body(b(""))
            return

        if (self.request.use_gzip and
            self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b("\r\n"), self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)

    def _on_body(self, data):
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
        original_request = getattr(self.request, "original_request",
                                   self.request)
        if (self.request.follow_redirects and
            self.request.max_redirects > 0 and
            self.code in (301, 302, 303, 307)):
            new_request = copy.copy(self.request)
            new_request.url = urllib.parse.urljoin(self.request.url,
                                               self.headers["Location"])
            new_request.max_redirects -= 1
            del new_request.headers["Host"]
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # client SHOULD make a GET request
            if self.code == 303:
                new_request.method = "GET"
                new_request.body = None
                for h in ["Content-Length", "Content-Type",
                          "Content-Encoding", "Transfer-Encoding"]:
                    try:
                        del self.request.headers[h]
                    except KeyError:
Developer ID: EdwinGriffin, Project: TornadoSchoolWebsite, Lines of code: 67, Source: simple_httpclient.py



Note: the tornado.util.GzipDecompressor class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.

