本文整理汇总了Python中werkzeug._internal._decode_unicode函数的典型用法代码示例。如果您正苦于以下问题:Python _decode_unicode函数的具体用法?Python _decode_unicode怎么用?Python _decode_unicode使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了_decode_unicode函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: uri_to_iri
def uri_to_iri(uri, charset = 'utf-8', errors = 'ignore'):
    """Convert a URI in *charset* into an IRI (Python 2 werkzeug helper).

    :param uri: the URI to convert.
    :param charset: the charset the URI was encoded with.
    :param errors: the error handling used while decoding.
    """
    fixed = url_fix(str(uri), charset)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(fixed)
    scheme = _decode_unicode(scheme, 'ascii', errors)
    try:
        hostname = hostname.decode('idna')
    except UnicodeError:
        # the idna codec has no error handling of its own; fall back to
        # a plain ascii decode unless the caller asked for strictness
        if errors not in ('ignore', 'replace'):
            raise
        hostname = hostname.decode('ascii', errors)
    if auth:
        user, _sep, secret = auth.partition(':')
        userinfo = _decode_unicode(_unquote(user), charset, errors)
        if secret:
            userinfo += u':' + _decode_unicode(_unquote(secret), charset, errors)
        hostname = userinfo + u'@' + hostname
    if port:
        hostname += u':' + port.decode(charset, errors)
    path = _decode_unicode(_unquote(path, '/;?'), charset, errors)
    query = _decode_unicode(_unquote(query, ';/?:@&=+,$'), charset, errors)
    return urlparse.urlunsplit([scheme,
     hostname,
     path,
     query,
     fragment])
开发者ID:connoryang,项目名称:dec-eve-serenity,代码行数:29,代码来源:urls.py
示例2: uri_to_iri
def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""Convert a URI in the given charset to an IRI.

    Example::

        >>> uri_to_iri('http://xn--n3h.net/')
        u'http://\u2603.net/'

    Query strings are unquoted but otherwise left unchanged.

    .. versionadded:: 0.6

    :param uri: the URI to convert.
    :param charset: the charset of the URI.
    :param errors: the error handling on decode.
    """
    uri = url_fix(str(uri), charset)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(uri)
    scheme = _decode_unicode(scheme, 'ascii', errors)
    try:
        hostname = hostname.decode('idna')
    except UnicodeError:
        # the idna codec raised and supports no error handling of its
        # own, so emulate it with a plain ascii decode
        if errors not in ('ignore', 'replace'):
            raise
        hostname = hostname.decode('ascii', errors)
    if ':' in hostname:
        # a bare IPv6 literal must be re-wrapped in brackets
        hostname = '[' + hostname + ']'
    if auth:
        user, _sep, secret = auth.partition(':')
        userinfo = _decode_unicode(_unquote(user), charset, errors)
        if secret:
            userinfo += u':' + _decode_unicode(_unquote(secret),
                                               charset, errors)
        hostname = userinfo + u'@' + hostname
    if port:
        # the port ought to be numeric, but decode defensively
        hostname += u':' + port.decode(charset, errors)
    path = _decode_unicode(_unquote(path, '/;?'), charset, errors)
    query = _decode_unicode(_unquote(query, ';/?:@&=+,$'),
                            charset, errors)
    return urlparse.urlunsplit([scheme, hostname, path, query, fragment])
开发者ID:glyphobet,项目名称:werkzeug,代码行数:57,代码来源:urls.py
示例3: from_file
def from_file(cls, file, charset='utf-8', errors='strict',
              unicode_mode=True, encoding=None):
    """Load a template from a file.

    .. versionchanged:: 0.5
       The encoding parameter was renamed to charset.

    :param file: a filename or file object to load the template from.
    :param charset: the charset of the template to load.
    :param errors: the error behavior of the charset decoding.
    :param unicode_mode: set to `False` to disable unicode mode.
    :param encoding: deprecated alias for `charset`.
    :return: a template
    """
    if encoding is not None:
        from warnings import warn
        warn(DeprecationWarning('the encoding parameter is deprecated. '
                                'use charset instead.'), stacklevel=2)
        charset = encoding
    close = False
    # BUG FIX: `f` was previously only assigned inside the isinstance
    # branch, so passing an already-open file object raised NameError.
    f = file
    if isinstance(file, basestring):
        f = open(file, 'r')
        close = True
    try:
        data = _decode_unicode(f.read(), charset, errors)
    finally:
        # only close handles we opened ourselves
        if close:
            f.close()
    return cls(data, getattr(f, 'name', '<template>'), charset,
               errors, unicode_mode)
开发者ID:AndryulE,项目名称:kitsune,代码行数:29,代码来源:templates.py
示例4: parse_cookie
def parse_cookie(header, charset="utf-8", errors="replace", cls=None):
    """Parse a cookie from a header string or a WSGI environ.

    In strict mode a :exc:`HTTPUnicodeError` is raised for undecodable
    values; ``'ignore'`` and ``'replace'`` degrade gracefully.

    .. versionchanged:: 0.5
       Returns a :class:`TypeConversionDict`; the `cls` parameter was
       added.

    :param header: the cookie header string, or a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: optional dict class for the result; defaults to
        :class:`TypeConversionDict`.
    """
    if isinstance(header, dict):
        header = header.get("HTTP_COOKIE", "")
    if cls is None:
        cls = TypeConversionDict
    jar = _ExtendedCookie()
    jar.load(header)
    # the extended morsel/cookie convert CookieErrors into `None`
    # values, which are filtered out here
    decoded = dict(
        (name, _decode_unicode(unquote_header_value(morsel.value),
                               charset, errors))
        for name, morsel in jar.iteritems()
        if morsel.value is not None)
    return cls(decoded)
开发者ID:carriercomm,项目名称:glasshouse,代码行数:35,代码来源:http.py
示例5: path
def path(self):
    """Requested path as unicode.

    Like ``PATH_INFO`` from the WSGI environ, but guaranteed to carry a
    leading slash even when the URL root itself is requested.
    """
    raw = self.environ.get('PATH_INFO') or ''
    normalized = '/' + raw.lstrip('/')
    return _decode_unicode(normalized, self.charset, self.encoding_errors)
开发者ID:danaspiegel,项目名称:softball_stat_manager,代码行数:7,代码来源:wrappers.py
示例6: from_file
def from_file(cls, file, charset='utf-8', errors='strict',
              unicode_mode=True):
    """Load a template from a file.

    .. versionchanged:: 0.5
       The encoding parameter was renamed to charset.

    :param file: a filename or file object to load the template from.
    :param charset: the charset of the template to load.
    :param errors: the error behavior of the charset decoding.
    :param unicode_mode: set to `False` to disable unicode mode.
    :return: a template
    """
    opened_here = isinstance(file, basestring)
    fh = open(file, 'r') if opened_here else file
    try:
        data = _decode_unicode(fh.read(), charset, errors)
    finally:
        # only close handles this function opened itself
        if opened_here:
            fh.close()
    return cls(data, getattr(fh, 'name', '<template>'), charset,
               errors, unicode_mode)
开发者ID:EnTeQuAk,项目名称:werkzeug,代码行数:25,代码来源:templates.py
示例7: __init__
def __init__(self, source, filename = '<template>', charset = 'utf-8', errors = 'strict', unicode_mode = True):
    """Compile a template from *source*.

    :param source: the template source (byte string or unicode).
    :param filename: name used for error reporting.
    :param charset: charset used to decode byte-string sources.
    :param errors: error handling strategy for the decode.
    :param unicode_mode: set to `False` to disable unicode mode.
    """
    if isinstance(source, str):
        # decode byte strings up front so the tokenizer sees unicode
        source = _decode_unicode(source, charset, errors)
    if isinstance(filename, unicode):
        filename = filename.encode('utf-8')
    normalized = u'\n'.join(source.splitlines())
    tokens = tokenize(normalized, filename)
    tree = Parser(tokens, filename).parse()
    self.code = TemplateCodeGenerator(tree, filename).getCode()
    self.filename = filename
    self.charset = charset
    self.errors = errors
    self.unicode_mode = unicode_mode
开发者ID:connoryang,项目名称:dec-eve-serenity,代码行数:11,代码来源:templates.py
示例8: start_file_streaming
def start_file_streaming(self, filename, headers, total_content_length):
    """Decode the part filename and ask the stream factory for a
    writable container for the uploaded file.

    :param filename: raw filename from the multipart headers.
    :param headers: the headers of this multipart section.
    :param total_content_length: length of the whole request body.
    :return: ``(decoded_filename, container)`` tuple.
    """
    name = self._fix_ie_filename(
        _decode_unicode(filename, self.charset, self.errors))
    try:
        part_length = int(headers['content-length'])
    except (KeyError, ValueError):
        # missing or malformed content-length: treat the part as empty
        part_length = 0
    container = self.stream_factory(total_content_length,
                                    headers.get('content-type'),
                                    name, part_length)
    return name, container
开发者ID:AnIrishDuck,项目名称:werkzeug,代码行数:11,代码来源:formparser.py
示例9: __init__
def __init__(self, source, filename="<template>", charset="utf-8", errors="strict", unicode_mode=True):
    """Compile *source* into template code.

    :param source: template source, byte string or unicode.
    :param filename: name used for error reporting.
    :param charset: charset used when decoding a byte-string source.
    :param errors: error handling strategy for the decode.
    :param unicode_mode: set to `False` to disable unicode mode.
    """
    if isinstance(source, str):
        # byte strings are decoded first so tokenizing works on unicode
        source = _decode_unicode(source, charset, errors)
    if isinstance(filename, unicode):
        filename = filename.encode("utf-8")
    stream = tokenize(u"\n".join(source.splitlines()), filename)
    root = Parser(stream, filename).parse()
    self.code = TemplateCodeGenerator(root, filename).getCode()
    self.filename = filename
    self.charset = charset
    self.errors = errors
    self.unicode_mode = unicode_mode
开发者ID:bguided,项目名称:synctester,代码行数:11,代码来源:templates.py
示例10: from_file
def from_file(cls, file, charset = 'utf-8', errors = 'strict', unicode_mode = True):
    """Load a template from a filename or an open file object.

    :param file: a filename or file object to load the template from.
    :param charset: the charset of the template to load.
    :param errors: the error behavior of the charset decoding.
    :param unicode_mode: set to `False` to disable unicode mode.
    :return: a template
    """
    close = False
    # BUG FIX: `f` was previously unbound when `file` was already an
    # open file object, raising NameError at the read below.
    f = file
    if isinstance(file, basestring):
        f = open(file, 'r')
        close = True
    try:
        data = _decode_unicode(f.read(), charset, errors)
    finally:
        # only close handles we opened ourselves
        if close:
            f.close()
    return cls(data, getattr(f, 'name', '<template>'), charset, errors, unicode_mode)
开发者ID:connoryang,项目名称:dec-eve-serenity,代码行数:12,代码来源:templates.py
示例11: parse_cookie
def parse_cookie(header, charset = 'utf-8', errors = 'ignore', cls = None):
    """Parse a cookie header string (or WSGI environ) into a dict-like
    object.

    :param header: the cookie header, or a WSGI environment dict.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: optional dict class; defaults to
        :class:`TypeConversionDict`.
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    dict_cls = TypeConversionDict if cls is None else cls
    jar = _ExtendedCookie()
    jar.load(header)
    items = {}
    for name, morsel in jar.iteritems():
        # morsels whose value is None came from CookieErrors; skip them
        if morsel.value is None:
            continue
        items[name] = _decode_unicode(unquote_header_value(morsel.value),
                                      charset, errors)
    return dict_cls(items)
开发者ID:Pluckyduck,项目名称:eve,代码行数:13,代码来源:utils.py
示例12: url_unquote_plus
def url_unquote_plus(s, charset='utf-8', errors='ignore'):
    """URL-decode a single string, additionally turning ``+`` into
    whitespace.

    Encoding errors are ignored by default; pass ``'replace'`` or
    ``'strict'`` for different behavior (strict mode raises a
    `HTTPUnicodeError`).

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    unquoted = _unquote_plus(s)
    return _decode_unicode(unquoted, charset, errors)
开发者ID:t11e,项目名称:werkzeug,代码行数:13,代码来源:urls.py
示例13: url_unquote
def url_unquote(s, charset='utf-8', errors='replace'):
    """URL-decode a single string with the given decoding.

    Pass ``'ignore'``, ``'replace'`` or ``'strict'`` as *errors* to
    control decode failures; strict mode raises a `HTTPUnicodeError`.

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    # unicode input is first re-encoded so unquoting operates on bytes
    raw = s.encode(charset) if isinstance(s, unicode) else s
    return _decode_unicode(_unquote(raw), charset, errors)
开发者ID:Chitrank-Dixit,项目名称:werkzeug,代码行数:14,代码来源:urls.py
示例14: _url_decode_impl
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty,
                     errors):
    """Yield decoded ``(key, value)`` pairs from an iterable of raw
    ``key=value`` query-string fragments.

    Fragments without ``=`` are skipped unless *include_empty* is true,
    in which case they yield an empty value.
    """
    for raw in pair_iter:
        if not raw:
            continue
        key, sep, value = raw.partition('=')
        if not sep and not include_empty:
            # bare key with empty values disabled: drop it
            continue
        key = _unquote_plus(key)
        if decode_keys:
            key = _decode_unicode(key, charset, errors)
        yield key, url_unquote_plus(value, charset, errors)
开发者ID:Chitrank-Dixit,项目名称:werkzeug,代码行数:16,代码来源:urls.py
示例15: url_decode
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='ignore', separator='&', cls=None):
    """Parse a querystring and return it as :class:`MultiDict`. Per default
    only values are decoded into unicode strings. If `decode_keys` is set to
    `True` the same will happen for keys.

    Per default a missing value for a key will default to an empty key. If
    you don't want that behavior you can set `include_empty` to `False`.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported. If you want to
       use ";" instead a different `separator` can be provided.
       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.
    :param decode_keys: set to `True` if you want the keys to be decoded
                        as well.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    result = []
    for pair in str(s).split(separator):
        if not pair:
            continue
        if '=' in pair:
            key, value = pair.split('=', 1)
        else:
            # BUG FIX: `include_empty` was documented but never checked;
            # honor it the same way `_url_decode_impl` does.
            if not include_empty:
                continue
            key = pair
            value = ''
        key = _unquote_plus(key)
        if decode_keys:
            key = _decode_unicode(key, charset, errors)
        result.append((key, url_unquote_plus(value, charset, errors)))
    return cls(result)
开发者ID:t11e,项目名称:werkzeug,代码行数:47,代码来源:urls.py
示例16: url_decode
def url_decode(s, charset = 'utf-8', decode_keys = False, include_empty = True, errors = 'ignore', separator = '&', cls = None):
    """Parse a querystring into a :class:`MultiDict` (or *cls*).

    :param s: the query string to decode.
    :param charset: the charset of the query string.
    :param decode_keys: set to `True` to decode keys to unicode as well.
    :param include_empty: set to `False` to drop keys without a value.
    :param errors: the decoding error behavior.
    :param separator: the pair separator, defaults to ``&``.
    :param cls: optional dict class; defaults to :class:`MultiDict`.
    """
    if cls is None:
        cls = MultiDict
    result = []
    for pair in str(s).split(separator):
        if not pair:
            continue
        if '=' in pair:
            key, value = pair.split('=', 1)
        else:
            # BUG FIX: `include_empty` was accepted but never consulted;
            # skip bare keys when empty values are disabled, matching
            # `_url_decode_impl`.
            if not include_empty:
                continue
            key = pair
            value = ''
        key = _unquote_plus(key)
        if decode_keys:
            key = _decode_unicode(key, charset, errors)
        result.append((key, url_unquote_plus(value, charset, errors)))
    return cls(result)
开发者ID:connoryang,项目名称:dec-eve-serenity,代码行数:18,代码来源:urls.py
示例17: parse_parts
def parse_parts(self, file, boundary, content_length):
    """Generate ``('file', (name, val))`` and ``('form', (name, val))``
    tuples from the event stream produced by ``self.parse_lines``.

    File parts are streamed into a container from the stream factory;
    form parts are buffered in memory (subject to
    ``max_form_memory_size``) and decoded with the part's charset.
    """
    in_memory = 0
    for ellt, ell in self.parse_lines(file, boundary, content_length):
        if ellt == _begin_file:
            # a new file upload starts: route writes into the factory
            # container returned by start_file_streaming
            headers, name, filename = ell
            is_file = True
            guard_memory = False
            filename, container = self.start_file_streaming(
                filename, headers, content_length)
            _write = container.write
        elif ellt == _begin_form:
            # a new form field starts: buffer chunks in a plain list
            headers, name = ell
            is_file = False
            container = []
            _write = container.append
            guard_memory = self.max_form_memory_size is not None
        elif ellt == _cont:
            _write(ell)
            # if we write into memory and there is a memory size limit we
            # count the number of bytes in memory and raise an exception if
            # there is too much data in memory.
            if guard_memory:
                in_memory += len(ell)
                if in_memory > self.max_form_memory_size:
                    self.in_memory_threshold_reached(in_memory)
        elif ellt == _end:
            if is_file:
                # rewind so the consumer reads the file from the start
                container.seek(0)
                yield ('file',
                       (name, FileStorage(container, filename, name,
                                          headers=headers)))
            else:
                part_charset = self.get_part_charset(headers)
                yield ('form',
                       (name, _decode_unicode(''.join(container),
                                              part_charset, self.errors)))
开发者ID:Chitrank-Dixit,项目名称:werkzeug,代码行数:43,代码来源:formparser.py
示例18: parse_multipart
def parse_multipart(
file,
boundary,
content_length,
stream_factory=None,
charset="utf-8",
errors="ignore",
buffer_size=10 * 1024,
max_form_memory_size=None,
):
"""Parse a multipart/form-data stream. This is invoked by
:func:`utils.parse_form_data` if the content type matches. Currently it
exists for internal usage only, but could be exposed as separate
function if it turns out to be useful and if we consider the API stable.
"""
# XXX: this function does not support multipart/mixed. I don't know of
# any browser that supports this, but it should be implemented
# nonetheless.
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, "buffer size has to be at least 1KB"
if stream_factory is None:
stream_factory = default_stream_factory
if not boundary:
raise ValueError("Missing boundary")
if not is_valid_multipart_boundary(boundary):
raise ValueError("Invalid boundary: %s" % boundary)
if len(boundary) > buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happen is for non debug builds where
# the assert i skipped.
raise ValueError("Boundary longer than buffer size")
total_content_length = content_length
next_part = "--" + boundary
last_part = next_part + "--"
form = []
files = []
in_memory = 0
# convert the file into a limited stream with iteration capabilities
file = LimitedStream(file, content_length)
iterator = chain(make_line_iter(file, buffer_size=buffer_size), _empty_string_iter)
try:
terminator = _find_terminator(iterator)
if terminator != next_part:
raise ValueError("Expected boundary at start of multipart data")
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get("content-disposition")
if disposition is None:
raise ValueError("Missing Content-Disposition header")
disposition, extra = parse_options_header(disposition)
name = extra.get("name")
transfer_encoding = headers.get("content-transfer-encoding")
try_decode = transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings
filename = extra.get("filename")
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
is_file = False
container = []
_write = container.append
guard_memory = max_form_memory_size is not None
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
content_type = headers.get("content-type")
content_type = parse_options_header(content_type)[0] or "text/plain"
is_file = True
guard_memory = False
if filename is not None:
filename = _fix_ie_filename(_decode_unicode(filename, charset, errors))
try:
content_length = int(headers["content-length"])
except (KeyError, ValueError):
content_length = 0
container = stream_factory(total_content_length, content_type, filename, content_length)
_write = container.write
buf = ""
for line in iterator:
if not line:
raise ValueError("unexpected end of stream")
if line[:2] == "--":
#.........这里部分代码省略.........
开发者ID:mjlarrabee,项目名称:WebPutty,代码行数:101,代码来源:formparser.py
示例19: path
def path(self):
    """Requested path as unicode, always carrying a leading slash."""
    info = self.environ.get("PATH_INFO") or ""
    normalized = "/" + info.lstrip("/")
    return _decode_unicode(normalized, self.url_charset, self.encoding_errors)
开发者ID:Reve,项目名称:eve,代码行数:3,代码来源:wrappers.py
示例20: parse
#.........这里部分代码省略.........
buffer_size=self.buffer_size),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
part_charset = self.get_part_charset(headers)
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
buf = ''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == '--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
try:
line = line.decode(transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
_write(buf)
buf = ''
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
# if something else than a newline is in there we write it
# out.
if line[-2:] == '\r\n':
buf = '\r\n'
cutoff = -2
else:
buf = line[-1]
cutoff = -1
_write(line[:cutoff])
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(line)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
else: # pragma: no cover
raise ValueError('unexpected end of part')
# if we have a leftover in the buffer that is not a newline
# character we have to flush it, otherwise we will chop of
# certain values.
if buf not in ('', '\r', '\n', '\r\n'):
_write(buf)
if is_file:
container.seek(0)
files.append((name, FileStorage(container, filename, name,
headers=headers)))
else:
form.append((name, _decode_unicode(''.join(container),
part_charset, self.errors)))
return self.cls(form), self.cls(files)
开发者ID:AnIrishDuck,项目名称:werkzeug,代码行数:101,代码来源:formparser.py
注:本文中的werkzeug._internal._decode_unicode函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论