This article collects typical usage examples of the Python function six.moves.urllib.request.urlretrieve. If you are wondering what urlretrieve does, how to call it, or what real-world usage looks like, the curated examples below should help.
A minimal sketch of the basic call comes first, followed by 20 code examples of the urlretrieve function selected from open-source projects and ordered by popularity.
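Before the project examples, here is a minimal, self-contained sketch of the basic urlretrieve call, including the optional reporthook callback that several examples below (Progbar, tqdm, download_progress_hook) build on. The URL and filename are placeholders; substitute a resource you actually want to fetch.

import sys

from six.moves.urllib.request import urlretrieve

def report(block_count, block_size, total_size):
    # reporthook signature: called with the number of blocks transferred
    # so far, the block size in bytes, and the total file size in bytes
    # (total_size is -1 if the server sends no Content-Length header).
    if total_size > 0:
        percent = min(100, block_count * block_size * 100 // total_size)
        sys.stderr.write("\rDownloaded {}%".format(percent))

# Placeholder URL and filename: replace with a real resource.
filename, headers = urlretrieve(
    "https://example.com/data.bin", "data.bin", reporthook=report)
print("\nSaved to", filename)

urlretrieve returns a (filename, headers) tuple; when a target filename is given, the first element is simply that path.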
Example 1: dcos_install_cli

import platform

from six.moves.urllib.request import urlretrieve

# CLIError and logger come from the surrounding azure-cli codebase.

def dcos_install_cli(install_location=None, client_version="1.8"):
    """
    Download the dcos command-line client from Mesosphere.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined "
            "from the current platform '{}'".format(system))
    base_url = "https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}"
    if system == "Windows":
        file_url = base_url.format("windows", client_version, "dcos.exe")
    elif system == "Linux":
        # TODO Support ARM CPU here
        file_url = base_url.format("linux", client_version, "dcos")
    elif system == "Darwin":
        file_url = base_url.format("darwin", client_version, "dcos")
    else:
        # The original message here ("Proxy server ... does not exist on the
        # cluster") was copy-pasted from an unrelated error; report the real
        # problem instead.
        raise CLIError("Unsupported platform: '{}'".format(system))
    logger.info("Downloading client to %s", install_location)
    try:
        urlretrieve(file_url, install_location)
    except IOError as err:
        raise CLIError("Connection error while attempting to download client ({})".format(err))

Author: Azure, Project: azure-cli, Lines: 26, Source: custom.py
Example 2: _find_jar

import os
from warnings import warn

# _gen_jar_locations is defined elsewhere in pims and yields candidate
# directories for loci_tools.jar.

def _find_jar(url=None):
    """
    Find the location of loci_tools.jar; if necessary, download it to a
    writeable location.
    """
    for loc in _gen_jar_locations():
        if os.path.isfile(os.path.join(loc, 'loci_tools.jar')):
            return os.path.join(loc, 'loci_tools.jar')

    warn('loci_tools.jar not found, downloading')
    for loc in _gen_jar_locations():
        # Check that the directory exists and has write access:
        if os.path.exists(loc) and os.access(loc, os.W_OK):
            break
        # If the directory is the pims folder and does not yet exist,
        # create it (when the parent is writeable):
        if os.path.basename(loc) == 'pims' and \
           os.access(os.path.dirname(loc), os.W_OK):
            os.mkdir(loc)
            break
    else:
        # The for/else branch runs only if no break fired, i.e. no
        # writeable location was found.
        raise IOError('No writeable location found. In order to use the '
                      'Bioformats reader, please download '
                      'loci_tools.jar to the pims program folder or one of '
                      'the locations provided by _gen_jar_locations().')

    from six.moves.urllib.request import urlretrieve
    if url is None:
        url = ('http://downloads.openmicroscopy.org/bio-formats/5.1.0/' +
               'artifacts/loci_tools.jar')
    urlretrieve(url, os.path.join(loc, 'loci_tools.jar'))

    return os.path.join(loc, 'loci_tools.jar')

Author: sciunto, Project: pims, Lines: 32, Source: bioformats.py
Example 3: load_cifar10

import os
import tarfile

import numpy as np
from six.moves.urllib import request

# unpickle is a small helper defined alongside this function; a sketch is
# given after the example.

def load_cifar10(datadir="cifar-10-batches-py"):
    # Download the CIFAR-10 dataset if it is not present yet
    if not os.path.exists(datadir):
        print("Downloading cifar-10...")
        request.urlretrieve("https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz", "cifar10.tar.gz")
        tar = tarfile.open("cifar10.tar.gz")
        tar.extractall()
        tar.close()

    train_data = []
    train_target = []

    # Load the training batches
    for i in range(1, 6):
        d = unpickle("%s/data_batch_%d" % (datadir, i))
        train_data.extend(d["data"])
        train_target.extend(d["labels"])

    # Load the test batch
    d = unpickle("%s/test_batch" % (datadir))
    test_data = d["data"]
    test_target = d["labels"]

    # Convert the data to float32 and the labels to int32 ndarrays
    train_data = np.array(train_data, dtype=np.float32)
    train_target = np.array(train_target, dtype=np.int32)
    test_data = np.array(test_data, dtype=np.float32)
    test_target = np.array(test_target, dtype=np.int32)

    # Normalize pixel values to the 0-1 range
    train_data /= 255.0
    test_data /= 255.0

    return train_data, test_data, train_target, test_target

Author: miyatin, Project: trial, Lines: 34, Source: cnn2.py
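The unpickle helper is referenced but not shown above. A minimal sketch that works under both Python 2 and 3 might look like this; the encoding='latin1' argument keeps the dictionary keys ("data", "labels") as str when a Python 2 pickle is loaded under Python 3. The helper in the original project may differ.

import pickle
import sys

def unpickle(path):
    # CIFAR-10 batches are Python 2 pickles; latin1 decoding keeps the
    # dict keys and byte payloads readable under Python 3.
    with open(path, 'rb') as f:
        if sys.version_info[0] >= 3:
            return pickle.load(f, encoding='latin1')
        return pickle.load(f)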
Example 4: get_mnist_file

import gzip
import os

from six.moves.urllib.request import urlretrieve

# Progbar is Mozi's progress-bar helper.

def get_mnist_file(fpath, origin):
    datadir = os.path.dirname(fpath)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    try:
        # Probe for the file. The original used a bare `except:`, which
        # would also swallow KeyboardInterrupt; catch IOError only, and
        # close the probe handle instead of leaking it.
        f = open(fpath)
        f.close()
    except IOError:
        print('Downloading data from', origin)

        global progbar
        progbar = None

        def dl_progress(count, block_size, total_size):
            global progbar
            if progbar is None:
                progbar = Progbar(total_size)
            else:
                progbar.update(count * block_size)

        urlretrieve(origin, fpath + '.gz', dl_progress)
        progbar = None

        fin = gzip.open(fpath + '.gz', 'rb')
        fout = open(fpath, 'wb')
        fout.write(fin.read())
        fin.close()
        fout.close()

    return fpath

Author: ColaWithIce, Project: Mozi, Lines: 29, Source: mnist_utils.py
Example 5: _cache_download

import os
import subprocess
import sys

from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlretrieve

# FTPErrors is a tuple of FTP-related exception classes defined elsewhere
# in planemo.

def _cache_download(url, filename, sha256sum=None):
    """Return the local path to a cached copy of the URL, using the given filename."""
    cache = os.environ.get("DOWNLOAD_CACHE", "./download_cache/")
    # TODO - expose this as a command line option
    if not os.path.isdir(cache):
        os.mkdir(cache)

    local = os.path.join(cache, filename)

    if not os.path.isfile(local):
        # Must download it...
        try:
            # TODO - log this nicely...
            sys.stderr.write("Downloading %s to %r\n" % (url, local))
            urlretrieve(url, local)
        except URLError:
            # Most likely the server is down; could be a bad URL in the XML action:
            raise RuntimeError("Unable to download %s" % url)
        except FTPErrors:
            # Most likely the server is down; could be a bad URL in the XML action:
            raise RuntimeError("Unable to download %s" % url)

        # Verifying the checksum is slow, so only do it on a fresh
        # download. Assume locally cached files are already OK.
        if sha256sum:
            # TODO - log this nicely...
            sys.stderr.write("Verifying checksum for %s\n" % filename)
            # check_output returns bytes on Python 3; decode before comparing
            # against the (str) expected checksum.
            filehash = subprocess.check_output(
                ['shasum', '-a', '256', local])[0:64].strip().decode("ascii")
            if filehash != sha256sum:
                raise RuntimeError("Checksum failure for %s, got %r but wanted %r"
                                   % (local, filehash, sha256sum))

    return local

Author: gregvonkuster, Project: planemo, Lines: 33, Source: base.py
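Shelling out to shasum ties the code above to platforms where that binary exists. A portable alternative, assuming nothing beyond the standard library, is hashlib:

import hashlib

def sha256_of_file(path, chunk_size=65536):
    # Stream the file in chunks so large downloads are never read into
    # memory all at once.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# Drop-in replacement for the subprocess call above:
# filehash = sha256_of_file(local)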
Example 6: download_one

import os
import time

from six.moves.urllib import error as urlerror
from six.moves.urllib import parse as urlparse
from six.moves.urllib import request as urlrequest

# logger is the module-level logging object.

def download_one(url, output_file, skip_existing=True):
    """Download a single URL.

    Parameters
    ----------
    url : str
        URL to download.
    output_file : str
        Path to save the downloaded file.
    skip_existing : bool, default=True
        If True, skip URLs whose output file already exists.

    Returns
    -------
    success : bool
        True if the file was downloaded successfully.
    """
    if os.path.exists(output_file) and skip_existing:
        print(" Skipping (exists): {}".format(url))
        # Treat an already-present file as success, matching the
        # documented bool return (the original returned None here).
        return True
    print("[{}] Fetching: {}".format(time.asctime(), url))
    try:
        surl = urlparse.quote(url, safe=':./')
        urlrequest.urlretrieve(surl, output_file)
    except urlerror.HTTPError:
        logger.warning("FAILED to download file at: {}".format(surl))
        logger.warning("\nOriginal link: {}\nOutput file:{}\n".format(
            url, output_file))
        logger.warning("Skipping...")
    finally:
        # Note: returning from finally also swallows any exception other
        # than the HTTPError handled above.
        return os.path.exists(output_file)

Author: faroit, Project: minst-dataset, Lines: 34, Source: download.py
Example 7: download_url

from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.request import urlretrieve

# compat_print, is_youtube_url and download_youtube_url are
# edx-downloader helpers.

def download_url(url, filename, headers, args):
    """
    Download the given url into filename.
    """
    if is_youtube_url(url):
        download_youtube_url(url, filename, headers, args)
    else:
        import ssl

        # FIXME: Ugly hack for coping with broken SSL sites:
        # https://www.cs.duke.edu/~angl/papers/imc10-cloudcmp.pdf
        #
        # We should really ask the user if they want to stop the downloads
        # or if they are OK proceeding without verification.
        #
        # Note that skipping verification by default could be a problem for
        # people's lives if they happen to live in dictatorial countries.
        #
        # Note: The mess with various exceptions being caught (and their
        # order) is due to different behaviors in different Python versions
        # (e.g., 2.7 vs. 3.4).
        try:
            urlretrieve(url, filename)
        except ssl.SSLError as e:
            compat_print('[warning] Got SSL error: %s' % e)
            raise e
        except HTTPError as e:
            compat_print('[warning] Got HTTP error: %s' % e)
            raise e
        except URLError as e:
            compat_print('[warning] Got URL error: %s' % e)
            raise e
        except IOError as e:
            compat_print('[warning] Got a connection error: %s' % e)
            raise e

Author: dbin25, Project: edx-downloader, Lines: 35, Source: edx_dl.py
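The FIXME above asks for a way to let the user explicitly opt out of certificate verification instead of failing outright. One possible shape for that opt-in, sketched here with a hypothetical skip_verify flag (not part of edx-downloader), installs an unverified default HTTPS context before retrying:

import ssl

from six.moves.urllib.request import urlretrieve

def retrieve_with_optional_verification(url, filename, skip_verify=False):
    # Only bypass certificate checks when the caller explicitly asked for
    # it; silently skipping verification invites man-in-the-middle attacks.
    if skip_verify:
        ssl._create_default_https_context = ssl._create_unverified_context
    urlretrieve(url, filename)

This is the same mechanism Example 9 below uses unconditionally; gating it behind an explicit flag is what the FIXME is asking for.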
Example 8: download_dbsnp_vcf

from six.moves.urllib.request import urlcleanup, urlretrieve
from tqdm import tqdm

# NCBI_VCF_TEMPLATE_URL is a module-level URL template; tqdm_hook adapts a
# tqdm bar to urlretrieve's reporthook (a sketch follows the example).

def download_dbsnp_vcf(dbsnp_build=None, genome_build=None, url=None, outpath=None):
    """
    Download the NCBI dbSNP VCF for a given human genome build and dbSNP build.

    Args:
        dbsnp_build: e.g. b147
        genome_build: e.g. GRCh37p13
        url: Direct URL to the file, e.g.
            ftp://ftp.ncbi.nlm.nih.gov/snp/organisms/human_9606_b147_GRCh37p13/VCF/00-All.vcf.gz
        outpath: Constructed from genome_build and dbsnp_build. If not given,
            a generic filename will be used.

    Returns:
        Name of the file into which the data was saved (constructed from the
        builds, or a generic name).
    """
    if url is None:
        if not genome_build.startswith("GRC"):
            raise ValueError("Genome build should begin with GRC")
        if not dbsnp_build.startswith("b"):
            raise ValueError("dbSNP build should look like b147, b148, etc.")
        url = NCBI_VCF_TEMPLATE_URL.format(dbsnp_build, genome_build)

    if outpath is None:
        if genome_build is None or dbsnp_build is None:
            outpath = "dbsnp.vcf.gz"
        else:
            outpath = "human_9606_{}_{}_All.vcf.gz".format(dbsnp_build, genome_build)

    with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
        urlcleanup()
        urlretrieve(url, filename=outpath, reporthook=tqdm_hook(t), data=None)

    return outpath

Author: welchr, Project: Swiss, Lines: 34, Source: create_data.py
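tqdm_hook is not shown on this page. tqdm's own documentation suggests a wrapper along these lines; this sketch follows that pattern, and the helper in Swiss may differ in detail:

def tqdm_hook(t):
    """Wrap a tqdm instance as a urlretrieve reporthook."""
    last_b = [0]

    def update_to(b=1, bsize=1, tsize=None):
        # b: blocks transferred so far; bsize: block size in bytes;
        # tsize: total size in bytes (None or -1 when unknown).
        if tsize not in (None, -1):
            t.total = tsize
        t.update((b - last_b[0]) * bsize)
        last_b[0] = b

    return update_to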
Example 9: download_numpy_wheel

import os
import ssl
import struct
import sys

from six.moves.urllib.request import urlretrieve

def download_numpy_wheel():
    base_url = os.getenv('NUMPY_URL')
    if base_url is None:
        raise ValueError('NUMPY_URL environment variable is missing.')

    version = '1.10.4+mkl'
    py = 'cp{0[0]}{0[1]}'.format(sys.version_info)
    if py not in {'cp27', 'cp34', 'cp35'}:
        print('NumPy wheel not available for {}'.format(py))
        return None

    bits = struct.calcsize('P') * 8
    if bits == 32:
        arch = 'win32'
    elif bits == 64:
        arch = 'win_amd64'
    else:
        raise ValueError("Couldn't determine 32/64 bits.")

    filename = 'numpy-{}-{}-none-{}.whl'.format(version, py, arch)

    directory = 'astrodynamics-numpy-wheels'
    # Guard the mkdir so a re-run does not crash on an existing directory.
    if not os.path.isdir(directory):
        os.mkdir(directory)

    filepath = os.path.join(directory, filename)
    url = base_url + filename

    # Disable SSL verification. This should never be done in real code;
    # it is tolerated here only because this is a throwaway build script.
    ssl._create_default_https_context = ssl._create_unverified_context

    urlretrieve(url, filepath)
    return filepath

Author: python-astrodynamics, Project: astrodynamics, Lines: 31, Source: install_numpy.py
Example 10: download_onnx_model

import os
import shutil
import tarfile
import tempfile

from six.moves.urllib.request import urlretrieve

# upload_onnx_model is defined elsewhere in the same script.

def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
    model_dir = os.path.join(zoo_dir, model_name)
    if os.path.exists(model_dir):
        if use_cache:
            upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
            return
        else:
            shutil.rmtree(model_dir)

    url = 'https://s3.amazonaws.com/download.onnx/models/latest/{}.tar.gz'.format(model_name)
    download_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        download_file.close()
        print('Downloading ONNX model {} from {} and saving it to {} ...\n'.format(
            model_name, url, download_file.name))
        urlretrieve(url, download_file.name)
        with tarfile.open(download_file.name) as t:
            print('Extracting ONNX model {} to {} ...\n'.format(model_name, zoo_dir))
            t.extractall(zoo_dir)
    except Exception as e:
        print('Failed to download/backup data for ONNX model {}: {}'.format(model_name, e))
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
    finally:
        os.remove(download_file.name)

    if not only_local:
        upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)

Author: gtgalone, Project: pytorch, Lines: 28, Source: update-models-from-caffe2.py
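tarfile.extractall trusts archive member names, so a malicious archive could write outside zoo_dir (the classic "tar slip"). When the archive source is not fully trusted, a defensive variant might validate members first. This sketch is an editorial addition, not part of the original script:

import os
import tarfile

def safe_extractall(archive_path, dest_dir):
    with tarfile.open(archive_path) as tar:
        dest = os.path.realpath(dest_dir)
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(dest, member.name))
            # Reject any member that would resolve outside dest_dir.
            if target != dest and not target.startswith(dest + os.sep):
                raise ValueError("Unsafe path in archive: {}".format(member.name))
        tar.extractall(dest_dir)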
Example 11: maybe_download

import os

from six.moves.urllib.request import urlretrieve

# data_root, url and download_progress_hook are module-level names defined
# elsewhere in the script (the hook has the reporthook signature shown at
# the top of this article).

def maybe_download(filename, expected_bytes, force=False):
    '''
    Download the file if it does not already exist.

    @param:
        filename: Name of the file to download.
        expected_bytes: The expected size of the downloaded file.
        force: Download even if the file already exists.
    '''
    # Destination file path
    dest_filename = os.path.join(data_root, filename)
    # Download if forced or if the file is missing
    if force or not os.path.exists(dest_filename):
        print('Attempting to download: {}'.format(filename))
        urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    else:
        print('File {} exists!'.format(filename))
    # Check whether the file is complete
    statinfo = os.stat(dest_filename)
    if expected_bytes == statinfo.st_size:
        print('File {} is downloaded completely!'.format(filename))
    else:
        raise Exception(
            'File {} is not downloaded completely!'.format(filename)
        )
    return dest_filename

Author: MrH2S, Project: py, Lines: 28, Source: notMNIST1.py
Example 12: k8s_install_cli

import os
import platform
import stat

from six.moves.urllib.request import urlopen, urlretrieve

# CLIError and logger come from the surrounding azure-cli codebase.

def k8s_install_cli(client_version='latest', install_location=None):
    """
    Download the kubectl command line from Kubernetes.
    """
    if client_version == 'latest':
        version = urlopen('https://storage.googleapis.com/kubernetes-release/release/stable.txt').read()
        client_version = version.decode('UTF-8').strip()

    file_url = ''
    system = platform.system()
    base_url = 'https://storage.googleapis.com/kubernetes-release/release/{}/bin/{}/amd64/{}'
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # As in Example 1, the original error text mentioned a proxy server;
        # report the actual unsupported-platform condition instead.
        raise CLIError("Unsupported platform: '{}'".format(system))

    logger.warning('Downloading client to %s from %s', install_location, file_url)
    try:
        urlretrieve(file_url, install_location)
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))

Author: Visual-Studio-China, Project: azure-cli-int, Lines: 29, Source: custom.py
Example 13: _download_database_template

import shutil

from six.moves.urllib.request import urlopen, urlretrieve

# DOWNLOADS_URL, LATEST_URL, DOWNLOADABLE_MIGRATION_VERSIONS and
# _newest_migration_version are defined elsewhere in planemo.

def _download_database_template(
    galaxy_root,
    database_location,
    latest=False,
    galaxy_sqlite_database=None
):
    if galaxy_sqlite_database is not None:
        shutil.copyfile(galaxy_sqlite_database, database_location)
        return True

    if latest or not galaxy_root:
        # urlopen().read() returns bytes on Python 3; decode before
        # concatenating with the (str) URL prefix.
        template_url = DOWNLOADS_URL + urlopen(LATEST_URL).read().decode("utf-8")
        urlretrieve(template_url, database_location)
        return True

    newest_migration = _newest_migration_version(galaxy_root)
    download_migration = None
    for migration in DOWNLOADABLE_MIGRATION_VERSIONS:
        if newest_migration > migration:
            download_migration = migration
            break

    if download_migration:
        download_name = "db_gx_rev_0%d.sqlite" % download_migration
        download_url = DOWNLOADS_URL + download_name
        urlretrieve(download_url, database_location)
        return True
    else:
        return False

Author: lecorguille, Project: planemo, Lines: 29, Source: config.py
Example 14: download_zip

import os
from tempfile import NamedTemporaryFile
from zipfile import ZipFile

from six.moves.urllib.request import urlretrieve

# make_data_home, progress and logger are defined elsewhere in xtas.

def download_zip(url, name=None, check_dir=None):
    """Download and unzip a zip file from url to $XTAS_DATA.

    Does nothing if $XTAS_DATA/check_dir exists.

    Parameters
    ----------
    url : string
        URL of resource.
    name : string
        Used by the logger, to display "Downloading [name]".
    check_dir : string
        Name of directory to which the resource is unzipped.
        Derived from the URL by default.
    """
    if check_dir is None:
        check_dir = os.path.basename(url)
        if check_dir.endswith('.zip'):
            check_dir = check_dir[:-4]
    if name is None:
        name = url

    home = make_data_home()
    check_dir = os.path.join(home, check_dir)

    # XXX race condition with multiple workers
    if not os.path.exists(check_dir):
        with NamedTemporaryFile() as temp:
            logger.info("Downloading %s", name)
            urlretrieve(url, temp.name, reporthook=progress)
            with ZipFile(temp.name) as z:
                z.extractall(path=home)

    return check_dir

Author: LourensVeen, Project: xtas, Lines: 33, Source: _downloader.py
Example 15: download_and_decompress

import os
import tarfile
import tempfile
from os.path import basename

from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlretrieve

# progress_bar_wrapper is NiftyNet's reporthook helper.

def download_and_decompress(url, download_path, verbose=True):
    """
    Download an archive from a resource URL and
    decompress/unarchive it to the given location.

    :param url: URL of the compressed file to download
    :param download_path: location where the file should be extracted
    """
    # Extract the filename from the URL
    parsed = urlparse(url)
    filename = basename(parsed.path)

    # Ensure the output directory exists
    if not os.path.exists(download_path):
        os.makedirs(download_path)

    # Get a temporary file path for the compressed file download
    downloaded_file = os.path.join(tempfile.gettempdir(), filename)

    # Download the file
    if verbose:
        urlretrieve(url, downloaded_file, reporthook=progress_bar_wrapper)
    else:
        urlretrieve(url, downloaded_file)

    # Decompress and extract all files to the specified local path
    tar = tarfile.open(downloaded_file, "r")
    tar.extractall(download_path)
    tar.close()

    # Remove the downloaded file
    os.remove(downloaded_file)

Author: fepegar, Project: NiftyNet, Lines: 33, Source: download.py
Example 16: download_file

import os
import tempfile
from os.path import basename
from shutil import move

from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlretrieve

# progress_bar_wrapper is NiftyNet's reporthook helper.

def download_file(url, download_path):
    """
    Download a file from a resource URL to the given location.

    :param url: URL of the file to download
    :param download_path: location where the file should be saved
    """
    # Extract the filename from the URL
    parsed = urlparse(url)
    filename = basename(parsed.path)

    # Ensure the output directory exists
    if not os.path.exists(download_path):
        os.makedirs(download_path)

    # Get a temporary file path for the download
    downloaded_file = os.path.join(tempfile.gettempdir(), filename)

    # Download the file
    urlretrieve(url, downloaded_file, reporthook=progress_bar_wrapper)

    # Move the file to the destination folder
    destination_path = os.path.join(download_path, filename)
    move(downloaded_file, destination_path)

Author: fepegar, Project: NiftyNet, Lines: 25, Source: download.py
Example 17: download_url

import logging

from six.moves.urllib.request import urlretrieve

# is_youtube_url and download_youtube_url are edx-dl helpers.

def download_url(url, filename, headers, args):
    """
    Download the given url into filename.
    """
    if is_youtube_url(url):
        download_youtube_url(url, filename, headers, args)
    else:
        import ssl

        # FIXME: Ugly hack for coping with broken SSL sites:
        # https://www.cs.duke.edu/~angl/papers/imc10-cloudcmp.pdf
        #
        # We should really ask the user if they want to stop the downloads
        # or if they are OK proceeding without verification.
        #
        # Note that skipping verification by default could be a problem for
        # people's lives if they happen to live in dictatorial countries.
        #
        # Note: The mess with various exceptions being caught (and their
        # order) is due to different behaviors in different Python versions
        # (e.g., 2.7 vs. 3.4).
        try:
            urlretrieve(url, filename)
        except Exception as e:
            logging.warn('Got SSL/Connection error: %s', e)
            if not args.ignore_errors:
                logging.warn('Hint: if you want to ignore this error, add '
                             '--ignore-errors option to the command line')
                raise e
            else:
                logging.warn('SSL/Connection error ignored: %s', e)

Author: avfx, Project: edx-dl, Lines: 31, Source: edx_dl.py
Example 18: fetch_data

import logging
import os

from six.moves.urllib import request

# DATA_URL is a module-level constant in taxtastic's ncbi module.

def fetch_data(dest_dir='.', clobber=False, url=DATA_URL):
    """
    Download the data from NCBI required to generate the local taxonomy
    database. The default url is ncbi.DATA_URL.

    * dest_dir - directory in which to save output files (created if necessary).
    * clobber - don't download if False and the target of url exists in dest_dir.
    * url - url to the archive; default is ncbi.DATA_URL.

    Returns (fname, downloaded), where fname is the name of the
    downloaded zip archive, and downloaded is True if a new file was
    downloaded, False otherwise.

    See ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump_readme.txt
    """
    dest_dir = os.path.abspath(dest_dir)
    try:
        os.mkdir(dest_dir)
    except OSError:
        pass

    fout = os.path.join(dest_dir, os.path.split(url)[-1])

    if os.access(fout, os.F_OK) and not clobber:
        downloaded = False
        logging.info(fout + ' exists; not downloading')
    else:
        downloaded = True
        logging.info('downloading {} to {}'.format(url, fout))
        request.urlretrieve(url, fout)

    return (fout, downloaded)

Author: fhcrc, Project: taxtastic, Lines: 33, Source: ncbi.py
Example 19: _download_log_files

# client is an Azure SDK server-logs operations object.

def _download_log_files(
        client,
        resource_group_name,
        server_name,
        file_name):
    """
    Download log file(s) of a given server to the current directory.

    :param resource_group_name: The name of the resource group that
        contains the resource. You can obtain this value from the Azure
        Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: Name of the server.
    :type server_name: str
    :param file_name: Space-separated list of log filenames on the server to download.
    :type file_name: str
    """
    from six.moves.urllib.request import urlretrieve  # pylint: disable=import-error

    # list all files
    files = client.list_by_server(resource_group_name, server_name)

    for f in files:
        if f.name in file_name:
            urlretrieve(f.url, f.name)

Author: LukaszStem, Project: azure-cli, Lines: 25, Source: custom.py
Example 20: get_file

import os
import tarfile

from six.moves.urllib.request import urlretrieve

# Progbar is Mozi's progress-bar helper.

def get_file(fpath, origin, untar=False):
    datadir = os.path.dirname(fpath)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    if not os.path.exists(fpath):
        print('Downloading data from', origin)

        global progbar
        progbar = None

        def dl_progress(count, block_size, total_size):
            global progbar
            if progbar is None:
                progbar = Progbar(total_size)
            else:
                progbar.update(count * block_size)

        urlretrieve(origin, fpath, dl_progress)
        progbar = None

    if untar:
        tfile = tarfile.open(fpath, 'r:gz')
        names = tfile.getnames()
        dirname = names[0]
        not_exists = [int(not os.path.exists("{}/{}".format(datadir, fname))) for fname in names]
        if sum(not_exists) > 0:
            print('Untarring file...')
            tfile.extractall(path=datadir)
        else:
            print('Files already untarred')
        tfile.close()
        return "{}/{}".format(datadir, dirname)

    # Without this fall-through return the function would return None
    # when untar is False.
    return fpath

Author: WuCPMark, Project: Mozi, Lines: 33, Source: utils.py
Note: The six.moves.urllib.request.urlretrieve examples in this article were collected by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution must follow each project's License. Do not republish without permission.