
Python wget.download Function Code Examples


This article collects typical code examples of the wget.download function in Python. If you have been wondering what exactly wget.download does, how to call it, or what real uses of it look like, the curated examples below should help.



The following 20 code examples of the download function are presented, sorted by popularity by default.
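
Before diving into the examples, here is a minimal sketch of the basic call pattern. The URL and output path below are placeholders rather than values taken from the examples; wget's download(url, out=None, bar=...) returns the path of the saved file.

import os
import wget

# Minimal usage sketch with a placeholder URL.
# download() returns the path of the saved file; when out is omitted,
# the name is derived from the URL and the file lands in the current directory.
url = "https://example.com/archive.tar.gz"
os.makedirs("downloads", exist_ok=True)  # wget.download does not create directories
filename = wget.download(url, out="downloads/archive.tar.gz")
print("\nSaved to", filename)  # the progress bar does not terminate its own line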

Example 1: __make_icon_osx

def __make_icon_osx():
    lisa_shortcut = op.expanduser("~/Desktop/lisa")
    if not os.path.exists(lisa_shortcut):
        with open(lisa_shortcut, 'w') as outfile:
            outfile.write(
            "\
#!/bin/bash\n\
export PATH=$HOME/miniconda2/bin:$HOME/anaconda2/bin:$HOME/miniconda/bin:$HOME/anaconda/bin:$PATH\n\
lisa"
            )
        os.chmod(lisa_shortcut,
                 stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                 stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                 stat.S_IWUSR | stat.S_IWGRP
                 )

    import wget
    lisa_icon_path = op.expanduser("~/lisa_data/.lisa/LISA256.icns")
    if not os.path.exists(lisa_icon_path):
        try:
            wget.download(
                "https://raw.githubusercontent.com/mjirik/lisa/master/applications/LISA256.icns",
                out=lisa_icon_path
            )
        except Exception:
            logger.warning('logo download failed')
Developer: andrlikjirka, Project: lisa, Lines: 27, Source: lisa_data.py


Example 2: download_file

def download_file(filename, destination):
    """Download remote file using the `wget` Python module."""
    destdir = os.path.split(destination)[0]
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    url = get_remote_url(filename)
    wget.download(url, out=destination)
Developer: UNH-CORE, Project: RM2-tow-tank, Lines: 7, Source: download.py
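
A detail worth noting here: out= accepts either a directory or a full file path. Given an existing directory, wget keeps the filename detected from the URL; given a file path, as in this example, the download is saved under exactly that name, which is why download_file creates the destination directory first.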


Example 3: updateFiles

    def updateFiles(self):

        print "Update Files"
        # Clean out file array
        self.data.params.files = []

        # Always assume that the most up to date runtime is not yet available
        runtime = ((self.current_time.hour - 6) // 6) * 6  # Get the model runtime
        if runtime < 0:
            runtime = 0

        launch_time_offset = self.launch_time - self.current_time

        # For now, if the prediction takes place in the past... don't
        if launch_time_offset < timedelta(0):
            launch_time_offset = timedelta(0)

        prediction_offset = (launch_time_offset.seconds // 3600 // 3) * 3

        ### NOTE THIS ISN'T DONE!
        self.data.params.files.append("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset, runtime, prediction_offset))
        if not os.path.isfile("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset, runtime, prediction_offset)):
            download_url = "http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_hd.pl?file=gfs.t%02dz.mastergrb2f%02d&leftlon=290&rightlon=294&toplat=49&bottomlat=43&dir=%%2Fgfs.%04d%02d%02d%02d%%2Fmaster" % (runtime, prediction_offset, self.launch_time.year, self.launch_time.month, self.launch_time.day, runtime)
            print(download_url)
            print(runtime, prediction_offset, self.current_time.year, self.current_time.month, self.current_time.day, runtime)
            fname = wget.download(download_url)
            shutil.move(fname, './wind/49-43-290-294-%04d%02d%02d%02d-%s' % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset, fname))
        self.data.params.files.append("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset+3, runtime, prediction_offset+3))
        if not os.path.isfile("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset+3, runtime, prediction_offset+3)):
            download_url = "http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_hd.pl?file=gfs.t%02dz.mastergrb2f%02d&leftlon=290&rightlon=294&toplat=49&bottomlat=43&dir=%%2Fgfs.%04d%02d%02d%02d%%2Fmaster" % (runtime, prediction_offset+3, self.current_time.year, self.current_time.month, self.current_time.day, runtime)
            fname = wget.download(download_url)
            shutil.move(fname, './wind/49-43-290-294-%04d%02d%02d%02d-%s' % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset+3, fname))
Developer: UMaineECE498, Project: PythonBallooning, Lines: 32, Source: wind.py


Example 4: main

def main(argv=None):
    if argv is None:
        argv = sys.argv

    print('Creating simple wiki serialized corpus')
    # Download the raw file if we do not have it already
    if not os.path.isfile(WIKIFILE):
        # Get the file
        wget.download(WIKIURL)
    wiki = WikiCorpus(WIKIFILE, lemmatize=False)
    i = 0
    article_dict = {}
    for text in wiki.get_texts(meta=True):
        url_string = 'https://simple.wikipedia.org/wiki/?curid={}'
        article_dict[i] = (url_string.format(text[0]), text[1])
        i += 1
    with open(ARTICLEDICT, 'w') as f:
        json.dump(article_dict, f)
    wiki.dictionary.filter_extremes(no_below=20, no_above=0.1,
                                    keep_n=DEFAULT_DICT_SIZE)
    MmCorpus.serialize(MMFILE, wiki, progress_cnt=10000, )
    wiki.dictionary.save_as_text(DICTFILE)
    print('Simple wiki serialized corpus created')
    # Now run LSI
    dictionary = Dictionary.load_from_text(DICTFILE)
    mm = MmCorpus(MMFILE)
    tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
    tfidf.save(TDIFMODEL)
    MmCorpus.serialize(TDIFFILE, tfidf[mm], progress_cnt=10000)
    mm_tdif = MmCorpus(TDIFFILE)
    lsi = LsiModel(mm_tdif, id2word=dictionary, num_topics=300)
    index = similarities.MatrixSimilarity(lsi[mm_tdif])
    index.save(SIMMATRIX)
    lsi.save(LSIMODEL)
    print("LSI model and index created")
Developer: fajifr, Project: recontent, Lines: 35, Source: make_simple_wiki_corpus.py


Example 5: download_gif

    def download_gif(self, term, slide_num):
        # If we have at least 3 local gifs, use one of those
        if (term in self.gifs) and (len(self.gifs[term]) > 3):
            return os.path.join("GIFs", "%s.gif" % random.choice(self.gifs[term]))

        try:
            # Download the gif
            #img = translate(term, app_key=self.GIPHY_API_KEY)
            img = translate(term)
            
            image_path = os.path.join(self.resources_dir, "%d.gif" % slide_num)
            wget.download(img.media_url, image_path)

            if not (term in self.gifs):
                self.gifs[term] = []

            if not (img.id in self.gifs[term]):
                self.gifs[term].append(img.id)
                shutil.copy(image_path, os.path.join("GIFs", "%s.gif" % img.id))
                with open(os.path.join("GIFs", "hashes.json"), "w") as f:
                    json.dump(self.gifs, f, indent=2)

            return image_path
        except Exception:
            return None
Developer: pflammertsma, Project: Trollette, Lines: 25, Source: talk_roulette.py


Example 6: _parse_page_urls_and_make_url_list

def _parse_page_urls_and_make_url_list(url_list, credentials, downloaddir, file_extns_of_intrest):

    for url in url_list:
        if credentials is not None:
            page_url = _convert_url_to_url_with_password(url, credentials)
        else:
            page_url = url

        logger.info("downloading " + page_url)

        try:
            # Remove any previously existing temp file. This is needed because, if a file
            # already exists, wget does some name mangling to create a file with a different
            # name; that name would then need to be passed to BS4 and the file deleted
            # afterwards, so it is simpler to delete it beforehand.
            if os.path.exists(TEMP_DOWNLOAD_FILE):
                os.remove(TEMP_DOWNLOAD_FILE)
            wget.download(page_url, TEMP_DOWNLOAD_FILE, bar=_download_progress_bar)
            soup = BeautifulSoup(open(TEMP_DOWNLOAD_FILE))

            links = soup.findAll(ANCHOR_TAG)

            _make_list_of_download_candidates(page_url, links, downloaddir, file_extns_of_intrest)
        except Exception as e:
            logger.error("Exception: " + str(e))
Developer: aarora79, Project: sitapt, Lines: 25, Source: ingest.py
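
Example 6 passes a custom bar= callback. The _download_progress_bar helper itself is not part of the snippet; the sketch below is a plausible stand-in, assuming the (current, total, width) signature used by the package's built-in bars such as wget.bar_adaptive:

def _download_progress_bar(current, total, width=80):
    # Called repeatedly during the download; the returned string is
    # redrawn in place (wget prefixes a carriage return to it).
    if not total:
        return "%d bytes" % current
    return "%d%% [%d / %d bytes]" % (current * 100 // total, current, total)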


Example 7: download_if_needed

def download_if_needed(url, filename):

    """ Downloads the data from a given URL, if not already present in the directory, or displays any of the following:
        1. The file already exists
        2. URL does not exist
        3. Server is not responding
    """

    if os.path.exists(filename):
        explanation = filename + ' already exists'
        return explanation
    else:
        try:
            r = urlopen(url)
        except URLError as e:
            r = e
        if r.code < 400:
            wget.download(url)
            explanation = 'downloading'
            return explanation
        elif 400 <= r.code < 500:
            explanation = 'Url does not exist'
            return explanation
        else:
            explanation = 'Server is not responding'
            return explanation
Developer: pearlphilip, Project: UW_SoftwareEngineeringForDataScientists, Lines: 26, Source: pronto_utils.py


Example 8: doTask

    def doTask(self, tstamp):
        """Download image."""
        tstamp = coils.string2time(tstamp)
        fname = coils.time2fname(tstamp) + '.jpg'
        dest_dir = os.path.join(self._config['pics_dir'], coils.time2dir(tstamp))
        dest_fname = os.path.join(
            dest_dir,
            fname,
            )
        if os.path.exists(dest_fname):
            print('Skipping {}'.format(dest_fname))
            return    
        try:
            os.makedirs(dest_dir)
        except os.error:
            pass
        saved = os.getcwd()
        os.chdir(dest_dir)
        url = '{}/pics/{}.jpg'.format(
            self._url,
            coils.time2fname(tstamp, full=True),
            )
        print(url)
        wget.download(url, bar=None)
        os.chdir(saved)

        # Propagate timestamp downstream.
        return tstamp
Developer: vmlaker, Project: wabbit, Lines: 28, Source: mpipe_stages.py
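
Here bar=None disables the progress display entirely, which is the usual choice for a non-interactive job like this pipeline stage, where progress output would only clutter the logs.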


Example 9: update

def update():
    print("Downloading Update")
    wget.download('<zip>', 'update.zip')

    for subdir in ('config', 'mods', 'jarmods'):
        try:
            shutil.rmtree(os.path.join(dir, subdir))
        except Exception:
            print("Continuing")

    with zipfile.ZipFile('update.zip') as myzip:
        myzip.extractall(dir)

    os.remove('svn.txt')
    os.remove('update.zip')

    os.rename('svnnew.txt', 'svn.txt')
    print("Update Complete")
Developer: desgyz, Project: MinecraftPatcher, Lines: 26, Source: main.py


Example 10: foo

def foo():
    with open(sys.argv[1], 'r') as fin:
        for line in fin:
            a, b = line.strip().split(',')
            c = b.strip('"') + '_' + a.strip('"') + '.pdf'
            makeurl = 'http://www.tpcuiet.com/resume_upload/cannot_find_it_haha/{}'.format(c)
            wget.download(makeurl)
Developer: shantanu561993, Project: downloadresumeTpc, Lines: 7, Source: download.py


Example 11: download

def download(url):
	"""Download the matching .hdf files (and their .hdf.xml metadata)
	linked from the page at the given URL.
	"""
	wf = urllib2.urlopen(url)  # Python 2; on Python 3 use urllib.request.urlopen
	html = wf.read()

	mhdf = re.findall('\"M.*\.hdf\"', html)
	mhdfs = [f for f in mhdf if 'h08v04' in f or 'h08v05' in f or 'h09v04' in f]
	for line in mhdfs:
		fileUrl = url + line.replace('\"', '')
		print(fileUrl)
		wget.download(fileUrl)

	xhdf = re.findall('\"M.*\.hdf.xml\"', html)
	xhdfs = [f for f in xhdf if 'h08v04' in f or 'h08v05' in f or 'h09v04' in f]
	for line in xhdfs:
		xfileUrl = url + line.replace('\"', '')
		print(xfileUrl)
		wget.download(xfileUrl)
Developer: WanRuYang, Project: modisResample, Lines: 26, Source: modisDownload.py


Example 12: download_files

def download_files(answer, download_list):
    if answer in ('y', 'yes'):
        for item in download_list:
            print(item)
            wget.download(download_list[item])
    else:
        print('Thank you and have a really great day!')
Developer: jstone28, Project: mac-installs, Lines: 7, Source: mac_installs.py


Example 13: download_img

def download_img(url):
    text = requests.get(url).text
    soup = bs(text, "lxml")

    # total
    total = soup.find('span', {'style': 'color: #DB0909'}).text
    total = total[: -3]
    total = int(total)

    # title
    title = soup.find('h1', {'id': 'htilte'}).text

    url_pattern = soup.find('ul', {'id': 'hgallery'})
    url_pattern = url_pattern.img.get('src').replace('/0.jpg', '/{:03d}.jpg')
    print(title)
    if os.path.exists(title):
        return

    os.mkdir(title)
    for i in range(total):
        file_url = url_pattern.format(i)
        file_name = "{:03d}.jpg".format(i)
        output_file = os.path.join(title, file_name)
        if i == 0:
            file_url = file_url.replace("000", "0")
        wget.download(file_url, out=output_file)
Developer: jelly-ape, Project: dts_server, Lines: 26, Source: zngirls.py


Example 14: create_lisa_data_dir_tree

def create_lisa_data_dir_tree(oseg=None):

    odp = op.expanduser('~/lisa_data/.lisa/')
    if not op.exists(odp):
        os.makedirs(odp)

    import wget
    lisa_icon_path = path(".lisa/LISA256.png")
    if not op.exists(lisa_icon_path):
        try:
            wget.download(
                "https://raw.githubusercontent.com/mjirik/lisa/master/lisa/icons/LISA256.png",
                out=lisa_icon_path)
        except Exception:
            import traceback
            logger.warning('logo download failed')
            logger.warning(traceback.format_exc())

    if oseg is not None:
        # used for server sync
        oseg._output_datapath_from_server = op.join(oseg.output_datapath, 'sync', oseg.sftp_username, "from_server/")
        # used for server sync
        oseg._output_datapath_to_server = op.join(oseg.output_datapath, 'sync', oseg.sftp_username, "to_server/")
        odp = oseg.output_datapath
        if not op.exists(odp):
            os.makedirs(odp)
        odp = oseg._output_datapath_from_server
        if not op.exists(odp):
            os.makedirs(odp)
        odp = oseg._output_datapath_to_server
        if not op.exists(odp):
            os.makedirs(odp)
Developer: andrlikjirka, Project: lisa, Lines: 32, Source: lisa_data.py


Example 15: gdb

def gdb():
    try:
        gdb = dict()
        pre1 = "http://sourceware.org/gdb/current/onlinedocs/"
        pre2 = "https://sourceware.org/gdb/talks/esc-west-1999/"
        gdb[1] = pre1 + "gdb.pdf.gz"
        gdb[2] = pre2 + "paper.pdf"
        gdb[3] = pre2 + "slides.pdf"
        print(stringa)
        print("GDB Documentation")
        print("GDB User Manual")
        filename = wget.download(gdb[1])
        print("\nThe Heisenberg Debugging Technology")
        print("Slides/Paper/Enter(for both)")
        decision = raw_input()  # Python 2; use input() on Python 3
        if decision == "Paper":
            filename = wget.download(gdb[2])
        elif decision == "Slides":
            filename = wget.download(gdb[3])
        else:
            for key in range(2, 4):
                filename = wget.download(gdb[key])
            print("\nCompleted\n")
    except Exception:
        print("\n Did something else happen ? \n")
Developer: ahiliation, Project: dollect, Lines: 26, Source: dollect.py


Example 16: _download

    def _download(self):
        if os.path.exists(self._target_file):
            if self.overwrite:
                log.info("Chose to overwrite old files.")
                self._clean()
            elif not self.verify():
                log.error("Previous download seems corrupted.")
                self._clean()
            else:
                log.info("Using previously downloaded %s" % self.filename)
                return self.filename
        elif not os.path.exists(self.directory):
            log.debug("Creating %s" % self.directory)
            os.mkdir(self.directory)

        try:
            for filename in [self.filename, self.filename + self.CHECKSUM_SUFFIX]:
                log.debug("Downloading %s" % filename)
                wget.download(self.base_url + filename, out=self.directory, bar=None)
            if self.verify():
                log.info("Successfully downloaded: %s" % filename)
                return self._target_file
            else:
                return None
        except Exception as e:
            log.debug("Failed to download %s: %s" % (filename, e))
Developer: steveeJ, Project: gen2fetcher, Lines: 26, Source: __init__.py
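
The verify() method called above is project code that the snippet does not show. As a rough sketch of the same idea, assuming the checksum file stores a hex digest as its first whitespace-separated token, a generic checker might look like this (the names are illustrative, not from gen2fetcher):

import hashlib

def verify_checksum(target_file, checksum_file):
    # Compare the file's SHA-256 digest against the expected digest
    # stored as the first token of the checksum file.
    with open(checksum_file) as f:
        expected = f.read().split()[0].lower()
    digest = hashlib.sha256()
    with open(target_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected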


Example 17: download_bigbird_models

def download_bigbird_models():

    if not os.path.exists(RAW_DOWNLOAD_DIR):
        os.mkdir(RAW_DOWNLOAD_DIR)

    url = "http://rll.berkeley.edu/bigbird/aliases/772151f9ac/"
    req = urllib2.Request(url)  # Python 2; on Python 3 use urllib.request
    res = urllib2.urlopen(req)

    html_split = res.read().split()

    model_names = []
    for txt in html_split:
        if "/bigbird/images" in txt:
            model_names.append(txt[29:-5])

    for model_name in model_names:
        print("")
        print(model_name)
        if not os.path.exists(RAW_DOWNLOAD_DIR + '/' + model_name):
            if os.path.exists(os.getcwd() + '/' + TAR_FILE_NAME):
                os.remove(os.getcwd() + '/' + TAR_FILE_NAME)

            download_url = "http://rll.berkeley.edu/bigbird/aliases/772151f9ac/export/" + model_name + "/" + TAR_FILE_NAME
            wget.download(download_url)
            with tarfile.open(os.getcwd() + '/' + TAR_FILE_NAME, 'r') as t:
                t.extractall(RAW_DOWNLOAD_DIR)
Developer: CURG, Project: gazebo_data_gen_gdl, Lines: 27, Source: get_big_bird_models.py


Example 18: get_ipr_hierarchy

def get_ipr_hierarchy():
    if not os.path.isfile('interpro.xml.gz'):
        print('downloading interpro data')
        wget.download('ftp://ftp.ebi.ac.uk/pub/databases/interpro/Current/interpro.xml.gz')
    print('preparing interpro data')
    ipr_hierarchy = IprHierarchy()
    with gzip.open('interpro.xml.gz', 'rb') as filehandle:
        soup = BeautifulSoup(filehandle, 'xml')
        for domain in soup.find_all('interpro'):
            name = str(domain.find('name').string)
            domain_features = get_domain_features(domain)
            ipr = IprObject(ID=domain['id'], name=name, domain_type=domain['type'],
                            domain_features=domain_features)
            ipr_hierarchy.update(ipr)
    ipr_hierarchy.set_contained_by()
    with open('interpro.hierarchy.p', 'wb') as filehandle:  # pickle needs binary mode
        pickle.dump(ipr_hierarchy, filehandle)
    return ipr_hierarchy
Developer: holmrenser, Project: bioportal, Lines: 27, Source: add_interpro_hierarchy.py


Example 19: run

def run(argv):
    if not os.path.exists(clean_filepath):
        print('dbsnp will be stored at {!r}'.format(clean_filepath))
        if not os.path.exists(raw_filepath):

            # dbSNP downloads are described at <https://www.ncbi.nlm.nih.gov/variation/docs/human_variation_vcf/>
            # This file includes chr-pos-ref-alt-rsid and 4X a bunch of useless columns:
            dbsnp_url = 'ftp://ftp.ncbi.nlm.nih.gov/snp/organisms/human_9606_b{}_GRCh37p13/VCF/00-All.vcf.gz'.format(dbsnp_version)

            print('Downloading dbsnp!')
            make_basedir(raw_filepath)
            raw_tmp_filepath = get_tmp_path(raw_filepath)
            wget.download(url=dbsnp_url, out=raw_tmp_filepath)
            print('')
            os.rename(raw_tmp_filepath, raw_filepath)
            print('Done downloading.')

        print('Converting {} -> {}'.format(raw_filepath, clean_filepath))
        make_basedir(clean_filepath)
        clean_tmp_filepath = get_tmp_path(clean_filepath)
        run_script(r'''
        gzip -cd '{raw_filepath}' |
        grep -v '^#' |
        perl -F'\t' -nale 'print "$F[0]\t$F[1]\t$F[2]\t$F[3]\t$F[4]"' | # Gotta declare that it's tab-delimited, else it's '\s+'-delimited I think.
        gzip > '{clean_tmp_filepath}'
        '''.format(raw_filepath=raw_filepath, clean_tmp_filepath=clean_tmp_filepath))
        os.rename(clean_tmp_filepath, clean_filepath)

    print("dbsnp is at '{clean_filepath}'".format(clean_filepath=clean_filepath))
Developer: statgen, Project: pheweb, Lines: 29, Source: download_rsids.py
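
The pattern here, downloading to a temporary path and only renaming into place once wget.download returns, is worth copying: an interrupted download can never leave a truncated file at raw_filepath, so the os.path.exists check stays trustworthy on the next run.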


Example 20: get_webapi_brand_image_link_per_country_lang

def get_webapi_brand_image_link_per_country_lang(csku, lang=None, directory=None):
    """ Accesses the Zalando Website API and pulls information for article brand, as well as a link
            for an article picture.
            :param csku: The csku name to pull data for
            :param lang: The country to access
            :type csku: str
            :type lang: str-str
            :return: The url of the csku picture, and the brand name of the csku
            :rtype: dictionary_object
            """
    try:
        web_request = \
            'https://api.zalando.com/articles/{c}?fields=media.images.largeHdUrl'.format(c=csku)
        webapi_brand_image_url = requests.get(web_request, headers={'x-client-name': 'Team AIS Preorder PQRS API'})
        result = json.loads(webapi_brand_image_url.text)

        # In case of 404 http error or any http error.
        # the result will be assigned here with the error message.
        # Then the default values are returned.
        if 'status' in result.keys():
            raise DataNotFound

        elif result is not None:
            # Download every large HD image listed for the article
            if 'media' in result.keys() and 'images' in result['media'].keys():
                for x in result['media']['images']:
                    if 'largeHdUrl' in x.keys():
                        pic_url = x['largeHdUrl']
                        wget.download(pic_url, out=directory)

    except DataNotFound:
        pass
Developer: narendhrancs, Project: learning, Lines: 35, Source: get_images_from_list.py



Note: The wget.download function examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their developers, and copyright in the source code remains with the original authors; consult each project's License before distributing or reusing the code, and do not republish without permission.

