• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python zipfile.ZipFile类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中zipfile.ZipFile的典型用法代码示例。如果您正苦于以下问题:Python ZipFile类的具体用法?Python ZipFile怎么用?Python ZipFile使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了ZipFile类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: zip_layer_folder

def zip_layer_folder(dir_path, layer_name):
    """
    Create a zip archive with the content of the folder located at `dir_path`
    and name it with `layer_name`.

    Parameters
    ----------
    dir_path: str
        The path to the temporary folder in which are located the files to
        be zipped.

    layer_name: str
        The name of the concerned layer (will be used as file name for the
        zip archive).

    Returns
    -------
    raw_content: str
        The zip archive
    archive_name: str
        The name of the archive (used later in the header of the response).
    """
    zip_stream = BytesIO()
    # Context manager guarantees the archive is finalized/closed even if a
    # write fails (the original leaked the open ZipFile on error).
    with ZipFile(zip_stream, "w", compression=ZIP_DEFLATED) as myZip:
        for filename in os.listdir(dir_path):
            # .geojson files are intermediate artifacts — skip them.
            if not filename.endswith('.geojson'):
                myZip.write(path_join(dir_path, filename), filename, ZIP_DEFLATED)
    zip_stream.seek(0)
    return zip_stream.read(), ''.join([layer_name, ".zip"])
开发者ID:mthh,项目名称:noname-stuff,代码行数:32,代码来源:misc.py


示例2: layer_type

def layer_type(filename):
    """Finds out if a filename is a Feature or a Vector
       returns a gsconfig resource_type string
       that can be either 'featureType' or 'coverage'
    """
    shp_exts = ['.shp',]
    cov_exts = ['.tif', '.tiff', '.geotiff', '.geotif']
    csv_exts = ['.csv']
    kml_exts = ['.kml']

    base_name, extension = os.path.splitext(filename)

    if extension.lower() == '.zip':
        # Peek inside the archive: the last member with a shapefile,
        # coverage or csv extension decides the effective type.
        # (ZipFile had no context-manager support on Python 2.6, hence
        # the explicit try/finally.)
        archive = ZipFile(filename)
        try:
            for member in archive.namelist():
                member_base, member_ext = os.path.splitext(member.lower())
                if member_ext in shp_exts + cov_exts + csv_exts:
                    base_name, extension = member_base, member_ext
        finally:
            archive.close()

    lowered = extension.lower()
    if lowered in shp_exts + csv_exts + kml_exts:
        return FeatureType.resource_type
    if lowered in cov_exts:
        return Coverage.resource_type
    raise GeoNodeException('Saving of extension [%s] is not implemented' % extension)
开发者ID:frippe12573,项目名称:geonode,代码行数:30,代码来源:utils.py


示例3: import_view

def import_view(request):
    """
    Gets the existing declared parsers for the current project.
    This view handles only the file based import parsers.

    Renders two forms: one for uploading a zip archive of a dataset and one
    for triggering web based imports; on POST it dispatches on which submit
    button was pressed ('upload-file' vs 'import-web').
    """
    choices = []
    choices_url = []
    render_dict = {}

    # `classes` maps the form's parser index to the parser class itself.
    choices, choices_url, classes = discover_available_parsers()

    form = ImportDatasetFormWithFile(choices, prefix="with-file")
    form_without_file = ImportDatasetForm(
        choices_url, prefix="without-file")

    if request.method == 'POST':
        if 'upload-file' in request.POST:
            form = ImportDatasetFormWithFile(
                choices, request.POST, request.FILES, prefix="with-file")

            if form.is_valid():
                print(request.FILES)
                uploaded = request.FILES['with-file-zipfile']

                destination_dir, destination_file = create_tmp_destination(
                    uploaded.name)

                # NOTE(review): the upload is written through a text-mode
                # ('w+') handle although zip content is binary — presumably
                # this ran on Python 2 where the distinction is harmless;
                # confirm before porting.
                with open(destination_file, 'w+') as f:
                    f.write(uploaded.file.read())
                    zfile = ZipFile(f)
                    for name in zfile.namelist():
                        try:
                            # Extract every member next to the uploaded file;
                            # only .shp members actually trigger an import.
                            zfile.extract(
                                name, os.path.dirname(os.path.realpath(f.name)))
                            if name.endswith('shp'):
                                parser = classes[int(form['parser'].value())]
                                # Asynchronous task (Celery .delay).
                                import_datas.delay(
                                    '/'.join((destination_dir, name)),
                                    parser.__name__, parser.__module__
                                )
                                continue
                        except Exception:
                            raise

        if 'import-web' in request.POST:
            form_without_file = ImportDatasetForm(
                choices_url, request.POST, prefix="without-file")

            if form_without_file.is_valid():
                parser = classes[int(form_without_file['parser'].value())]
                import_datas_from_web.delay(
                    parser.__name__, parser.__module__
                )

    # Hide second form if parser has no web based imports.
    render_dict['form'] = form
    if choices_url:
        render_dict['form_without_file'] = form_without_file

    return render(request, 'common/import_dataset.html', render_dict)
开发者ID:brianpessers,项目名称:Geotrek,代码行数:60,代码来源:views.py


示例4: get_results

    def get_results(self):
        """Get analysis results.
        @return: data.
        """
        storage = self._get_root(container="cuckoo", create=False)

        if not os.path.exists(storage):
            return False

        # Build the archive entirely in memory, then wrap it for XML-RPC.
        buf = StringIO()
        archive = ZipFile(buf, "w", ZIP_DEFLATED)

        # Length of the absolute root path, used to make member names
        # relative to the results root.
        prefix_len = len(os.path.abspath(storage))

        for dirpath, dirnames, filenames in os.walk(storage):
            relative_root = os.path.abspath(dirpath)[prefix_len:]
            for filename in filenames:
                archive.write(os.path.join(dirpath, filename),
                              os.path.join(relative_root, filename),
                              ZIP_DEFLATED)

        archive.close()
        data = xmlrpclib.Binary(buf.getvalue())
        buf.close()

        return data
开发者ID:Rafiot,项目名称:cuckoo,代码行数:26,代码来源:agent.py


示例5: save_pickle_in_cfile

    def save_pickle_in_cfile(self, local_fname, networkref):
        """ Creates a pickled version of the graph and stores it in the
        cfile
        
        Parameters
        ----------
        local_fname: string
            The filename used in the Pickle folder to store
        networkref: NetworkX Graph instance
            The NetworkX graph to pickle
        
        """

        logger.info('Write a generated graph pickle to the connectome file.')
        picklefilepath = os.path.join(tempfile.gettempdir(),local_fname)
        from networkx import write_gpickle
        # add nodekeys, edgekeys, graphid to helpernode 'n0' before storage
        # (gpickle only persists graph content, so the metadata rides along
        # on a temporary helper node that is removed again right after)
        helperdict = {'nodekeys': networkref.nodekeys.copy(), \
                      'edgekeys': networkref.edgekeys.copy(), \
                      'graphid' : networkref.networkid }
        networkref.graph.add_node('n0')
        networkref.graph.node['n0'] = helperdict
        write_gpickle(networkref.graph, picklefilepath)
        # undo the temporary mutation so the caller's graph is unchanged
        networkref.graph.remove_node('n0')
        
        from zipfile import ZipFile, ZIP_DEFLATED
        # the connectome file is itself a zip archive; append mode ('a')
        # adds the pickle without rewriting the existing members
        tmpzipfile = ZipFile(self.data.fullpathtofile, 'a', ZIP_DEFLATED)
        # store it in the zip file
        tmpzipfile.write(picklefilepath, 'Pickle/' + local_fname)
        tmpzipfile.close()
        
        # remove pickle file from system
        logger.debug('Unlink: %s' % picklefilepath)
        os.unlink(picklefilepath)
开发者ID:satra,项目名称:connectomeviewer,代码行数:34,代码来源:cfile.py


示例6: aqcuire_all_resources

    def aqcuire_all_resources(self, format_dict):
        """Download the archive referenced by :meth:`url` once and extract
        the relevant members for every scale/level combination.

        Parameters
        ----------
        format_dict : dict
            Base formatting values; 'scale' and 'level' are overridden for
            each combination before resolving the target path.
        """
        # NOTE(review): method name is misspelled ("aqcuire") but is public —
        # kept as-is for backward compatibility with existing callers.
        import cStringIO as StringIO
        from zipfile import ZipFile

        # Download archive.
        url = self.url(format_dict)
        shapefile_online = self._urlopen(url)
        # Buffer the whole payload in memory: ZipFile needs a seekable file.
        zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')
        shapefile_online.close()

        # Iterate through all scales and levels and extract relevant files.
        modified_format_dict = dict(format_dict)
        scales = ('c', 'l', 'i', 'h', 'f')
        levels = (1, 2, 3, 4)
        for scale, level in itertools.product(scales, levels):
            modified_format_dict.update({'scale': scale, 'level': level})
            target_path = self.target_path(modified_format_dict)
            target_dir = os.path.dirname(target_path)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            # Write each wanted member next to target_path, keeping the
            # member's own extension.
            for member_path in self.zip_file_contents(modified_format_dict):
                ext = os.path.splitext(member_path)[1]
                target = os.path.splitext(target_path)[0] + ext
                member = zfh.getinfo(member_path)
                with open(target, 'wb') as fh:
                    fh.write(zfh.open(member).read())

        zfh.close()
开发者ID:RachelNorth,项目名称:cartopy,代码行数:29,代码来源:shapereader.py


示例7: upload_analyzer

    def upload_analyzer(self):
        """Upload analyzer to guest.
        @return: operation status.
        """
        # Build an in-memory zip of the platform-specific analyzer tree.
        zip_data = StringIO()
        zip_file = ZipFile(zip_data, "w", ZIP_DEFLATED)

        root = os.path.join("analyzer", self.platform)
        root_len = len(os.path.abspath(root))

        if not os.path.exists(root):
            log.error("No valid analyzer found at path: %s" % root)
            return False

        # `root` is deliberately reused as the walk variable; `root_len`
        # was captured above so member names stay relative to the
        # analyzer root.
        for root, dirs, files in os.walk(root):
            archive_root = os.path.abspath(root)[root_len:]
            for name in files:
                path = os.path.join(root, name)
                archive_name = os.path.join(archive_root, name)
                zip_file.write(path, archive_name, ZIP_DEFLATED)

        zip_file.close()
        data = xmlrpclib.Binary(zip_data.getvalue())
        zip_data.close()

        log.debug("Uploading analyzer to guest (ip=%s)" % self.ip)
        self.server.add_analyzer(data)
        # NOTE(review): success falls through returning None while failure
        # returns False — both falsy. Confirm no caller tests `is False`
        # before adding an explicit `return True` here.
开发者ID:Fuitad,项目名称:cuckoo-1,代码行数:27,代码来源:guest.py


示例8: getTranslations

def getTranslations(type, localesDir, defaultLocale, projectName, key):
  """Download all translations of a Crowdin project and write them into the
  local locales tree, then delete stale translation files.

  type is 'chrome' or 'gecko'; key is the Crowdin API key.
  """
  # Ask Crowdin to (re)build the translation export before downloading it.
  result = urllib2.urlopen('http://api.crowdin.net/api/project/%s/export?key=%s' % (projectName, key)).read()
  if result.find('<success') < 0:
    raise Exception('Server indicated that the operation was not successful\n' + result)

  result = urllib2.urlopen('http://api.crowdin.net/api/project/%s/download/all.zip?key=%s' % (projectName, key)).read()
  zip = ZipFile(StringIO(result))
  # Maps locale directory -> set of files written; used for pruning below.
  dirs = {}
  for info in zip.infolist():
    if not info.filename.endswith('.json'):
      continue

    dir, file = os.path.split(info.filename)
    # Skip unexpected directory names and the default locale itself.
    if not re.match(r'^[\w\-]+$', dir) or dir == defaultLocale:
      continue
    if type == 'chrome' and file.count('.') == 1:
      origFile = file
    else:
      # Crowdin appends '.json' to the original file name; strip it back off.
      origFile = re.sub(r'\.json$', '', file)
    if type == 'gecko' and not origFile.endswith('.dtd') and not origFile.endswith('.properties'):
      continue

    # Map Crowdin locale codes back to the codes used locally.
    # NOTE(review): this loop shadows the `key` API-key parameter — harmless
    # here because the key is no longer needed after the downloads above,
    # but fragile if code is added later.
    mapping = langMappingChrome if type == 'chrome' else langMappingGecko
    for key, value in mapping.iteritems():
      if value == dir:
        dir = key
    if type == 'chrome':
      dir = dir.replace('-', '_')

    data = zip.open(info.filename).read()
    # An empty translation exports as a bare JSON array — nothing to write.
    if data == '[]':
      continue

    if not dir in dirs:
      dirs[dir] = set()
    dirs[dir].add(origFile)

    path = os.path.join(localesDir, dir, origFile)
    if not os.path.exists(os.path.dirname(path)):
      os.makedirs(os.path.dirname(path))
    if type == 'chrome' and origFile.endswith('.json'):
      postprocessChromeLocale(path, data)
    elif type == 'chrome':
      # Single-message file: unwrap the message text from the JSON wrapper.
      data = json.loads(data)
      if origFile in data:
        fileHandle = codecs.open(path, 'wb', encoding='utf-8')
        fileHandle.write(data[origFile]['message'])
        fileHandle.close()
    else:
      fromJSON(path, data)

  # Remove any extra files
  for dir, files in dirs.iteritems():
    baseDir = os.path.join(localesDir, dir)
    if not os.path.exists(baseDir):
      continue
    for file in os.listdir(baseDir):
      path = os.path.join(baseDir, file)
      if os.path.isfile(path) and (file.endswith('.json') or file.endswith('.properties') or file.endswith('.dtd')) and not file in files:
        os.remove(path)
开发者ID:chinnurtb,项目名称:buildtools,代码行数:60,代码来源:localeTools.py


示例9: parse_template

def parse_template(template_name):
    """Resolve template name into absolute path to the template
    and boolean if absolute path is temporary directory.

    A name starting with 'http' is downloaded and unpacked into a fresh
    temporary directory (the returned flag is then True and the caller is
    responsible for cleanup); otherwise the name is resolved through the
    templates registry, as a dotted path, or as a plain directory.

    Raises ConfigurationError if the download is not a zip file or the
    resolved path is not a directory.
    """
    if template_name.startswith('http'):
        # "url#subpath" selects a sub-folder inside the downloaded archive.
        if '#' in template_name:
            url, subpath = template_name.rsplit('#', 1)
        else:
            url = template_name
            subpath = ''
        with tempfile.NamedTemporaryFile() as tmpfile:
            urlretrieve(url, tmpfile.name)
            if not is_zipfile(tmpfile.name):
                # Bug fix: report the file path, not the
                # NamedTemporaryFile object's repr.
                raise ConfigurationError("Not a zip file: %s" % tmpfile.name)
            zf = ZipFile(tmpfile)
            try:
                path = tempfile.mkdtemp()
                zf.extractall(path)
                return os.path.join(path, subpath), True
            finally:
                zf.close()

    registry = TemplatesRegistry()
    if registry.has_template(template_name):
        path = registry.path_of_template(template_name)
    elif ':' in template_name:
        path = resolve_dotted_path(template_name)
    else:
        path = os.path.realpath(template_name)

    if not os.path.isdir(path):
        raise ConfigurationError('Template directory does not exist: %s' % path)
    return path, False
开发者ID:glenfant,项目名称:mr.bob,代码行数:33,代码来源:configurator.py


示例10: download_unzip

def download_unzip(input_zip):
    """Download the zip archive at URL `input_zip` and return the
    concatenated contents of every member file as a single string.
    """
    url = urllib.urlopen(input_zip)
    zipfile = ZipFile(StringIO(url.read()))
    try:
        # join() is linear; the previous `+=` loop re-copied the growing
        # accumulator on every member (quadratic), and the archive was
        # never closed.
        return ''.join(zipfile.open(name).read() for name in zipfile.namelist())
    finally:
        zipfile.close()
开发者ID:BinaryDefense,项目名称:goatrider,代码行数:7,代码来源:goatrider.py


示例11: get_sightings_from_atlas

def get_sightings_from_atlas(uri, species_ids):
    """Download a zipped CSV of sightings from the Atlas API at `uri` and
    return {species_id: [[lat, long], ...]} for the requested species.
    Species without sightings are omitted; each list is capped at
    `species_sighting_limit` (module-level constant).
    """
    # Create a dict of sightings
    # Each species ID will have a list of sightings with [lat, long]
    sightings = dict()
    for species_id in species_ids:
        sightings[species_id] = []

    # The CSV headers
    LONG = 0
    LAT = 1
    LSID = 2
        
    # Download API call and unzip
    url = urlopen(uri)
    zipfile = ZipFile(StringIO(url.read()))

    # Skip the header row using [1:]
    for line in zipfile.open("data.csv").readlines()[1:]:
        sighting_record = line.split(",")
        # NOTE(review): naive comma split plus hard-coded slicing — [1:-1]
        # presumably strips surrounding quotes and [1:-2] additionally drops
        # a trailing newline on the last column. Breaks if any field contains
        # a comma; confirm the Atlas export is always simple quoted CSV.
        sightings[sighting_record[LSID][1:-2]].append([sighting_record[LAT][1:-1],sighting_record[LONG][1:-1]])
        
    for species_id in species_ids:
        # Don't return too many sightings for a single species
        sightings[species_id] = sightings[species_id][0:species_sighting_limit]
        # Prune any empty entries
        if sightings[species_id] == []: del sightings[species_id]
        
    return sightings
开发者ID:bilalakil,项目名称:govhack-2016,代码行数:28,代码来源:index.py


示例12: export_zip

    def export_zip(self, paths):
        """Build an in-memory zip archive of the resources at `paths` and
        return its raw bytes; folders are archived recursively (sub-folders
        themselves contribute only their non-folder children).
        """
        stringio = StringIO()
        archive = ZipFile(stringio, mode='w')

        def _add_resource(resource):
            # Archive every file of the resource except its .metadata.
            # NOTE(review): the same `resource.handler.to_str()` payload is
            # written under each archived filename — confirm multi-file
            # resources are really meant to duplicate content.
            for filename in resource.get_files_to_archive(True):
                if filename.endswith('.metadata'):
                    continue
                path = Path(self.handler.key).get_pathto(filename)
                archive.writestr(str(path), resource.handler.to_str())

        for path in paths:
            # soft=True: silently skip paths that no longer resolve.
            child = self.get_resource(path, soft=True)
            if child is None:
                continue
            # A Folder => we add its content
            if isinstance(child, Folder):
                for subchild in child.traverse_resources():
                    if subchild is None or isinstance(subchild, Folder):
                        continue
                    _add_resource(subchild)
            else:
                _add_resource(child)

        archive.close()
        return stringio.getvalue()
开发者ID:matrixorz,项目名称:ikaaro,代码行数:26,代码来源:folder.py


示例13: createDevEnv

def createDevEnv(baseDir, type):
  """Build the extension into <baseDir>/devenv and wait up to ten seconds
  for running browsers to connect, reporting whether the development build
  was picked up. (Python 2 code: print statements, SocketServer/thread.)
  """
  # Build into an in-memory buffer, then unpack it as the dev environment.
  fileBuffer = StringIO()
  createBuild(baseDir, type=type, outFile=fileBuffer, devenv=True, releaseBuild=True)

  from zipfile import ZipFile
  zip = ZipFile(StringIO(fileBuffer.getvalue()), 'r')
  zip.extractall(os.path.join(baseDir, 'devenv'))
  zip.close()

  print 'Development environment created, waiting for connections from active extensions...'
  metadata = readMetadata(baseDir, type)
  # One-element list acts as a mutable cell so the handler closure below
  # can count incoming connections.
  connections = [0]

  import SocketServer, time, thread

  class ConnectionHandler(SocketServer.BaseRequestHandler):
    def handle(self):
      connections[0] += 1
      self.request.sendall('HTTP/1.0 OK\nConnection: close\n\n%s' % metadata.get('general', 'basename'))

  server = SocketServer.TCPServer(('localhost', 43816), ConnectionHandler)

  # Background thread stops the server after ten seconds so
  # serve_forever() returns.
  def shutdown_server(server):
    time.sleep(10)
    server.shutdown()
  thread.start_new_thread(shutdown_server, (server,))
  server.serve_forever()

  if connections[0] == 0:
    print 'Warning: No incoming connections, extension probably not active in the browser yet'
  else:
    print 'Handled %i connection(s)' % connections[0]
开发者ID:EFForg,项目名称:trackerlab,代码行数:32,代码来源:packagerChrome.py


示例14: get_info

def get_info(in_stream):
    """ Return the version and submitter strings from zipfile byte stream. """
    archive = ZipFile(in_stream, 'r')
    try:
        # The metadata lives in a well-known '__INFO__' member.
        raw = archive.read('__INFO__')
    finally:
        archive.close()
    return unpack_info(raw)
开发者ID:ArneBab,项目名称:infocalypse,代码行数:7,代码来源:submission.py


示例15: load_property_inspection

def load_property_inspection():
    """
    Loads and returns several variables for the data set from Kaggle's Property Inspection Prediction competition.
    Link: https://www.kaggle.com/c/liberty-mutual-group-property-inspection-prediction

    Returns
    ----------
    data : array-like
        Pandas data frame containing the entire data set.

    X : array-like
        Training input samples.

    y : array-like
        Target values.
    """
    file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'property_inspection.zip')
    # read_csv consumes the member eagerly, so the archive can be closed
    # right away (the original leaked the open ZipFile handle).
    with ZipFile(file_location) as z:
        data = pd.read_csv(z.open('train.csv'))
    data = data.set_index('Id')

    X = data.iloc[:, 1:].values
    y = data.iloc[:, 0].values

    # transform the categorical variables from strings to integers
    encoder = CategoryEncoder()
    X = encoder.fit_transform(X)

    return data, X, y
开发者ID:jdwittenauer,项目名称:ionyx,代码行数:29,代码来源:datasets.py


示例16: do_export

    def do_export(self, REQUEST=None):
        """ Export this container's exportable children into an in-memory
        zip archive plus an index; exceptions are collected into `errors`
        rather than aborting the whole export. (Python 2 / Zope code.)
        """
        if REQUEST and not self.getParentNode().checkPermissionView():
            raise Unauthorized

        errors = []

        my_container = self.getParentNode()

        objects_to_archive = self.gather_objects(my_container)

        file_like_object = StringIO()
        zip_file = ZipFile(file_like_object, 'w')
        # (title, meta label, archive path) tuples for the index file.
        archive_files = []
        try:
            for obj in objects_to_archive:
                added_path = None
                if self.is_exportable(obj):
                    added_path = self.add_object_to_zip(obj, zip_file)
                if added_path:
                    archive_files.append((obj.title_or_id(),
                                          getattr(obj, 'meta_label',
                                                  obj.meta_type),
                                          added_path))

            self.add_index(zip_file, archive_files)
            zip_file.close()
        # Python 2 except syntax; source predates Python 3.
        except Exception, e:
            errors.append(e)
        # NOTE(review): the visible excerpt ends here — the archive bytes in
        # `file_like_object` are presumably returned further down; confirm
        # against the full source.
开发者ID:bogtan,项目名称:Naaya,代码行数:29,代码来源:zip_import_export.py


示例17: load_forest_cover

def load_forest_cover():
    """
    Loads and returns several variables for the data set from Kaggle's Forest Cover Type Prediction competition.
    Link: https://www.kaggle.com/c/forest-cover-type-prediction

    Returns
    ----------
    data : array-like
        Pandas data frame containing the entire data set.

    X : array-like
        Training input samples.

    y : array-like
        Target values.
    """
    file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'forest_cover.zip')
    # read_csv consumes the member eagerly, so the archive can be closed
    # right away (the original leaked the open ZipFile handle).
    with ZipFile(file_location) as z:
        data = pd.read_csv(z.open('train.csv'))
    data = data.set_index('Id')

    # move the label to the first position
    cols = data.columns.tolist()
    cols = cols[-1:] + cols[0:-1]
    data = data[cols]

    X = data.iloc[:, 1:].values
    y = data.iloc[:, 0].values

    return data, X, y
开发者ID:jdwittenauer,项目名称:ionyx,代码行数:30,代码来源:datasets.py


示例18: get_data_famafrench

def get_data_famafrench(name, start=None, end=None):
    """Download the zipped Fama/French text dataset `name` from Ken French's
    data library and parse it into a dict of DataFrames, one per table found
    in the file. `start`/`end` are normalized but not otherwise used here.
    """
    start, end = _sanitize_dates(start, end)

    # path of zip files
    zipFileURL = "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/"

    url = urllib.urlopen(zipFileURL + name + ".zip")
    zipfile = ZipFile(StringIO(url.read()))
    data = zipfile.open(name + ".txt").readlines()

    # Lines of length 2 (presumably bare '\r\n') delimit the tables.
    file_edges = np.where(np.array([len(d) for d in data]) == 2)[0]

    datasets = {}
    for i in range(len(file_edges) - 1):
        dataset = [d.split() for d in data[(file_edges[i] + 1):
                                           file_edges[i + 1]]]
        # Skip short fragments that are not real tables.
        if(len(dataset) > 10):
            # The header row has one column fewer than the data rows
            # (the index column carries no label); take the last such row.
            ncol = np.median(np.array([len(d) for d in dataset]))
            header_index = np.where(
                np.array([len(d) for d in dataset]) == (ncol - 1))[0][-1]
            header = dataset[header_index]
            # to ensure the header is unique
            header = [str(j + 1) + " " + header[j] for j in range(len(header))]
            index = np.array(
                [d[0] for d in dataset[(header_index + 1):]], dtype=int)
            dataset = np.array(
                [d[1:] for d in dataset[(header_index + 1):]], dtype=float)
            datasets[i] = DataFrame(dataset, index, columns=header)

    return datasets
开发者ID:brandonkane,项目名称:pandas,代码行数:30,代码来源:data.py


示例19: load_otto_group

def load_otto_group():
    """
    Loads and returns several variables for the data set from Kaggle's Otto Group Product Classification competition.
    Link: https://www.kaggle.com/c/otto-group-product-classification-challenge

    Returns
    ----------
    data : array-like
        Pandas data frame containing the entire data set.

    X : array-like
        Training input samples.

    y : array-like
        Target values.
    """
    file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'otto_group.zip')
    # read_csv consumes the member eagerly, so the archive can be closed
    # right away (the original leaked the open ZipFile handle).
    with ZipFile(file_location) as z:
        data = pd.read_csv(z.open('train.csv'))
    data = data.set_index('id')

    # move the label to the first position
    cols = data.columns.tolist()
    cols = cols[-1:] + cols[0:-1]
    data = data[cols]

    X = data.iloc[:, 1:].values

    y = data.iloc[:, 0].values

    # transform the labels from strings to integers
    encoder = LabelEncoder()
    y = encoder.fit_transform(y)

    return data, X, y
开发者ID:jdwittenauer,项目名称:ionyx,代码行数:35,代码来源:datasets.py


示例20: acquire_resource

    def acquire_resource(self, target_path, format_dict):
        """
        Downloads the zip file and extracts the files listed in
        :meth:`zip_file_contents` to the target path.

        """
        import cStringIO as StringIO
        from zipfile import ZipFile

        target_dir = os.path.dirname(target_path)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)

        url = self.url(format_dict)

        shapefile_online = self._urlopen(url)

        zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')

        for member_path in self.zip_file_contents(format_dict):
            ext = os.path.splitext(member_path)[1]
            target = os.path.splitext(target_path)[0] + ext
            member = zfh.getinfo(member_path)
            with open(target, 'wb') as fh:
                fh.write(zfh.open(member).read())

        shapefile_online.close()
        zfh.close()

        return target_path
开发者ID:RachelNorth,项目名称:cartopy,代码行数:30,代码来源:shapereader.py



注:本文中的zipfile.ZipFile类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python zipfile.ZipInfo类代码示例发布时间:2022-05-26
下一篇:
Python zipfile.PyZipFile类代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap