
Python file_sys.FileSystem Class Code Examples


This article collects typical usage examples of the Python class pyon.util.file_sys.FileSystem. If you have been wondering what the FileSystem class does and how to use it, the curated examples below should help.



The following presents 20 code examples of the FileSystem class, sorted by popularity.
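
Before the examples, here is a minimal usage sketch of the FileSystem calls that recur below (get_url, get_hierarchical_url, get_extended_url, unlink). The signatures are inferred from the examples in this article, not from pyon documentation, and assume a configured pyon environment; file names are illustrative.

    # A minimal sketch, assuming a configured pyon environment.
    # Signatures are inferred from the examples below.
    from pyon.util.file_sys import FileSystem, FS

    # Build a path inside the managed temp area, e.g. <temp_root>/myfile.hdf5
    tmp_path = FileSystem.get_url(fs=FS.TEMP, filename='myfile', ext='.hdf5')

    # Build a fan-out (hierarchical) path inside the cache area, keyed by name
    cache_path = FileSystem.get_hierarchical_url(FS.CACHE, 'ABCDEF012345', '.hdf5')

    # Expand a RESOURCE:-prefixed location into an absolute directory
    data_dir = FileSystem.get_extended_url('RESOURCE:ext/pydap')

    # Remove a file managed by FileSystem
    FileSystem.unlink(tmp_path)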

Example 1: _force_clean

    def _force_clean(cls, recreate=False, initial=False):
        # Database resources
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.datastore_common import DatastoreFactory
        datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE, scope=get_sys_name())
        if initial:
            datastore._init_database(datastore.database)

        dbs = datastore.list_datastores()
        clean_prefix = '%s_' % get_sys_name().lower()
        things_to_clean = [x for x in dbs if x.startswith(clean_prefix)]
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()

        # Broker resources
        from putil.rabbitmq.rabbit_util import RabbitManagementUtil
        rabbit_util = RabbitManagementUtil(CFG, sysname=get_sys_name())  # get_sys_name imported above
        deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
        log.info("Deleted %s exchanges, %s queues" % (len(deleted_exchanges), len(deleted_queues)))

        # File system
        from pyon.util.file_sys import FileSystem
        FileSystem._clean(CFG)
Developer: edwardhunter | Project: scioncc | Lines: 29 | Source: int_test.py


Example 2: tearDown

    def tearDown(self):
        """
        Cleanup: delete the temporary files created during the test.
        """

        for fname in self.fnames:
            FileSystem.unlink(fname)
Developer: oldpatricka | Project: pyon | Lines: 7 | Source: test_hdf_array_iterator.py


Example 3: create_known

        def create_known(dataset_name, rootgrp_name, grp_name):
            """
            A known array to compare against during tests
            """

            known_array = numpy.ones((10,20))

            filename = FileSystem.get_url(FS.TEMP, random_name(), ".hdf5")

            # Write an hdf file with known values to compare against
            h5pyfile = h5py.File(filename, mode='w', driver='core')
            grp = h5pyfile.create_group(rootgrp_name)
            subgrp = grp.create_group(grp_name)
            dataset = subgrp.create_dataset(dataset_name, known_array.shape, known_array.dtype.str, maxshape=(None,None))
            dataset.write_direct(known_array)
            h5pyfile.close()

            # convert the hdf file into a binary string
            f = open(filename, mode='rb')
            # read the binary string representation of the file
            known_hdf_as_string = f.read() # this is a known string to compare against during tests
            f.close()
            # cleaning up
            FileSystem.unlink(f.name)

            return known_array, known_hdf_as_string
Developer: dstuebe | Project: pyon | Lines: 26 | Source: test_science_object_codec.py


Example 4: on_start

    def on_start(self):
        #these values should come in from a config file, maybe pyon.yml
        self.pydap_host = self.CFG.get_safe('server.pydap.host', 'localhost')
        self.pydap_port = self.CFG.get_safe('server.pydap.port', '8001')
        self.pydap_url  = 'http://%s:%s/' % (self.pydap_host, self.pydap_port)
        self.pydap_data_path = self.CFG.get_safe('server.pydap.data_path', 'RESOURCE:ext/pydap')
        self.datasets_xml_path = self.CFG.get_safe('server.pydap.datasets_xml_path', "RESOURCE:ext/datasets.xml")
        self.pydap_data_path = FileSystem.get_extended_url(self.pydap_data_path) + '/'

        filename = self.datasets_xml_path.split('/')[-1]
        base = '/'.join(self.datasets_xml_path.split('/')[:-1])
        real_path = FileSystem.get_extended_url(base)
        self.datasets_xml_path = os.path.join(real_path, filename)
        self.setup_filesystem(real_path)
Developer: mbarry02 | Project: coi-services | Lines: 14 | Source: registration_process.py
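
As an aside, the manual split('/')/join dance above for RESOURCE: paths can be written with os.path.split, which is how Example 8 below does it. A minimal sketch of the equivalent form, assuming a configured pyon environment:

    import os
    from pyon.util.file_sys import FileSystem

    datasets_xml_path = "RESOURCE:ext/datasets.xml"
    base, filename = os.path.split(datasets_xml_path)
    real_path = FileSystem.get_extended_url(base)
    datasets_xml_path = os.path.join(real_path, filename)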


Example 5: _force_clean

    def _force_clean(cls, recreate=False):
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.couchdb.couchdb_standalone import CouchDataStore
        datastore = CouchDataStore(config=CFG)
        dbs = datastore.list_datastores()
        things_to_clean = filter(lambda x: x.startswith('%s_' % get_sys_name().lower()), dbs)
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()
        FileSystem._clean(CFG)
Developer: ateranishi | Project: pyon | Lines: 15 | Source: int_test.py


Example 6: _get_time_index

    def _get_time_index(self, granule, timeval):
        '''
        @brief Obtains the index where a time's value is
        @param granule must be a complete dataset (hdf_string provided)
        @param timeval the vector value
        @return Index value for timeval or closest approx such that timeval is IN the subset
        '''
        assert isinstance(granule, StreamGranuleContainer), 'object is not a granule.'
        assert granule.identifiables[self.data_stream_id].values, 'hdf_string is not provided.'

        hdf_string = granule.identifiables[self.data_stream_id].values
        file_path = self._get_hdf_from_string(hdf_string)

        #-------------------------------------------------------------------------------------
        # Determine the field_id for the temporal coordinate vector (aka time)
        #-------------------------------------------------------------------------------------

        time_field = self.definition.identifiables[self.time_id].coordinate_ids[0]
        value_path = granule.identifiables[time_field].values_path or self.definition.identifiables[time_field].values_path
        record_count = granule.identifiables[self.element_count_id].value

        #-------------------------------------------------------------------------------------
        # Go through the time vector and get the indexes that correspond to the timeval
        # It will find a value such that
        # t_n <= i < t_(n+1), where i is the index
        #-------------------------------------------------------------------------------------


        var_name = value_path.split('/').pop()
        res = acquire_data([file_path], [var_name], record_count).next()
        time_vector = res[var_name]['values']
        retval = 0
        for i in xrange(len(time_vector)):
            if time_vector[i] == timeval:
                retval = i
                break
            elif i==0 and time_vector[i] > timeval:
                retval = i
                break
            elif (i+1) < len(time_vector): # not last val
                if time_vector[i] < timeval and time_vector[i+1] > timeval:
                    retval = i
                    break
            else: # last val
                retval = i
                break
        FileSystem.unlink(file_path)
        return retval
Developer: dstuebe | Project: coi-services | Lines: 48 | Source: replay_process.py


Example 7: make_some_data

    def make_some_data(self):
        import numpy as np

        stream_id = 'I am very special'
        definition = SBE37_CDM_stream_definition()
        definition.stream_resource_id = stream_id

        self.couch.create(definition)

        total = 200
        n = 10 # at most n records per granule
        i = 0

        while i < total:
            r = random.randint(1,n)

            psc = PointSupplementConstructor(point_definition=definition, stream_id=stream_id)
            for x in xrange(r):
                i+=1
                point_id = psc.add_point(time=i, location=(0,0,0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=np.random.normal(loc=48.0,scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=np.float32(2.0))
            granule = psc.close_stream_granule()
            hdf_string = granule.identifiables[definition.data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            with open(FileSystem.get_hierarchical_url(FS.CACHE, '%s.hdf5' % sha1), 'wb') as f:  # binary mode: hdf_string is raw bytes
                f.write(hdf_string)
            granule.identifiables[definition.data_stream_id].values = ''
            self.couch.create(granule)
Developer: dstuebe | Project: coi-services | Lines: 30 | Source: data_retriever_test.py
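
Note that Example 7 writes sha1-keyed files into the cache area that Example 17 below reads back. A condensed sketch of that round trip (the payload and digest here are illustrative, and binary file modes are assumed):

    import hashlib
    from pyon.util.file_sys import FileSystem, FS

    payload = 'raw hdf byte string'
    sha1 = hashlib.sha1(payload).hexdigest().upper()

    # Content-addressed write into the cache (cf. Example 7)
    path = FileSystem.get_hierarchical_url(FS.CACHE, sha1, '.hdf5')
    with open(path, 'wb') as f:
        f.write(payload)

    # Read it back by digest (cf. Example 17)
    with open(path, 'rb') as f:
        assert f.read() == payload

    FileSystem.unlink(path)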


Example 8: get_datasets_xml_path

    def get_datasets_xml_path(cls, cfg):
        datasets_xml_path = cfg.get_safe('server.pydap.datasets_xml_path', 'RESOURCE:ext/datasets.xml')
        base, filename = os.path.split(datasets_xml_path)
        base = FileSystem.get_extended_url(base)
        path = os.path.join(base, filename)

        return path
Developer: n1ywb | Project: coi-services | Lines: 7 | Source: registration_process.py


Example 9: _create_coverage

    def _create_coverage(self, dataset_id, description, parameter_dict, spatial_domain, temporal_domain):
        pdict = ParameterDictionary.load(parameter_dict)
        sdom = GridDomain.load(spatial_domain)
        tdom = GridDomain.load(temporal_domain)
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        scov = SimplexCoverage(file_root, dataset_id, description or dataset_id, parameter_dictionary=pdict,
                               temporal_domain=tdom, spatial_domain=sdom, inline_data_writes=self.inline_data_writes)
        return scov
Developer: blazetopher | Project: coi-services | Lines: 7 | Source: dataset_management_service.py


Example 10: _splice_coverage

    def _splice_coverage(cls, dataset_id, scov):
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        vcov = cls._get_coverage(dataset_id, mode='a')
        scov_pth = scov.persistence_dir
        if isinstance(vcov.reference_coverage, SimplexCoverage):
            ccov = ComplexCoverage(file_root, uuid4().hex, 'Complex coverage for %s' % dataset_id,
                                   reference_coverage_locs=[vcov.head_coverage_path],
                                   parameter_dictionary=ParameterDictionary(),
                                   complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
            log.info('Creating Complex Coverage: %s', ccov.persistence_dir)
            ccov.append_reference_coverage(scov_pth)
            ccov_pth = ccov.persistence_dir
            ccov.close()
            vcov.replace_reference_coverage(ccov_pth)
        elif isinstance(vcov.reference_coverage, ComplexCoverage):
            log.info('Appending simplex coverage to complex coverage')
            #vcov.reference_coverage.append_reference_coverage(scov_pth)
            dir_path = vcov.reference_coverage.persistence_dir
            vcov.close()
            ccov = AbstractCoverage.load(dir_path, mode='a')
            ccov.append_reference_coverage(scov_pth)
            ccov.refresh()
            ccov.close()
        vcov.refresh()
        vcov.close()
Developer: edwardhunter | Project: coi-services | Lines: 25 | Source: dataset_management_service.py


Example 11: __init__

    def __init__(self, name=None):
        """
        @param name The name of the dataset
        """
        # generate a random name for the filename if it has not been provided.
        self.filename = FileSystem.get_url(fs=FS.TEMP, filename=name or random_name(), ext='encoder.hdf5')

        # Using inline imports to put off making hdf/numpy required dependencies
        import h5py

        # open an hdf file on disk - in /tmp to write data to since we can't yet do in memory
        try:
            log.debug("Creating h5py file object for the encoder at %s" % self.filename)
            if os.path.isfile(self.filename):
                # if file exists, then append to it
                self.h5pyfile = h5py.File(self.filename, mode='r+', driver='core')
            else:
                # if file does not already exist, write a new one
                self.h5pyfile = h5py.File(self.filename, mode='w', driver='core')
            assert self.h5pyfile, 'No h5py file object created.'
        except IOError:
            log.debug("Error opening file for the HDFEncoder! ")
            raise HDFEncoderException("Error while trying to open file. ")
        except AssertionError as err:
            log.debug(err.message)
            raise HDFEncoderException(err.message)
Developer: dstuebe | Project: pyon | Lines: 26 | Source: hdf_codec.py


Example 12: _get_cov

    def _get_cov(self, name, nt):
        path = CFG.get_safe('server.pydap.data_path', "RESOURCE:ext/pydap")
        ext_path = FileSystem.get_extended_url(path)
        cov, filename = _make_coverage(ext_path, "the_cov")
        cov.insert_timesteps(nt)
        cov.set_parameter_values("time", value=nt)
        return cov, filename
Developer: blazetopher | Project: coi-services | Lines: 7 | Source: test_coverage_handler.py


Example 13: _slice

    def _slice(self, granule, slice_):
        '''
        @brief Creates a granule which is a slice of the granule parameter
        @param granule the superset
        @param slice_ The slice values for which to create the granule
        @return Crafted subset granule of the parameter granule.
        '''
        retval = copy.deepcopy(granule)
        fields = self._list_data(self.definition, granule)
        record_count = slice_.stop - slice_.start
        assert record_count > 0, 'slice is malformed'
        pairs = self._pair_up(granule)
        var_names = [i[0] for i in pairs]  # Get the var_names from the pairs
        log.debug('var_names: %s', var_names)
        file_path = self._get_hdf_from_string(granule.identifiables[self.data_stream_id].values)
        codec = HDFEncoder()
        vectors = acquire_data([file_path], var_names, record_count, slice_).next()

        for row, value in vectors.iteritems():
            vp = self._find_vp(pairs, row)
            # Determine the range_id via reverse dictionary lookup
            #@todo: improve this pattern
            for field, path in fields.iteritems():
                if vp == path:
                    range_id = field
                    break
            bounds_id = retval.identifiables[range_id].bounds_id
            # Recalculate the bounds for this field and update the granule
            range = value['range']
            retval.identifiables[bounds_id].value_pair[0] = float(range[0])
            retval.identifiables[bounds_id].value_pair[1] = float(range[1])
            codec.add_hdf_dataset(vp, value['values'])
            record_count = len(value['values'])
            #----- DEBUGGING ---------
            log.debug('slice- row: %s', row)
            log.debug('slice- value_path: %s', vp)
            log.debug('slice- range_id: %s', range_id)
            log.debug('slice- bounds_id: %s', bounds_id)
            log.debug('slice- limits: %s', value['range'])
            #-------------------------


        retval.identifiables[self.element_count_id].value = record_count
        hdf_string = codec.encoder_close()
        self._patch_granule(retval, hdf_string)
        FileSystem.unlink(file_path)
        return retval
Developer: dstuebe | Project: coi-services | Lines: 47 | Source: replay_process.py


Example 14: _create_coverage

    def _create_coverage(self, dataset_id, parameter_dict_id, time_dom, spatial_dom):
        pd = self.dataset_management_client.read_parameter_dictionary(parameter_dict_id)
        pdict = ParameterDictionary.load(pd)
        sdom = GridDomain.load(spatial_dom.dump())
        tdom = GridDomain.load(time_dom.dump())
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        scov = SimplexCoverage(file_root, dataset_id, dataset_id, parameter_dictionary=pdict, temporal_domain=tdom, spatial_domain=sdom)
        return scov
Developer: wbollenbacher | Project: coi-services | Lines: 8 | Source: test_stream_ingestion_worker.py


Example 15: _force_clean

    def _force_clean(cls, recreate=False):
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.datastore_common import DatastoreFactory
        datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE, scope=get_sys_name())
        #datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE)

        dbs = datastore.list_datastores()
        things_to_clean = filter(lambda x: x.startswith('%s_' % get_sys_name().lower()), dbs)
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()
        FileSystem._clean(CFG)
Developer: j2project | Project: pyon | Lines: 17 | Source: int_test.py


Example 16: refresh_datasets_xml

    def refresh_datasets_xml(self):
        datasets_xml_path = self.CFG.get_safe('server.pydap.datasets_xml_path', "RESOURCE:ext/datasets.xml")
        filename = datasets_xml_path.split('/')[-1]
        base = '/'.join(datasets_xml_path.split('/')[:-1])
        real_path = FileSystem.get_extended_url(base)
        datasets_xml_path = os.path.join(real_path, filename)

        os.remove(datasets_xml_path)
Developer: Bobfrat | Project: coi-services | Lines: 8 | Source: registration_bootstrap.py


Example 17: read_persisted_cache

    def read_persisted_cache(self, sha1, encoding):
        byte_string = None
        path = FileSystem.get_hierarchical_url(FS.CACHE, sha1, '.%s' % encoding)
        try:
            # read in binary mode: the cached payload is a raw byte string
            with open(path, 'rb') as f:
                byte_string = f.read()
        except IOError as e:
            raise BadRequest(e.message)
        return byte_string
Developer: ooici-eoi | Project: coi-services | Lines: 9 | Source: replay_process_a.py


Example 18: _create_coverage

    def _create_coverage(self, dataset_id, description, parameter_dict, spatial_domain, temporal_domain):
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        pdict = ParameterDictionary.load(parameter_dict)
        sdom = GridDomain.load(spatial_domain)
        tdom = GridDomain.load(temporal_domain)
        scov = self._create_simplex_coverage(dataset_id, pdict, sdom, tdom, self.inline_data_writes)
        vcov = ViewCoverage(file_root, dataset_id, description or dataset_id, reference_coverage_location=scov.persistence_dir)
        scov.close()
        return vcov
Developer: edwardhunter | Project: coi-services | Lines: 9 | Source: dataset_management_service.py


Example 19: _create_view_coverage

    def _create_view_coverage(self, dataset_id, description, parent_dataset_id):
        # As annoying as it is, we need to load the view coverage belonging to the parent
        # dataset id and use the information inside to build the new one...
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        pscov = self._get_simplex_coverage(parent_dataset_id, mode='r')
        scov_location = pscov.persistence_dir
        pscov.close()
        vcov = ViewCoverage(file_root, dataset_id, description or dataset_id, reference_coverage_location=scov_location)
        return vcov
Developer: edwardhunter | Project: coi-services | Lines: 9 | Source: dataset_management_service.py


Example 20: sub_listen

        def sub_listen(msg, headers):

            assertions(isinstance(msg, StreamGranuleContainer), 'replayed message is not a granule.')
            hdf_string = msg.identifiables[data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            assertions(sha1 == msg.identifiables[encoding_id].sha1, 'Checksum failed.')
            assertions(msg.identifiables[element_count_id].value == 1,
                       'record replay count is incorrect %d.' % msg.identifiables[element_count_id].value)
            output_file = FileSystem.mktemp()
            output_file.write(msg.identifiables[data_stream_id].values)
            output_file_path = output_file.name
            output_file.close()
            output_vectors = acquire_data([output_file_path], fields, 2).next()
            for field in fields:
                comparison = (input_vectors[field]['values'] == output_vectors[field]['values'])
                assertions(comparison.all(), 'vector mismatch: %s vs %s' %
                                             (input_vectors[field]['values'], output_vectors[field]['values']))
            FileSystem.unlink(output_file_path)
            ar.set(True)
Developer: dstuebe | Project: coi-services | Lines: 18 | Source: test_replay_integration.py



Note: The pyon.util.file_sys.FileSystem class examples in this article were compiled from open-source projects hosted on platforms such as GitHub and MSDocs. The code snippets were selected from projects contributed by their original authors, who retain copyright; consult each project's license before redistributing or reusing the code, and do not repost without permission.

