
Python util.spawn Function Code Examples


This article collects typical usage examples of the Python pyon.util.spawn function. If you are wondering what spawn does, how to call it, or what real-world uses look like, the hand-picked examples below should help.



Twenty code examples of the spawn function are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code examples. A minimal sketch of the pattern most of these examples share appears right after this paragraph.
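
The sketch below illustrates the recurring pattern in these examples: spawn a worker greenlet, hand the result back through an AsyncResult (or Event), and wait with a timeout. It uses gevent directly as a stand-in, on the assumption that pyon.util's spawn behaves like gevent.spawn and returns a greenlet; the import path and the names double_async and worker are illustrative only, not part of the pyon API.

    # Minimal sketch, assuming pyon.util's spawn is equivalent to gevent.spawn.
    from gevent import spawn              # stand-in for pyon.util's spawn
    from gevent.event import AsyncResult

    def double_async(value):
        res = AsyncResult()

        def worker():
            # Run on a background greenlet and hand the result back to the caller.
            res.set(value * 2)

        spawn(worker)                     # returns immediately; worker runs concurrently
        return res.get(timeout=5)         # block until the result arrives (or time out)

    if __name__ == '__main__':
        print(double_async(21))           # -> 42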

Example 1: _es_call

def _es_call(es, *args, **kwargs):
    res = AsyncResult()
    def async_call(es, *args, **kwargs):
        res.set(es(*args, **kwargs))
    spawn(async_call, es, *args, **kwargs)
    try:
        retval = res.get(timeout=10)
    except Timeout:
        raise exceptions.Timeout("Call to ElasticSearch timed out.")
    return retval
Developer: dstuebe | Project: coi-services | Lines: 10 | Source: index_management_service.py


Example 2: get_ready_event

    def get_ready_event(self):
        """
        Returns an Event that is set when all the listeners in this Process are running.
        """
        ev = Event()
        def allready(ev):
            waitall([x.get_ready_event() for x in self.listeners])
            ev.set()

        spawn(allready, ev)
        return ev
Developer: oldpatricka | Project: pyon | Lines: 11 | Source: process.py


Example 3: get_dirty_values_async_result

    def get_dirty_values_async_result(self):
        dirty_async_res = AsyncResult()
        def dirty_check(self, res):
            while True:
                if self.is_dirty():
                    time.sleep(0.1)
                else:
                    res.set(True)
                    break

        spawn(dirty_check, self, dirty_async_res)

        return dirty_async_res
Developer: kerfoot | Project: coverage-model | Lines: 13 | Source: brick_dispatch.py


Example 4: run_test_dispatcher

def run_test_dispatcher(work_count, num_workers=1):
    # Set up temporary directories to save data
    import shutil
    BASE_DIR = tempfile.mkdtemp()
    PIDANTIC_DIR = tempfile.mkdtemp()

    WORK_KEYS = ['a','b','c','d','e']

    for x in [x for x in os.listdir(BASE_DIR) if x.endswith('.h5')]:
        os.remove(os.path.join(BASE_DIR,x))

    fps = {}
    for k in WORK_KEYS:
        fps[k] = os.path.join(BASE_DIR, '{0}.h5'.format(k))
#        with h5py.File(fps[k], 'a'):
#            pass

    bD = (50,)
    cD = (5,)
    fv = -9999
    dtype = 'f'

    def fcb(message, work):
        log.error('WORK DISCARDED!!!; %s: %s', message, work)

    disp = BrickWriterDispatcher(fcb, num_workers=num_workers, pidantic_dir=PIDANTIC_DIR)
    disp.run()

    def make_work():
        for x in xrange(work_count):
            bk = random.choice(WORK_KEYS)
            brick_metrics = (fps[bk], bD, cD, dtype, fv)
            if np.random.random_sample(1)[0] > 0.5:
                sl = int(np.random.randint(0,10,1)[0])
                w = np.random.random_sample(1)[0]
            else:
                strt = int(np.random.randint(0,bD[0] - 2,1)[0])
                stp = int(np.random.randint(strt+1,bD[0],1)[0])
                sl = slice(strt, stp)
                w = np.random.random_sample(stp-strt)
            disp.put_work(work_key=bk, work_metrics=brick_metrics, work=([sl], w))
            time.sleep(0.1)

    spawn(make_work)

    # Remove temporary directories
    shutil.rmtree(BASE_DIR)
    shutil.rmtree(PIDANTIC_DIR)

    return disp
Developer: birdage | Project: coverage-model | Lines: 50 | Source: brick_dispatch.py


Example 5: on_start

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._persister_loop, self.persist_interval)
        log.debug('EventPersister persist greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

        # View trigger thread
        self._refresh_greenlet = spawn(self._refresher_loop, self.refresh_interval)
        log.debug('EventPersister view refresher greenlet started in "%s" (interval %s)', self.__class__.__name__, self.refresh_interval)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._on_event,
                                         queue_name="event_persister")

        self.event_sub.start()
Developer: MauriceManning | Project: coi-services | Lines: 15 | Source: event_persister.py


Example 6: start

    def start(self):
        # Create our own queue for container heartbeats and broadcasts
        topic = get_safe(self._pd_core.pd_cfg, "aggregator.container_topic") or "bx_containers"
        queue_name = "pd_aggregator_%s_%s" % (topic, create_valid_identifier(self.container.id, dot_sub="_"))
        self.sub_cont = Subscriber(binding=topic, from_name=queue_name, auto_delete=True,
                                   callback=self._receive_container_info)
        self.sub_cont_gl = spawn(self.sub_cont.listen)
        self.sub_cont.get_ready_event().wait()

        self.evt_sub = EventSubscriber(event_type=OT.ContainerLifecycleEvent, callback=self._receive_event)
        self.evt_sub.add_event_subscription(event_type=OT.ProcessLifecycleEvent)
        self.evt_sub_gl = spawn(self.evt_sub.listen)
        self.evt_sub.get_ready_event().wait()

        log.info("PD Aggregator - event and heartbeat subscribers started")
Developer: edwardhunter | Project: scioncc | Lines: 15 | Source: pd_registry.py


Example 7: test_rpc_with_xn

    def test_rpc_with_xn(self):
        # get an xn to use for send/recv
        xn = self.container.ex_manager.create_xn_service('hello')
        self.addCleanup(xn.delete)

        # create an RPCServer for a hello service
        hs = HelloService()
        rpcs = RPCServer(from_name=xn, service=hs)

        # spawn the listener, kill on test exit (success/fail/error should cover?)
        gl_listen = spawn(rpcs.listen)
        def cleanup():
            rpcs.close()
            gl_listen.join(timeout=2)
            gl_listen.kill()
        self.addCleanup(cleanup)

        # wait for listen to be ready
        rpcs.get_ready_event().wait(timeout=5)

        # ok, now create a client using same xn
        hsc = HelloServiceClient(to_name=xn)

        # try to message it!
        ret = hsc.hello('hi there')

        # did we get back what we expected?
        self.assertEquals(ret, 'BACK:hi there')
Developer: j2project | Project: pyon | Lines: 28 | Source: test_exchange.py


Example 8: start

    def start(self):
        queue_name = get_safe(self._pd_core.pd_cfg, "command_queue") or "pd_command"
        self.sub_cont = Subscriber(binding=queue_name, from_name=queue_name, callback=self._receive_command)
        self.sub_cont_gl = spawn(self.sub_cont.listen, activate=False)
        self.sub_cont.get_ready_event().wait()

        self.pub_result = Publisher()
Developer: edwardhunter | Project: scioncc | Lines: 7 | Source: pd_engine.py


Example 9: execute_acquire_data

    def execute_acquire_data(self, *args):
        """
        Spawns a greenlet to perform a data acquisition
        Calls BaseDataHandler._acquire_data
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @parameter args First argument should be a config dictionary
        """
        try:
            config = args[0]

        except IndexError:
            raise ParameterError('\'acquire_data\' command requires a config dict.')

        if not isinstance(config, dict):
            raise TypeError('args[0] of \'acquire_data\' is not a dict.')
        else:
            if get_safe(config,'constraints') is None and not self._semaphore.acquire(blocking=False):
                log.warn('Already acquiring new data - action not duplicated')
                return

            g = spawn(self._acquire_data, config, self._unlock_new_data_callback)
            log.debug('** Spawned {0}'.format(g))
            self._glet_queue.append(g)
Developer: seman | Project: coi-services | Lines: 27 | Source: base_data_handler.py


Example 10: _spawn

    def _spawn(self):
        """ Spawn a gevent greenlet using defined target method and args.
        """
        gl = spawn(self.target, *self.spawn_args, **self.spawn_kwargs)
        gl.link(lambda _: self.ev_exit.set())  # Set exit event when we terminate
        gl._glname = "ION Thread %s" % str(self.target)
        return gl
Developer: scion-network | Project: scioncc | Lines: 7 | Source: thread.py


Example 11: test_pub_speed

    def test_pub_speed(self):
        pub = Publisher(node=self.container.node, name="i_no_exist")

        print >>sys.stderr, ""

        self.counter = 0
        self.alive = True
        def sendem():
            while self.alive:
                self.counter += 1
                pub.publish('meh')

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >>sys.stderr, "Published messages per second:", mps, "(", self.counter, "messages in", diff, "seconds)"
Developer: oldpatricka | Project: pyon | Lines: 28 | Source: test_speed.py


Example 12: test_rpc_speed

    def test_rpc_speed(self):
        hsc = HelloServiceClient()

        print >>sys.stderr, ""

        self.counter = 0
        self.alive = True
        def sendem():
            while self.alive:
                hsc.noop('data')
                self.counter += 1

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >>sys.stderr, "Requests per second (RPC):", mps, "(", self.counter, "messages in", diff, "seconds)"
Developer: oldpatricka | Project: pyon | Lines: 26 | Source: test_speed.py


Example 13: test_known_error

    def test_known_error(self):

        # IonExceptions and TypeErrors get forwarded back intact
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise NotFound("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, NotFound)
Developer: mkl- | Project: scioncc | Lines: 26 | Source: test_process.py


Example 14: test_unknown_error

    def test_unknown_error(self):

        # Unhandled exceptions get handled and then converted to ContainerErrors
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise self.ExpectedError("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, ContainerError)
        self.assertEquals(len(p._errors), 1)
Developer: mkl- | Project: scioncc | Lines: 27 | Source: test_process.py


Example 15: on_start

    def on_start(self):
        super(IngestionWorker,self).on_start()
        #----------------------------------------------
        # Start up couch
        #----------------------------------------------


        self.couch_config = self.CFG.get('couch_storage')
        self.hdf_storage = self.CFG.get('hdf_storage')

        self.number_of_workers = self.CFG.get('number_of_workers')
        self.description = self.CFG.get('description')

        self.ingest_config_id = self.CFG.get('configuration_id')

        self.datastore_name = self.couch_config.get('datastore_name',None) or 'dm_datastore'
        try:
            self.datastore_profile = getattr(DataStore.DS_PROFILE, self.couch_config.get('datastore_profile','SCIDATA'))
        except AttributeError:
            log.exception('Invalid datastore profile passed to ingestion worker. Defaulting to SCIDATA')

            self.datastore_profile = DataStore.DS_PROFILE.SCIDATA
        log.debug('datastore_profile %s' % self.datastore_profile)
        self.db = self.container.datastore_manager.get_datastore(ds_name=self.datastore_name, profile = self.datastore_profile, config = self.CFG)

        self.resource_reg_client = ResourceRegistryServiceClient(node = self.container.node)

        self.dataset_configs = {}
        # update the policy
        def receive_dataset_config_event(event_msg, headers):
            log.info('Updating dataset config in ingestion worker: %s', event_msg)

            if event_msg.type != DatasetIngestionTypeEnum.DATASETINGESTIONBYSTREAM:
                raise IngestionWorkerException('Received invalid type in dataset config event.')

            stream_id = event_msg.configuration.stream_id

            if event_msg.deleted:
                try:
                    del self.dataset_configs[stream_id]
                except KeyError:
                    log.info('Tried to remove dataset config that does not exist!')
            else:
                self.dataset_configs[stream_id] = event_msg

            # Hook to override just before processing is complete
            self.dataset_configs_event_test_hook(event_msg, headers)


        #Start the event subscriber - really - what a mess!
        self.event_subscriber = EventSubscriber(
            event_type="DatasetIngestionConfigurationEvent",
            origin=self.ingest_config_id,
            callback=receive_dataset_config_event
            )

        self.gl = spawn(self.event_subscriber.listen)
        self.event_subscriber._ready_event.wait(timeout=5)

        log.info(str(self.db))
Developer: seman | Project: coi-services | Lines: 60 | Source: ingestion_worker.py


Example 16: execute_acquire_data

    def execute_acquire_data(self, *args):
        """
        Creates a copy of self._dh_config, creates a publisher, and spawns a greenlet to perform a data acquisition cycle
        If the args[0] is a dict, any entries keyed with one of the 'PATCHABLE_CONFIG_KEYS' are used to patch the config
        Greenlet binds to BaseDataHandler._acquire_data and passes the publisher and config
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @parameter args First argument can be a config dictionary
        """
        log.debug('Executing acquire_data: args = {0}'.format(args))

        # Make a copy of the config to ensure no cross-pollution
        config = self._dh_config.copy()

        # Patch the config if mods are passed in
        try:
            config_mods = args[0]
            if not isinstance(config_mods, dict):
                raise IndexError()

            log.debug('Configuration modifications provided: {0}'.format(config_mods))
            for k in self._params['PATCHABLE_CONFIG_KEYS']:
                p=get_safe(config_mods, k)
                if not p is None:
                    config[k] = p

        except IndexError:
            log.info('No configuration modifications were provided')

        # Verify that there is a stream_id member in the config
        stream_id = get_safe(config, 'stream_id')
        if not stream_id:
            raise ConfigurationError('Configuration does not contain required \'stream_id\' member')

        isNew = get_safe(config, 'constraints') is None

        if isNew and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        ndc = None
        if isNew:
            # Get the NewDataCheck attachment and add it's content to the config
            ext_ds_id = get_safe(config,'external_dataset_res_id')
            if ext_ds_id:
                ndc = self._find_new_data_check_attachment(ext_ds_id)

        config['new_data_check'] = ndc

        # Create a publisher to pass into the greenlet
        publisher = self._stream_registrar.create_publisher(stream_id=stream_id)

        # Spawn a greenlet to do the data acquisition and publishing
        g = spawn(self._acquire_data, config, publisher, self._unlock_new_data_callback, self._update_new_data_check_attachment)
        log.debug('** Spawned {0}'.format(g))
        self._glet_queue.append(g)
Developer: kerfoot | Project: coi-services | Lines: 59 | Source: base_data_handler.py


Example 17: on_start

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS, callback=self._on_event)
        self.event_sub.start()
Developer: kerfoot | Project: coi-services | Lines: 8 | Source: event_persister.py


Example 18: _listen

    def _listen(self, sub):
        """
        Pass in a subscriber here, this will make it listen in a background greenlet.
        """
        gl = spawn(sub.listen)
        self._listens.append(gl)
        sub._ready_event.wait(timeout=5)

        return gl
Developer: dstuebe | Project: pyon | Lines: 9 | Source: test_event.py


Example 19: start

    def start(self):
        """
        Starts all internal greenlets of this router device.
        """
        self._queue_incoming = Queue()
        self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
        self._gl_msgs.link_exception(self._child_failed)

        self.gl_ioloop = spawn(self._run_ioloop)
Developer: jamie-cyber1 | Project: pyon | Lines: 9 | Source: transport.py


Example 20: start

    def start(self):
        log.debug("SFlowManager.start")

        if self._counter_interval > 0:
            self._gl_counter = spawn(self._counter)
        else:
            log.debug("Counter interval is 0, not spawning counter greenlet")

        self._udp_socket = socket(AF_INET, SOCK_DGRAM)
Developer: ooici | Project: pyon | Lines: 9 | Source: sflow.py



Note: The pyon.util.spawn examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.

