
Python ray.put Function Code Examples


This article collects typical usage examples of the ray.put function in Python. If you have been wondering how exactly to call put, what its arguments look like, or what real-world uses of it look like, the curated code examples below may help.



Twenty code examples of the put function are shown below, sorted by popularity by default. You can upvote the examples you find helpful; your feedback helps the system recommend better Python code examples.
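Before the collected examples, here is a minimal, self-contained sketch of the basic ray.put / ray.get round trip that most of the snippets below build on. The stored object is an arbitrary placeholder; note that several older examples below use the legacy ray.init(start_ray_local=True) / ray.worker.cleanup() API, while this sketch uses the current ray.init() / ray.shutdown() pair.

import ray

ray.init()  # start (or connect to) a local Ray instance

# ray.put stores a Python object in the shared object store and returns
# an object reference; ray.get resolves that reference back to a value.
value = {"weights": [1.0, 2.0, 3.0]}
ref = ray.put(value)
assert ray.get(ref) == value

ray.shutdown()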

Example 1: testPutGet

  def testPutGet(self):
    ray.init(start_ray_local=True, num_workers=0)

    for i in range(100):
      value_before = i * 10 ** 6
      objectid = ray.put(value_before)
      value_after = ray.get(objectid)
      self.assertEqual(value_before, value_after)

    for i in range(100):
      value_before = i * 10 ** 6 * 1.0
      objectid = ray.put(value_before)
      value_after = ray.get(objectid)
      self.assertEqual(value_before, value_after)

    for i in range(100):
      value_before = "h" * i
      objectid = ray.put(value_before)
      value_after = ray.get(objectid)
      self.assertEqual(value_before, value_after)

    for i in range(100):
      value_before = [1] * i
      objectid = ray.put(value_before)
      value_after = ray.get(objectid)
      self.assertEqual(value_before, value_after)

    ray.worker.cleanup()
Developer: amplab, Project: ray, Lines of code: 28, Source: runtest.py


Example 2: testObjStore

  def testObjStore(self):
    node_ip_address = "127.0.0.1"
    scheduler_address = ray.services.start_ray_local(num_objstores=2, num_workers=0, worker_path=None)
    ray.connect(node_ip_address, scheduler_address, mode=ray.SCRIPT_MODE)
    objstore_addresses = [objstore_info["address"] for objstore_info in ray.scheduler_info()["objstores"]]
    w1 = ray.worker.Worker()
    w2 = ray.worker.Worker()
    ray.reusables._cached_reusables = [] # This is a hack to make the test run.
    ray.connect(node_ip_address, scheduler_address, objstore_address=objstore_addresses[0], mode=ray.SCRIPT_MODE, worker=w1)
    ray.reusables._cached_reusables = [] # This is a hack to make the test run.
    ray.connect(node_ip_address, scheduler_address, objstore_address=objstore_addresses[1], mode=ray.SCRIPT_MODE, worker=w2)

    for cls in [Foo, Bar, Baz, Qux, SubQux, Exception, CustomError, Point, NamedTupleExample]:
      ray.register_class(cls)

    # putting and getting an object shouldn't change it
    for data in RAY_TEST_OBJECTS:
      objectid = ray.put(data, w1)
      result = ray.get(objectid, w1)
      assert_equal(result, data)

    # putting an object, shipping it to another worker, and getting it shouldn't change it
    for data in RAY_TEST_OBJECTS:
      objectid = ray.put(data, w1)
      result = ray.get(objectid, w2)
      assert_equal(result, data)

    # putting an object, shipping it to another worker, and getting it shouldn't change it
    for data in RAY_TEST_OBJECTS:
      objectid = ray.put(data, w2)
      result = ray.get(objectid, w1)
      assert_equal(result, data)

    # This test fails. See https://github.com/ray-project/ray/issues/159.
    # getting multiple times shouldn't matter
    # for data in [np.zeros([10, 20]), np.random.normal(size=[45, 25]), np.zeros([10, 20], dtype=np.dtype("float64")), np.zeros([10, 20], dtype=np.dtype("float32")), np.zeros([10, 20], dtype=np.dtype("int64")), np.zeros([10, 20], dtype=np.dtype("int32"))]:
    #   objectid = worker.put(data, w1)
    #   result = worker.get(objectid, w2)
    #   result = worker.get(objectid, w2)
    #   result = worker.get(objectid, w2)
    #   assert_equal(result, data)

    # Getting a buffer after modifying it before it finishes should return updated buffer
    objectid = ray.libraylib.get_objectid(w1.handle)
    buf = ray.libraylib.allocate_buffer(w1.handle, objectid, 100)
    buf[0][0] = 1
    ray.libraylib.finish_buffer(w1.handle, objectid, buf[1], 0)
    completedbuffer = ray.libraylib.get_buffer(w1.handle, objectid)
    self.assertEqual(completedbuffer[0][0], 1)

    # We started multiple drivers manually, so we will disconnect them manually.
    ray.disconnect(worker=w1)
    ray.disconnect(worker=w2)
    ray.worker.cleanup()
Developer: amplab, Project: ray, Lines of code: 54, Source: runtest.py


Example 3: test_getting_and_putting

def test_getting_and_putting(ray_start_sharded):
    for n in range(8):
        x = np.zeros(10**n)

        for _ in range(100):
            ray.put(x)

        x_id = ray.put(x)
        for _ in range(1000):
            ray.get(x_id)

    assert ray.services.remaining_processes_alive()
Developer: robertnishihara, Project: ray, Lines of code: 12, Source: test_stress.py


Example 4: testGettingAndPutting

    def testGettingAndPutting(self):
        ray.init(num_workers=1)

        for n in range(8):
            x = np.zeros(10 ** n)

            for _ in range(100):
                ray.put(x)

            x_id = ray.put(x)
            for _ in range(1000):
                ray.get(x_id)

        self.assertTrue(ray.services.all_processes_alive())
        ray.worker.cleanup()
Developer: adgirish, Project: ray, Lines of code: 15, Source: stress_tests.py


Example 5: step

    def step(self):
        with self.update_weights_timer:
            if self.remote_evaluators:
                weights = ray.put(self.local_evaluator.get_weights())
                for e in self.remote_evaluators:
                    e.set_weights.remote(weights)

        with self.sample_timer:
            samples = []
            while sum(s.count for s in samples) < self.train_batch_size:
                if self.remote_evaluators:
                    samples.extend(
                        ray.get([
                            e.sample.remote() for e in self.remote_evaluators
                        ]))
                else:
                    samples.append(self.local_evaluator.sample())
            samples = SampleBatch.concat_samples(samples)
            self.sample_timer.push_units_processed(samples.count)

        with self.grad_timer:
            for i in range(self.num_sgd_iter):
                fetches = self.local_evaluator.compute_apply(samples)
                if "stats" in fetches:
                    self.learner_stats = fetches["stats"]
                if self.num_sgd_iter > 1:
                    logger.debug("{} {}".format(i, fetches))
            self.grad_timer.push_units_processed(samples.count)

        self.num_steps_sampled += samples.count
        self.num_steps_trained += samples.count
        return fetches
Developer: jamescasbon, Project: ray, Lines of code: 32, Source: sync_samples_optimizer.py


Example 6: numpy_to_dist

def numpy_to_dist(a):
  result = DistArray(a.shape)
  for index in np.ndindex(*result.num_blocks):
    lower = DistArray.compute_block_lower(index, a.shape)
    upper = DistArray.compute_block_upper(index, a.shape)
    result.objectids[index] = ray.put(a[[slice(l, u) for (l, u) in zip(lower, upper)]])
  return result
Developer: amplab, Project: ray, Lines of code: 7, Source: core.py


Example 7: testRecursiveObjects

  def testRecursiveObjects(self):
    ray.init(start_ray_local=True, num_workers=0)

    class ClassA(object):
      pass

    ray.register_class(ClassA)

    # Make a list that contains itself.
    l = []
    l.append(l)
    # Make an object that contains itself as a field.
    a1 = ClassA()
    a1.field = a1
    # Make two objects that contain each other as fields.
    a2 = ClassA()
    a3 = ClassA()
    a2.field = a3
    a3.field = a2
    # Make a dictionary that contains itself.
    d1 = {}
    d1["key"] = d1
    # Create a list of recursive objects.
    recursive_objects = [l, a1, a2, a3, d1]

    # Check that exceptions are thrown when we serialize the recursive objects.
    for obj in recursive_objects:
      self.assertRaises(Exception, lambda : ray.put(obj))

    ray.worker.cleanup()
Developer: amplab, Project: ray, Lines of code: 30, Source: runtest.py


Example 8: modified_lu

def modified_lu(q):
    """Perform a modified LU decomposition of a matrix.

    This takes a matrix q with orthonormal columns, returns l, u, s such that
    q - s = l * u.

    Args:
        q: A two dimensional orthonormal matrix q.

    Returns:
        A tuple of a lower triangular matrix l, an upper triangular matrix u,
            and a a vector representing a diagonal matrix s such that
            q - s = l * u.
    """
    q = q.assemble()
    m, b = q.shape[0], q.shape[1]
    S = np.zeros(b)

    q_work = np.copy(q)

    for i in range(b):
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]
        # Scale ith column of L by diagonal element.
        q_work[(i + 1):m, i] /= q_work[i, i]
        # Perform Schur complement update.
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
                                                 q_work[i, (i + 1):b])

    L = np.tril(q_work)
    for i in range(b):
        L[i, i] = 1
    U = np.triu(q_work)[:b, :]
    # TODO(rkn): Get rid of the put below.
    return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S
Developer: jamescasbon, Project: ray, Lines of code: 35, Source: linalg.py


Example 9: modified_lu

def modified_lu(q):
  """
  Algorithm 5 from http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf
  takes a matrix q with orthonormal columns, returns l, u, s such that q - s = l * u
  arguments:
    q: a two dimensional orthonormal q
  return values:
    l: lower triangular
    u: upper triangular
    s: a diagonal matrix represented by its diagonal
  """
  q = q.assemble()
  m, b = q.shape[0], q.shape[1]
  S = np.zeros(b)

  q_work = np.copy(q)

  for i in range(b):
    S[i] = -1 * np.sign(q_work[i, i])
    q_work[i, i] -= S[i]
    q_work[(i + 1):m, i] /= q_work[i, i] # scale ith column of L by diagonal element
    q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i], q_work[i, (i + 1):b]) # perform Schur complement update

  L = np.tril(q_work)
  for i in range(b):
    L[i, i] = 1
  U = np.triu(q_work)[:b, :]
  return ray.get(numpy_to_dist.remote(ray.put(L))), U, S # TODO(rkn): get rid of put
Developer: amplab, Project: ray, Lines of code: 28, Source: linalg.py
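Both modified_lu examples above state the property q - s = l * u for a matrix q with orthonormal columns. The following standalone NumPy sketch (no Ray, no DistArray) mirrors the same elimination loop on a small random matrix and checks that property numerically; it is only an illustration of the math, not the distributed implementation.

import numpy as np

m, b = 8, 4
q, _ = np.linalg.qr(np.random.rand(m, b))  # q has orthonormal columns

S = np.zeros(b)
q_work = np.copy(q)
for i in range(b):
    S[i] = -1 * np.sign(q_work[i, i])
    q_work[i, i] -= S[i]
    # Scale the ith column of L by the pivot, then apply the Schur update.
    q_work[(i + 1):m, i] /= q_work[i, i]
    q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
                                             q_work[i, (i + 1):b])

L = np.tril(q_work)
for i in range(b):
    L[i, i] = 1
U = np.triu(q_work)[:b, :]

# Expand the diagonal s into an m x b matrix and verify q - s = L @ U.
S_mat = np.zeros((m, b))
S_mat[:b, :b] = np.diag(S)
assert np.allclose(q - S_mat, L @ U)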


Example 10: step

    def step(self):
        with self.update_weights_timer:
            if self.remote_evaluators:
                weights = ray.put(self.local_evaluator.get_weights())
                for e in self.remote_evaluators:
                    e.set_weights.remote(weights)

        with self.sample_timer:
            if self.remote_evaluators:
                batches = ray.get(
                    [e.sample.remote() for e in self.remote_evaluators])
            else:
                batches = [self.local_evaluator.sample()]

            # Handle everything as if multiagent
            tmp = []
            for batch in batches:
                if isinstance(batch, SampleBatch):
                    batch = MultiAgentBatch({
                        DEFAULT_POLICY_ID: batch
                    }, batch.count)
                tmp.append(batch)
            batches = tmp

            for batch in batches:
                self.replay_buffer.append(batch)
                self.num_steps_sampled += batch.count
                self.buffer_size += batch.count
                while self.buffer_size > self.max_buffer_size:
                    evicted = self.replay_buffer.pop(0)
                    self.buffer_size -= evicted.count

        if self.num_steps_sampled >= self.replay_starts:
            self._optimize()
Developer: jamescasbon, Project: ray, Lines of code: 34, Source: sync_batch_replay_optimizer.py


Example 11: step

    def step(self):
        with self.update_weights_timer:
            if self.remote_evaluators:
                weights = ray.put(self.local_evaluator.get_weights())
                for e in self.remote_evaluators:
                    e.set_weights.remote(weights)

        with self.sample_timer:
            if self.remote_evaluators:
                batch = SampleBatch.concat_samples(
                    ray.get(
                        [e.sample.remote() for e in self.remote_evaluators]))
            else:
                batch = self.local_evaluator.sample()

            # Handle everything as if multiagent
            if isinstance(batch, SampleBatch):
                batch = MultiAgentBatch({
                    DEFAULT_POLICY_ID: batch
                }, batch.count)

            for policy_id, s in batch.policy_batches.items():
                for row in s.rows():
                    self.replay_buffers[policy_id].add(
                        pack_if_needed(row["obs"]),
                        row["actions"],
                        row["rewards"],
                        pack_if_needed(row["new_obs"]),
                        row["dones"],
                        weight=None)

        if self.num_steps_sampled >= self.replay_starts:
            self._optimize()

        self.num_steps_sampled += batch.count
Developer: robertnishihara, Project: ray, Lines of code: 35, Source: sync_replay_optimizer.py


Example 12: testRegisterClass

  def testRegisterClass(self):
    ray.init(start_ray_local=True, num_workers=0)

    # Check that putting an object of a class that has not been registered
    # throws an exception.
    class TempClass(object):
      pass
    self.assertRaises(Exception, lambda : ray.put(Foo))
    # Check that registering a class that Ray cannot serialize efficiently
    # raises an exception.
    self.assertRaises(Exception, lambda : ray.register_class(type(True)))
    # Check that registering the same class with pickle works.
    ray.register_class(type(float), pickle=True)
    self.assertEqual(ray.get(ray.put(float)), float)

    ray.worker.cleanup()
Developer: amplab, Project: ray, Lines of code: 16, Source: runtest.py


Example 13: step

    def step(self):
        weights = ray.put(self.local_evaluator.get_weights())
        gradient_queue = []
        num_gradients = 0

        # Kick off the first wave of async tasks
        for e in self.remote_evaluators:
            e.set_weights.remote(weights)
            fut = e.compute_gradients.remote(e.sample.remote())
            gradient_queue.append((fut, e))
            num_gradients += 1

        # Note: can't use wait: https://github.com/ray-project/ray/issues/1128
        while gradient_queue:
            with self.wait_timer:
                fut, e = gradient_queue.pop(0)
                gradient = ray.get(fut)

            if gradient is not None:
                with self.apply_timer:
                    self.local_evaluator.apply_gradients(gradient)

            if num_gradients < self.grads_per_step:
                with self.dispatch_timer:
                    e.set_weights.remote(self.local_evaluator.get_weights())
                    fut = e.compute_gradients.remote(e.sample.remote())
                    gradient_queue.append((fut, e))
                    num_gradients += 1

        self.num_steps_sampled += self.grads_per_step * self.batch_size
        self.num_steps_trained += self.grads_per_step * self.batch_size
Developer: adgirish, Project: ray, Lines of code: 31, Source: async.py


Example 14: __setstate__

    def __setstate__(self, state):
        if "evaluator" in state:
            self.local_evaluator.restore(state["evaluator"])
            remote_state = ray.put(state["evaluator"])
            for r in self.remote_evaluators:
                r.restore.remote(remote_state)
        if "optimizer" in state:
            self.optimizer.restore(state["optimizer"])
Developer: robertnishihara, Project: ray, Lines of code: 8, Source: agent.py


Example 15: Driver

    def Driver(success):
        success.value = True
        # Start driver.
        ray.init(redis_address=redis_address)
        summary_start = StateSummary()
        if (0, 1) != summary_start[:2]:
            success.value = False

        max_attempts_before_failing = 100

        # Two new objects.
        ray.get(ray.put(1111))
        ray.get(ray.put(1111))
        attempts = 0
        while (2, 1, summary_start[2]) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break

        @ray.remote
        def f():
            ray.put(1111)  # Yet another object.
            return 1111  # A returned object as well.

        # 1 new function.
        attempts = 0
        while (2, 1, summary_start[2] + 1) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break

        ray.get(f.remote())
        attempts = 0
        while (4, 2, summary_start[2] + 1) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break

        ray.shutdown()
Developer: jamescasbon, Project: ray, Lines of code: 45, Source: monitor_test.py


Example 16: test_cache

def test_cache(ray_start_regular):
    A = np.random.rand(1, 1000000)
    v = np.random.rand(1000000)
    A_id = ray.put(A)
    v_id = ray.put(v)
    a = time.time()
    for i in range(100):
        A.dot(v)
    b = time.time() - a
    c = time.time()
    for i in range(100):
        ray.get(A_id).dot(ray.get(v_id))
    d = time.time() - c

    if d > 1.5 * b:
        if os.getenv("TRAVIS") is None:
            raise Exception("The caching test was too slow. "
                            "d = {}, b = {}".format(d, b))
        else:
            print("WARNING: The caching test was too slow. "
                  "d = {}, b = {}".format(d, b))
Developer: jamescasbon, Project: ray, Lines of code: 21, Source: microbenchmarks.py


Example 17: pin_in_object_store

def pin_in_object_store(obj):
    """Pin an object in the object store.

    It will be available as long as the pinning process is alive. The pinned
    object can be retrieved by calling get_pinned_object on the identifier
    returned by this call.
    """

    obj_id = ray.put(_to_pinnable(obj))
    _pinned_objects.append(ray.get(obj_id))
    return "{}{}".format(PINNED_OBJECT_PREFIX,
                         base64.b64encode(obj_id.binary()).decode("utf-8"))
Developer: robertnishihara, Project: ray, Lines of code: 12, Source: util.py
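A hedged usage sketch for the helper above. It assumes the companion get_pinned_object function (mentioned in the docstring but not shown here) decodes the returned identifier and hands back the pinned object, and that Ray has already been initialized in the pinning process.

import ray

ray.init()

# Pin a small object and retrieve it again via the returned identifier;
# get_pinned_object is the assumed companion helper from the same util module.
pinned_id = pin_in_object_store({"stats": [1, 2, 3]})
restored = get_pinned_object(pinned_id)
assert restored == {"stats": [1, 2, 3]}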


Example 18: from_pandas

def from_pandas(df, npartitions=None, chunksize=None):
    """Converts a pandas DataFrame to a Ray DataFrame.

    Args:
        df (pandas.DataFrame): The pandas DataFrame to convert.
        npartitions (int): The number of partitions to split the DataFrame
            into. Has priority over chunksize.
        chunksize (int): The number of rows to put in each partition.

    Returns:
        A new Ray DataFrame object.
    """
    from .dataframe import DataFrame

    if npartitions is not None:
        chunksize = int(len(df) / npartitions)
    elif chunksize is None:
        raise ValueError("The number of partitions or chunksize must be set.")

    temp_df = df

    dataframes = []
    lengths = []
    while len(temp_df) > chunksize:
        t_df = temp_df[:chunksize]
        lengths.append(len(t_df))
        # reset_index here because we want a pd.RangeIndex
        # within the partitions. It is smaller and sometimes faster.
        t_df = t_df.reset_index(drop=True)
        top = ray.put(t_df)
        dataframes.append(top)
        temp_df = temp_df[chunksize:]
    else:
        temp_df = temp_df.reset_index(drop=True)
        dataframes.append(ray.put(temp_df))
        lengths.append(len(temp_df))

    return DataFrame(dataframes, df.columns, index=df.index)
Developer: adgirish, Project: ray, Lines of code: 38, Source: utils.py
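A short usage sketch for the from_pandas helper above, assuming the surrounding Ray DataFrame package (and the DataFrame class it imports) is available; the column names and row count are arbitrary placeholders.

import pandas as pd
import ray

ray.init()

# Split a 10,000-row pandas DataFrame into roughly four Ray-backed partitions;
# passing chunksize=2500 instead would express the same split.
pdf = pd.DataFrame({"a": range(10000), "b": range(10000)})
rdf = from_pandas(pdf, npartitions=4)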


Example 19: _step

    def _step(self):
        sample_timesteps, train_timesteps = 0, 0
        weights = None

        with self.timers["sample_processing"]:
            completed = list(self.sample_tasks.completed())
            counts = ray.get([c[1][1] for c in completed])
            for i, (ev, (sample_batch, count)) in enumerate(completed):
                sample_timesteps += counts[i]

                # Send the data to the replay buffer
                random.choice(
                    self.replay_actors).add_batch.remote(sample_batch)

                # Update weights if needed
                self.steps_since_update[ev] += counts[i]
                if self.steps_since_update[ev] >= self.max_weight_sync_delay:
                    # Note that it's important to pull new weights once
                    # updated to avoid excessive correlation between actors
                    if weights is None or self.learner.weights_updated:
                        self.learner.weights_updated = False
                        with self.timers["put_weights"]:
                            weights = ray.put(
                                self.local_evaluator.get_weights())
                    ev.set_weights.remote(weights)
                    self.num_weight_syncs += 1
                    self.steps_since_update[ev] = 0

                # Kick off another sample request
                self.sample_tasks.add(ev, ev.sample_with_count.remote())

        with self.timers["replay_processing"]:
            for ra, replay in self.replay_tasks.completed():
                self.replay_tasks.add(ra, ra.replay.remote())
                if self.learner.inqueue.full():
                    self.num_samples_dropped += 1
                else:
                    with self.timers["get_samples"]:
                        samples = ray.get(replay)
                    # Defensive copy against plasma crashes, see #2610 #3452
                    self.learner.inqueue.put((ra, samples and samples.copy()))

        with self.timers["update_priorities"]:
            while not self.learner.outqueue.empty():
                ra, prio_dict, count = self.learner.outqueue.get()
                ra.update_priorities.remote(prio_dict)
                train_timesteps += count

        return sample_timesteps, train_timesteps
Developer: jamescasbon, Project: ray, Lines of code: 49, Source: async_replay_optimizer.py


Example 20: train

    def train(self, num_iter):

        start = time.time()
        for i in range(num_iter):
            
            t1 = time.time()
            self.train_step()
            t2 = time.time()
            print('total time of one step', t2 - t1)           
            print('iter ', i,' done')

            # record statistics every 10 iterations
            if ((i + 1) % 10 == 0):
                
                rewards = self.aggregate_rollouts(num_rollouts = 100, evaluate = True)
                w = ray.get(self.workers[0].get_weights_plus_stats.remote())
                np.savez(self.logdir + "/lin_policy_plus", w)
                
                print(sorted(self.params.items()))
                logz.log_tabular("Time", time.time() - start)
                logz.log_tabular("Iteration", i + 1)
                logz.log_tabular("AverageReward", np.mean(rewards))
                logz.log_tabular("StdRewards", np.std(rewards))
                logz.log_tabular("MaxRewardRollout", np.max(rewards))
                logz.log_tabular("MinRewardRollout", np.min(rewards))
                logz.log_tabular("timesteps", self.timesteps)
                logz.dump_tabular()
                
            t1 = time.time()
            # get statistics from all workers
            for j in range(self.num_workers):
                self.policy.observation_filter.update(ray.get(self.workers[j].get_filter.remote()))
            self.policy.observation_filter.stats_increment()

            # make sure master filter buffer is clear
            self.policy.observation_filter.clear_buffer()
            # sync all workers
            filter_id = ray.put(self.policy.observation_filter)
            setting_filters_ids = [worker.sync_filter.remote(filter_id) for worker in self.workers]
            # waiting for sync of all workers
            ray.get(setting_filters_ids)
         
            increment_filters_ids = [worker.stats_increment.remote() for worker in self.workers]
            # waiting for increment of all workers
            ray.get(increment_filters_ids)            
            t2 = time.time()
            print('Time to sync statistics:', t2 - t1)
                        
        return 
Developer: zhan0903, Project: ARS, Lines of code: 49, Source: ars.py



Note: The ray.put examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or reuse should follow the corresponding project's license. Do not reproduce without permission.

