
Python logger.Logger Class Code Examples


This article collects typical usage examples of the Python class pybullet_utils.logger.Logger. If you are wondering what the Logger class does, how to use it, or what working Logger code looks like, the curated class code examples below may help.



A total of 18 Logger class code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
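
Before the individual examples, here is a minimal usage sketch put together only from the Logger methods that appear in them (Logger.print2, configure_output_file, log_tabular, print_tabular and dump_tabular). The output file name and the logged values are illustrative placeholders rather than values taken from any particular example.

from pybullet_utils.logger import Logger

logger = Logger()
logger.configure_output_file("mylog.txt")  # placeholder path; example 16 below writes to "e:/mylog.txt"

Logger.print2("starting")  # static print helper used throughout the examples below

for it in range(3):
  logger.log_tabular("Iteration", it)      # record a key/value pair for the current row
  logger.log_tabular("Train_Return", 0.0)  # placeholder value
  logger.print_tabular()                   # print the current row to the console
  logger.dump_tabular()                    # append the current row to the configured output file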

Example 1: _build_nets

  def _build_nets(self, json_data):
    assert self.ACTOR_NET_KEY in json_data
    assert self.CRITIC_NET_KEY in json_data

    actor_net_name = json_data[self.ACTOR_NET_KEY]
    critic_net_name = json_data[self.CRITIC_NET_KEY]
    actor_init_output_scale = 1 if (self.ACTOR_INIT_OUTPUT_SCALE_KEY not in json_data
                                   ) else json_data[self.ACTOR_INIT_OUTPUT_SCALE_KEY]

    s_size = self.get_state_size()
    g_size = self.get_goal_size()
    a_size = self.get_action_size()

    # setup input tensors
    self.s_tf = tf.placeholder(tf.float32, shape=[None, s_size], name="s")  # observations
    self.tar_val_tf = tf.placeholder(tf.float32, shape=[None], name="tar_val")  # target values
    self.adv_tf = tf.placeholder(tf.float32, shape=[None], name="adv")  # advantage
    self.a_tf = tf.placeholder(tf.float32, shape=[None, a_size], name="a")  # target actions
    self.g_tf = tf.placeholder(tf.float32,
                               shape=([None, g_size] if self.has_goal() else None),
                               name="g")  # goals

    with tf.variable_scope('main'):
      with tf.variable_scope('actor'):
        self.actor_tf = self._build_net_actor(actor_net_name, actor_init_output_scale)
      with tf.variable_scope('critic'):
        self.critic_tf = self._build_net_critic(critic_net_name)

    if (self.actor_tf is not None):
      Logger.print2('Built actor net: ' + actor_net_name)

    if (self.critic_tf is not None):
      Logger.print2('Built critic net: ' + critic_net_name)

    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 35, Source file: pg_agent.py


Example 2: save_model

  def save_model(self, out_path):
    with self.sess.as_default(), self.graph.as_default():
      try:
        save_path = self.saver.save(self.sess, out_path, write_meta_graph=False, write_state=False)
        Logger.print2('Model saved to: ' + save_path)
      except Exception:
        # report the requested path; save_path may be unbound if the save call itself failed
        Logger.print2("Failed to save model to: " + out_path)
    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 8, Source file: tf_agent.py


Example 3: build_arg_parser

def build_arg_parser(args):
  arg_parser = ArgParser()
  arg_parser.load_args(args)

  arg_file = arg_parser.parse_string('arg_file', '')
  if (arg_file != ''):
    path = pybullet_data.getDataPath() + "/args/" + arg_file
    succ = arg_parser.load_file(path)
    Logger.print2(arg_file)
    assert succ, Logger.print2('Failed to load args from: ' + arg_file)
  return arg_parser
Developer ID: bulletphysics, Project: bullet3, Lines of code: 11, Source file: testrl.py


Example 4: __init__

    def __init__(self, world, id, json_data):
        self.world = world
        self.id = id
        self.logger = Logger()
        self._mode = self.Mode.TRAIN
        
        assert self._check_action_space(), \
            Logger.print2("Invalid action space, got {:s}".format(str(self.get_action_space())))
        
        self._enable_training = True
        self.path = Path()
        self.iter = int(0)
        self.start_time = time.time()
        self._update_counter = 0

        self.update_period = 1.0 # simulated time (seconds) before each training update
        self.iters_per_update = int(1)
        self.discount = 0.95
        self.mini_batch_size = int(32)
        self.replay_buffer_size = int(50000)
        self.init_samples = int(1000)
        self.normalizer_samples = np.inf
        self._local_mini_batch_size = self.mini_batch_size # batch size for each worker when multiprocessing
        self._need_normalizer_update = True
        self._total_sample_count = 0

        self._output_dir = ""
        self._int_output_dir = ""
        self.output_iters = 100
        self.int_output_iters = 100
        
        self.train_return = 0.0
        self.test_episodes = int(0)
        self.test_episode_count = int(0)
        self.test_return = 0.0
        self.avg_test_return = 0.0
        
        self.exp_anneal_samples = 320000
        self.exp_params_beg = ExpParams()
        self.exp_params_end = ExpParams()
        self.exp_params_curr = ExpParams()

        self._load_params(json_data)
        self._build_replay_buffer(self.replay_buffer_size)
        self._build_normalizers()
        self._build_bounds()
        self.reset()

        return
Developer ID: jiapei100, Project: bullet3, Lines of code: 49, Source file: rl_agent.py


Example 5: main

def main():
  # Command line arguments
  args = sys.argv[1:]
  arg_parser = ArgParser()
  arg_parser.load_args(args)

  num_workers = arg_parser.parse_int('num_workers', 1)
  assert (num_workers > 0)

  Logger.print2('Running with {:d} workers'.format(num_workers))
  cmd = 'mpiexec -n {:d} python3 DeepMimic_Optimizer.py '.format(num_workers)
  cmd += ' '.join(args)
  Logger.print2('cmd: ' + cmd)
  subprocess.call(cmd, shell=True)
  return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 15, Source file: mpi_run.py


Example 6: update

  def update(self):
    new_count = MPIUtil.reduce_sum(self.new_count)
    new_sum = MPIUtil.reduce_sum(self.new_sum)
    new_sum_sq = MPIUtil.reduce_sum(self.new_sum_sq)

    new_total = self.count + new_count
    if (self.count // self.CHECK_SYNC_COUNT != new_total // self.CHECK_SYNC_COUNT):
      assert self.check_synced(), Logger.print2('Normalizer parameters desynchronized')

    if new_count > 0:
      new_mean = self._process_group_data(new_sum / new_count, self.mean)
      new_mean_sq = self._process_group_data(new_sum_sq / new_count, self.mean_sq)
      w_old = float(self.count) / new_total
      w_new = float(new_count) / new_total

      self.mean = w_old * self.mean + w_new * new_mean
      self.mean_sq = w_old * self.mean_sq + w_new * new_mean_sq
      self.count = new_total
      self.std = self.calc_std(self.mean, self.mean_sq)

      self.new_count = 0
      self.new_sum.fill(0)
      self.new_sum_sq.fill(0)

    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 25, Source file: normalizer.py


Example 7: _build_nets

  def _build_nets(self, json_data):
    assert self.ACTOR_NET_KEY in json_data
    assert self.CRITIC_NET_KEY in json_data

    actor_net_name = json_data[self.ACTOR_NET_KEY]
    critic_net_name = json_data[self.CRITIC_NET_KEY]
    actor_init_output_scale = 1 if (self.ACTOR_INIT_OUTPUT_SCALE_KEY not in json_data
                                   ) else json_data[self.ACTOR_INIT_OUTPUT_SCALE_KEY]

    s_size = self.get_state_size()
    g_size = self.get_goal_size()
    a_size = self.get_action_size()

    # setup input tensors
    self.s_tf = tf.placeholder(tf.float32, shape=[None, s_size], name="s")
    self.a_tf = tf.placeholder(tf.float32, shape=[None, a_size], name="a")
    self.tar_val_tf = tf.placeholder(tf.float32, shape=[None], name="tar_val")
    self.adv_tf = tf.placeholder(tf.float32, shape=[None], name="adv")
    self.g_tf = tf.placeholder(tf.float32,
                               shape=([None, g_size] if self.has_goal() else None),
                               name="g")
    self.old_logp_tf = tf.placeholder(tf.float32, shape=[None], name="old_logp")
    self.exp_mask_tf = tf.placeholder(tf.float32, shape=[None], name="exp_mask")

    with tf.variable_scope('main'):
      with tf.variable_scope('actor'):
        self.a_mean_tf = self._build_net_actor(actor_net_name, actor_init_output_scale)
      with tf.variable_scope('critic'):
        self.critic_tf = self._build_net_critic(critic_net_name)

    if (self.a_mean_tf is not None):
      Logger.print2('Built actor net: ' + actor_net_name)

    if (self.critic_tf is not None):
      Logger.print2('Built critic net: ' + critic_net_name)

    self.norm_a_std_tf = self.exp_params_curr.noise * tf.ones(a_size)
    norm_a_noise_tf = self.norm_a_std_tf * tf.random_normal(shape=tf.shape(self.a_mean_tf))
    norm_a_noise_tf *= tf.expand_dims(self.exp_mask_tf, axis=-1)
    self.sample_a_tf = self.a_mean_tf + norm_a_noise_tf * self.a_norm.std_tf
    self.sample_a_logp_tf = TFUtil.calc_logp_gaussian(x_tf=norm_a_noise_tf,
                                                      mean_tf=None,
                                                      std_tf=self.norm_a_std_tf)

    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 45, Source file: ppo_agent.py


Example 8: _update_mode

 def _update_mode(self):
     if (self._mode == self.Mode.TRAIN):
         self._update_mode_train()
     elif (self._mode == self.Mode.TRAIN_END):
         self._update_mode_train_end()
     elif (self._mode == self.Mode.TEST):
         self._update_mode_test()
     else:
         assert False, Logger.print2("Unsupported RL agent mode" + str(self._mode))
     return
Developer ID: jiapei100, Project: bullet3, Lines of code: 10, Source file: rl_agent.py


Example 9: store

  def store(self, path):
    start_idx = MathUtil.INVALID_IDX
    n = path.pathlength()

    if (n > 0):
      assert path.is_valid()

      if path.check_vals():
        if self.buffers is None:
          self._init_buffers(path)

        idx = self._request_idx(n + 1)
        self._store_path(path, idx)
        self._add_sample_buffers(idx)

        self.num_paths += 1
        self.total_count += n + 1
        start_idx = idx[0]
      else:
        Logger.print2('Invalid path data value detected')

    return start_idx
Developer ID: bulletphysics, Project: bullet3, Lines of code: 22, Source file: replay_buffer.py


Example 10: end_episode

    def end_episode(self):
        if (self.path.pathlength() > 0):
            self._end_path()

            if (self._mode == self.Mode.TRAIN or self._mode == self.Mode.TRAIN_END):
                if (self.enable_training and self.path.pathlength() > 0):
                    self._store_path(self.path)
            elif (self._mode == self.Mode.TEST):
                self._update_test_return(self.path)
            else:
                assert False, Logger.print2("Unsupported RL agent mode" + str(self._mode))

            self._update_mode()
        return
Developer ID: jiapei100, Project: bullet3, Lines of code: 14, Source file: rl_agent.py


Example 11: record

  def record(self, x):
    size = self.get_size()
    is_array = isinstance(x, np.ndarray)
    if not is_array:
      assert (size == 1)
      x = np.array([[x]])

    assert x.shape[-1] == size, \
        Logger.print2('Normalizer shape mismatch, expecting size {:d}, but got {:d}'.format(size, x.shape[-1]))
    x = np.reshape(x, [-1, size])

    self.new_count += x.shape[0]
    self.new_sum += np.sum(x, axis=0)
    self.new_sum_sq += np.sum(np.square(x), axis=0)
    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 15, Source file: normalizer.py


Example 12: update_flatgrad

  def update_flatgrad(self, flat_grad, grad_scale=1.0):
    if self.iter % self.CHECK_SYNC_ITERS == 0:
      assert self.check_synced(), Logger.print2('Network parameters desynchronized')

    if grad_scale != 1.0:
      flat_grad *= grad_scale

    MPI.COMM_WORLD.Allreduce(flat_grad, self._global_flat_grad, op=MPI.SUM)
    self._global_flat_grad /= MPIUtil.get_num_procs()

    self._load_flat_grad(self._global_flat_grad)
    self.sess.run([self._update], self._grad_feed)
    self.iter += 1

    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 15, Source file: mpi_solver.py


Example 13: set_mean_std

  def set_mean_std(self, mean, std):
    size = self.get_size()
    is_array = isinstance(mean, np.ndarray) and isinstance(std, np.ndarray)

    if not is_array:
      assert (size == 1)
      mean = np.array([mean])
      std = np.array([std])

    assert len(mean) == size and len(std) == size, \
        Logger.print2('Normalizer shape mismatch, expecting size {:d}, but got {:d} and {:d}'.format(size, len(mean), len(std)))

    self.mean = mean
    self.std = std
    self.mean_sq = self.calc_mean_sq(self.mean, self.std)
    return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 16, Source file: normalizer.py


Example 14: load_model

 def load_model(self, in_path):
   with self.sess.as_default(), self.graph.as_default():
     self.saver.restore(self.sess, in_path)
     self._load_normalizers()
     Logger.print2('Model loaded from: ' + in_path)
   return
Developer ID: bulletphysics, Project: bullet3, Lines of code: 6, Source file: tf_agent.py


Example 15: shutdown

def shutdown():
    global world

    Logger.print2('Shutting down...')
    world.shutdown()
    return
Developer ID: jiapei100, Project: bullet3, Lines of code: 6, Source file: DeepMimic_Optimizer.py


Example 16: Logger

from pybullet_utils.logger import Logger
logger = Logger()
logger.configure_output_file("e:/mylog.txt")
for i in range(10):
  logger.log_tabular("Iteration", 1)
Logger.print2("hello world")

logger.print_tabular()
logger.dump_tabular()
Developer ID: jiapei100, Project: bullet3, Lines of code: 9, Source file: testlog.py


Example 17: _train

    def _train(self):
        samples = self.replay_buffer.total_count
        self._total_sample_count = int(MPIUtil.reduce_sum(samples))
        end_training = False
        
        if (self.replay_buffer_initialized):  
            if (self._valid_train_step()):
                prev_iter = self.iter
                iters = self._get_iters_per_update()
                avg_train_return = MPIUtil.reduce_avg(self.train_return)
            
                for i in range(iters):
                    curr_iter = self.iter
                    wall_time = time.time() - self.start_time
                    wall_time /= 60 * 60 # store time in hours

                    has_goal = self.has_goal()
                    s_mean = np.mean(self.s_norm.mean)
                    s_std = np.mean(self.s_norm.std)
                    g_mean = np.mean(self.g_norm.mean) if has_goal else 0
                    g_std = np.mean(self.g_norm.std) if has_goal else 0

                    self.logger.log_tabular("Iteration", self.iter)
                    self.logger.log_tabular("Wall_Time", wall_time)
                    self.logger.log_tabular("Samples", self._total_sample_count)
                    self.logger.log_tabular("Train_Return", avg_train_return)
                    self.logger.log_tabular("Test_Return", self.avg_test_return)
                    self.logger.log_tabular("State_Mean", s_mean)
                    self.logger.log_tabular("State_Std", s_std)
                    self.logger.log_tabular("Goal_Mean", g_mean)
                    self.logger.log_tabular("Goal_Std", g_std)
                    self._log_exp_params()

                    self._update_iter(self.iter + 1)
                    self._train_step()

                    Logger.print2("Agent " + str(self.id))
                    self.logger.print_tabular()
                    Logger.print2("") 

                    if (self._enable_output() and curr_iter % self.int_output_iters == 0):
                        self.logger.dump_tabular()

                if (prev_iter // self.int_output_iters != self.iter // self.int_output_iters):
                    end_training = self.enable_testing()

        else:

            Logger.print2("Agent " + str(self.id))
            Logger.print2("Samples: " + str(self._total_sample_count))
            Logger.print2("") 

            if (self._total_sample_count >= self.init_samples):
                self.replay_buffer_initialized = True
                end_training = self.enable_testing()
        
        if self._need_normalizer_update:
            self._update_normalizers()
            self._need_normalizer_update = self.normalizer_samples > self._total_sample_count

        if end_training:
            self._init_mode_train_end()
 
        return
Developer ID: jiapei100, Project: bullet3, Lines of code: 64, Source file: rl_agent.py


Example 18: RLAgent

class RLAgent(ABC):
    class Mode(Enum):
        TRAIN = 0
        TEST = 1
        TRAIN_END = 2

    NAME = "None"
    
    UPDATE_PERIOD_KEY = "UpdatePeriod"
    ITERS_PER_UPDATE = "ItersPerUpdate"
    DISCOUNT_KEY = "Discount"
    MINI_BATCH_SIZE_KEY = "MiniBatchSize"
    REPLAY_BUFFER_SIZE_KEY = "ReplayBufferSize"
    INIT_SAMPLES_KEY = "InitSamples"
    NORMALIZER_SAMPLES_KEY = "NormalizerSamples"

    OUTPUT_ITERS_KEY = "OutputIters"
    INT_OUTPUT_ITERS_KEY = "IntOutputIters"
    TEST_EPISODES_KEY = "TestEpisodes"

    EXP_ANNEAL_SAMPLES_KEY = "ExpAnnealSamples"
    EXP_PARAM_BEG_KEY = "ExpParamsBeg"
    EXP_PARAM_END_KEY = "ExpParamsEnd"
        
    def __init__(self, world, id, json_data):
        self.world = world
        self.id = id
        self.logger = Logger()
        self._mode = self.Mode.TRAIN
        
        assert self._check_action_space(), \
            Logger.print2("Invalid action space, got {:s}".format(str(self.get_action_space())))
        
        self._enable_training = True
        self.path = Path()
        self.iter = int(0)
        self.start_time = time.time()
        self._update_counter = 0

        self.update_period = 1.0 # simulated time (seconds) before each training update
        self.iters_per_update = int(1)
        self.discount = 0.95
        self.mini_batch_size = int(32)
        self.replay_buffer_size = int(50000)
        self.init_samples = int(1000)
        self.normalizer_samples = np.inf
        self._local_mini_batch_size = self.mini_batch_size # batch size for each worker when multiprocessing
        self._need_normalizer_update = True
        self._total_sample_count = 0

        self._output_dir = ""
        self._int_output_dir = ""
        self.output_iters = 100
        self.int_output_iters = 100
        
        self.train_return = 0.0
        self.test_episodes = int(0)
        self.test_episode_count = int(0)
        self.test_return = 0.0
        self.avg_test_return = 0.0
        
        self.exp_anneal_samples = 320000
        self.exp_params_beg = ExpParams()
        self.exp_params_end = ExpParams()
        self.exp_params_curr = ExpParams()

        self._load_params(json_data)
        self._build_replay_buffer(self.replay_buffer_size)
        self._build_normalizers()
        self._build_bounds()
        self.reset()

        return

    def __str__(self):
        action_space_str = str(self.get_action_space())
        info_str = ""
        info_str += '"ID": {:d},\n "Type": "{:s}",\n "ActionSpace": "{:s}",\n "StateDim": {:d},\n "GoalDim": {:d},\n "ActionDim": {:d}'.format(
            self.id, self.NAME, action_space_str[action_space_str.rfind('.') + 1:], self.get_state_size(), self.get_goal_size(), self.get_action_size())
        return "{\n" + info_str + "\n}"

    def get_output_dir(self):
        return self._output_dir
    
    def set_output_dir(self, out_dir):
        self._output_dir = out_dir
        if (self._output_dir != ""):
            self.logger.configure_output_file(out_dir + "/agent" + str(self.id) + "_log.txt")
        return

    output_dir = property(get_output_dir, set_output_dir)

    def get_int_output_dir(self):
        return self._int_output_dir
    
    def set_int_output_dir(self, out_dir):
        self._int_output_dir = out_dir
        return

    int_output_dir = property(get_int_output_dir, set_int_output_dir)
#......... part of the code omitted here .........
Developer ID: jiapei100, Project: bullet3, Lines of code: 101, Source file: rl_agent.py



Note: the pybullet_utils.logger.Logger class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.

