
Python shell.call Function Code Examples


This article collects and summarizes typical usage examples of the Python function resource_management.core.shell.call. If you have been wondering what exactly the call function does, how to use it, or what it looks like in practice, the curated examples below should help.



The following presents 20 code examples of the call function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
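
Before diving in, here is a minimal usage sketch distilled from the examples (an inference from the snippets below, not official documentation): call accepts a command as either a string or a tuple, supports keyword arguments such as user, sudo, env, timeout, and logoutput, and returns a (return_code, output) tuple. The commands here are purely illustrative.

from resource_management.core import shell
from resource_management.core.logger import Logger

# Command given as a string; call returns (exit code, combined output)
# and, unlike checked_call, does not raise on a non-zero exit code.
code, out = shell.call("pgrep nfsd", timeout=30)
if code == 0:
  Logger.info("nfsd is running: %s" % out)
else:
  Logger.info("nfsd is not running (exit code %d)" % code)

# Command given as a tuple and run with sudo, as in several examples below.
code, out = shell.call(("service", "rpcbind", "status"), sudo=True)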

Example 1: get_write_lock_files_solr_cloud

def get_write_lock_files_solr_cloud(hadoop_prefix, collections):
    import params

    write_locks_to_delete = ''

    for collection_path in collections:
        code, output = call(format('{hadoop_prefix} -ls {collection_path}'))
        core_paths = get_core_paths(output, collection_path)

        collection_name = collection_path.replace(format('{solr_hdfs_directory}/'), '')
        zk_code, zk_output = call(format(
            '{zk_client_prefix} -cmd get {solr_cloud_zk_directory}/collections/{collection_name}/state.json'),
            env={'JAVA_HOME': params.java64_home},
            timeout=60
        )
        if zk_code != 0:
            Logger.error(format('Cannot determine cores owned by [{solr_hostname}] in collection [{collection_name}] due to ZK error.'))
            continue

        for core_path in core_paths:
            core_node_name = core_path.replace(format('{collection_path}/'), '')
            pattern = re.compile(format(HOSTNAME_VERIFIER_PATTERN), re.MULTILINE|re.DOTALL)
            core_on_hostname = re.search(pattern, zk_output)
            if core_on_hostname is not None:
                write_locks_to_delete += WRITE_LOCK_PATTERN.format(core_path)

    return write_locks_to_delete
Developer: hortonworks | Project: solr-stack | Lines: 27 | Source: solr_utils.py


Example 2: is_active_namenode

def is_active_namenode(hdfs_binary):
  """
  Checks if the current NameNode is active. Waits up to 30 seconds; if the other NameNode is active, returns False.
  :return: True if current NameNode is active, False otherwise
  """
  import params

  if params.dfs_ha_enabled:
    is_active_this_namenode_cmd = as_user(format("{hdfs_binary} --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
    is_active_other_namenode_cmd = as_user(format("{hdfs_binary} --config {hadoop_conf_dir} haadmin -getServiceState {other_namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})

    for i in range(0, 5):
      code, out = shell.call(is_active_this_namenode_cmd) # If active NN, code will be 0
      if code == 0: # active
        return True

      code, out = shell.call(is_active_other_namenode_cmd) # If other NN is active, code will be 0
      if code == 0: # other NN is active
        return False

      if i < 4: # Do not sleep after last iteration
        time.sleep(6)

    Logger.info("Active NameNode is not found.")
    return False

  else:
    return True
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 28 | Source: hdfs_namenode.py


Example 3: actionexecute

  def actionexecute(self, env):
    config = Script.get_config()

    version = default('/commandParams/version', None)
    stack_name = default('/hostLevelParams/stack_name', "")

    if not version:
      raise Fail("Value is required for '/commandParams/version'")
  
    # other os?
    if OSCheck.is_redhat_family():
      cmd = ('/usr/bin/yum', 'clean', 'all')
      code, out = shell.call(cmd, sudo=True)

    min_ver = format_hdp_stack_version("2.2")
    real_ver = format_hdp_stack_version(version)
    if stack_name == "HDP":
      if compare_versions(real_ver, min_ver) >= 0:
        cmd = ('hdp-select', 'set', 'all', version)
        code, out = shell.call(cmd, sudo=True)

      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
        # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
        for k, v in conf_select.PACKAGE_DIRS.iteritems():
          for dir_def in v:
            link_config(dir_def['conf_dir'], dir_def['current_dir'])
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 26 | Source: ru_set_all.py


Example 4: post_upgrade_restart

  def post_upgrade_restart(self, env, upgrade_type=None):
    if upgrade_type == "nonrolling":
      return

    Logger.info("Executing Stack Upgrade post-restart")
    import params
    env.set_params(params)
    zk_server_host = random.choice(params.zookeeper_hosts)
    cli_shell = format("{zk_cli_shell} -server {zk_server_host}:{client_port}")
    # Ensure that a quorum is still formed.
    unique = get_unique_id_and_date()
    create_command = format("echo 'create /{unique} mydata' | {cli_shell}")
    list_command = format("echo 'ls /' | {cli_shell}")
    delete_command = format("echo 'delete /{unique} ' | {cli_shell}")

    quorum_err_message = "Failed to establish zookeeper quorum"
    call_and_match_output(create_command, 'Created', quorum_err_message, user=params.zk_user)
    call_and_match_output(list_command, r"\[.*?" + unique + ".*?\]", quorum_err_message, user=params.zk_user)
    shell.call(delete_command, user=params.zk_user)

    if params.client_port:
      check_leader_command = format("echo stat | nc localhost {client_port} | grep Mode")
      code, out = shell.call(check_leader_command, logoutput=False)
      if code == 0 and out:
        Logger.info(out)
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 25 | Source: zookeeper_server.py


Example 5: prepare_rpcbind

def prepare_rpcbind():
    Logger.info("check if native nfs server is running")
    p, output = shell.call("pgrep nfsd")
    if p == 0:
        Logger.info("native nfs server is running. shutting it down...")
        # shutdown nfs
        shell.call("service nfs stop")
        shell.call("service nfs-kernel-server stop")
        Logger.info("check if the native nfs server is down...")
        p, output = shell.call("pgrep nfsd")
        if p == 0:
            raise Fail("Failed to shutdown native nfs service")

    Logger.info("check if rpcbind or portmap is running")
    p, output = shell.call("pgrep rpcbind")
    q, output = shell.call("pgrep portmap")

    if p != 0 and q != 0:
        Logger.info("no portmap or rpcbind running. starting one...")
        p, output = shell.call(("service", "rpcbind", "start"), sudo=True)
        q, output = shell.call(("service", "portmap", "start"), sudo=True)
        if p != 0 and q != 0:
            raise Fail("Failed to start rpcbind or portmap")

    Logger.info("now we are ready to start nfs gateway")
Developer: andreysabitov | Project: ambari-mantl | Lines: 25 | Source: hdfs_nfsgateway.py


Example 6: initiate_safe_zkfc_failover

def initiate_safe_zkfc_failover():
  """
  If this is the active namenode, initiate a safe failover and wait for it to become the standby.

  If an error occurs, force a failover by killing zkfc on this host. In this case, ZKFC will also
  have to be started manually during the Restart.
  """
  import params

  # Must kinit before running the HDFS command
  if params.security_enabled:
    Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
            user = params.hdfs_user)

  check_service_cmd = format("hdfs haadmin -getServiceState {namenode_id}")
  code, out = shell.call(check_service_cmd, logoutput=True, user=params.hdfs_user)

  original_state = "unknown"
  if code == 0 and out:
    original_state = "active" if "active" in out else ("standby" if "standby" in out else original_state)
    Logger.info("Namenode service state: %s" % original_state)

    if original_state == "active":
      msg = "Rolling Upgrade - Initiating a ZKFC failover on {0} NameNode host {1}.".format(original_state, params.hostname)
      Logger.info(msg)

      check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
      failover_command = format("hdfs haadmin -failover {namenode_id} {other_namenode_id}")

      code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
      Logger.info(format("Rolling Upgrade - failover command returned {code}"))
      wait_for_standby = False

      if code == 0:
        wait_for_standby = True
      else:
        # Try to kill ZKFC manually
        was_zkfc_killed = kill_zkfc(params.hdfs_user)
        code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
        Logger.info(format("Rolling Upgrade - check for standby returned {code}"))

        if code == 255 and out:
          Logger.info("Rolling Upgrade - namenode is already down.")
        else:
          if was_zkfc_killed:
            # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
            wait_for_standby = True

      if wait_for_standby:
        Logger.info("Waiting for this NameNode to become the standby one.")
        Execute(check_standby_cmd,
                user=params.hdfs_user,
                tries=50,
                try_sleep=6,
                logoutput=True)
  else:
    raise Fail("Unable to determine NameNode HA states by calling command: {0}".format(check_service_cmd))
Developer: andreysabitov | Project: ambari-mantl | Lines: 57 | Source: utils.py


Example 7: _check_existence

  def _check_existence(self, name):
    code, out = shell.call(CHECK_CMD % name)
    if bool(code):
      return False
    elif '*' in name or '?' in name:  # Check if all packages matching pattern are installed
      code1, out1 = shell.call(GET_NOT_INSTALLED_CMD % name)
      return NO_PACKAGES_FOUND_STATUS in out1.splitlines()
    else:
      return True
Developer: fanzhidongyzby | Project: ambari | Lines: 9 | Source: zypper.py


Example 8: _check_existence

  def _check_existence(self, name):
    if "." in name:  # To work with names like 'zookeeper_2_2_1_0_2072.noarch'
      name = os.path.splitext(name)[0]
    code, out = shell.call(CHECK_CMD % name)
    if bool(code):
      return False
    elif "*" in name or "?" in name:  # Check if all packages matching pattern are installed
      code1, out1 = shell.call(CHECK_AVAILABLE_PACKAGES_CMD % name)
      return not bool(code1)
    else:
      return True
Developer: chinpeng | Project: ambari | Lines: 11 | Source: yumrpm.py


Example 9: _init_cmd

  def _init_cmd(self, command):
    if self._upstart:
      if command == "status":
        ret, out = shell.call(["/sbin/" + command, self.resource.service_name])
        _proc, state = out.strip().split(' ', 1)
        ret = 0 if state != "stop/waiting" else 1
      else:
        ret, out = shell.call(["/sbin/" + command, self.resource.service_name])
    else:
      ret, out = shell.call(["/etc/init.d/%s" % self.resource.service_name, command])
    return ret, out
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 11 | Source: service.py


Example 10: action_run

  def action_run(self):
    from tempfile import NamedTemporaryFile

    Logger.info("Running script %s" % self.resource)
    with NamedTemporaryFile(prefix="resource_management-script", bufsize=0) as tf:
      tf.write(self.resource.code)
      tf.flush()

      _ensure_metadata(tf.name, self.resource.user, self.resource.group)
      shell.call([self.resource.interpreter, tf.name],
                      cwd=self.resource.cwd, env=self.resource.environment,
                      preexec_fn=_preexec_fn(self.resource))
Developer: Altiscale | Project: incubator-slider | Lines: 12 | Source: system.py


Example 11: _check_existence

  def _check_existence(self, name):
    code, out = shell.call(CHECK_EXISTENCE_CMD % name)
    if bool(code):
      return False
    elif '*' in name or '.' in name:  # Check if all packages matching regexp are installed
      code1, out1 = shell.call(GET_PACKAGES_BY_PATTERN_CMD % name)
      for package_name in out1.splitlines():
        code2, out2 = shell.call(GET_PACKAGE_STATUS_CMD % package_name)
        if PACKAGE_INSTALLED_STATUS not in out2.splitlines():
          return False
      return True
    else:
      return True
Developer: fanzhidongyzby | Project: ambari | Lines: 13 | Source: apt.py


Example 12: check_process_status

def check_process_status(pid_file):
  """
  Function checks whether process is running.
  Process is considered running, if pid file exists, and process with
  a pid, mentioned in pid file is running
  If process is not running, will throw ComponentIsNotRunning exception

  @param pid_file: path to service pid file
  """
  if not pid_file or not os.path.isfile(pid_file):
    raise ComponentIsNotRunning()
  
  try:
    pid = int(sudo.read_file(pid_file))
  except:
    Logger.debug("Pid file {0} does not exist".format(pid_file))
    raise ComponentIsNotRunning()

  code, out = shell.call(["ps","-p", str(pid)])
  
  if code:
    Logger.debug("Process with pid {0} is not running. Stale pid file"
              " at {1}".format(pid, pid_file))
    raise ComponentIsNotRunning()
  pass
Developer: fanzhidongyzby | Project: ambari | Lines: 25 | Source: check_process_status.py


Example 13: remove_solr_ssl_support

def remove_solr_ssl_support():
    import params

    if not params.solr_cloud_mode:
        return

    code, output = call(
            format(
                    '{zk_client_prefix} -cmd get {solr_cloud_zk_directory}{clusterprops_json}'
            ),
            env={'JAVA_HOME': params.java64_home},
            timeout=60
    )

    if "NoNodeException" in output:
        return

    Execute(
            format(
                    '{zk_client_prefix} -cmd clear {solr_cloud_zk_directory}{clusterprops_json}'
            ),
            environment={'JAVA_HOME': params.java64_home},
            ignore_failures=True,
            user=params.solr_config_user
    )
Developer: hortonworks | Project: solr-stack | Lines: 25 | Source: setup_solr_ssl_support.py


Example 14: _check_datanode_startup

def _check_datanode_startup():
  """
  Checks that a DataNode is reported as being alive via the
  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
  alive this method will return, otherwise it will raise a Fail(...) and retry
  automatically.
  :return:
  """
  import params

  try:
    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
    command = 'hdfs dfsadmin -report -live'
    return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
  except:
    raise Fail('Unable to determine if the DataNode has started after upgrade.')

  if return_code == 0:
    if params.hostname.lower() in hdfs_output.lower():
      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
      return
    else:
      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))

  # return_code is not 0, fail
  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))
Developer: fanzhidongyzby | Project: ambari | Lines: 26 | Source: datanode_upgrade.py


Example 15: bootstrap_standby_namenode

def bootstrap_standby_namenode(params, use_path=False):

  bin_path = os.path.join(params.hadoop_bin_dir, '') if use_path else ""

  try:
    iterations = 50
    bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive")
    # Blue print based deployments start both NN in parallel and occasionally
    # the first attempt to bootstrap may fail. Depending on how it fails the
    # second attempt may not succeed (e.g. it may find the folder and decide that
    # bootstrap succeeded). The solution is to call with -force option but only
    # during initial start
    if params.command_phase == "INITIAL_START":
      bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive -force")
    Logger.info("Boostrapping standby namenode: %s" % (bootstrap_cmd))
    for i in range(iterations):
      Logger.info('Try %d out of %d' % (i+1, iterations))
      code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
      if code == 0:
        Logger.info("Standby namenode bootstrapped successfully")
        return True
      elif code == 5:
        Logger.info("Standby namenode already bootstrapped")
        return True
      else:
        Logger.warning('Bootstrap standby namenode failed with %d error code. Will retry' % (code))
  except Exception as ex:
    Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
  return False
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 29 | Source: hdfs_namenode.py


Example 16: pre_rolling_upgrade_shutdown

def pre_rolling_upgrade_shutdown(hdfs_binary):
  """
  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
  DataNode in preparation for an upgrade. This will then periodically check
  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
  This function will obtain the Kerberos ticket if security is enabled.
  :param hdfs_binary: name/path of the HDFS binary to use
  :return: True if the command ran OK (even with errors), and False if the DataNode needs to be stopped forcefully.
  """
  import params

  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
  if params.security_enabled:
    Execute(params.dn_kinit_cmd, user = params.hdfs_user)

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')

  code, output = shell.call(command, user=params.hdfs_user)
  if code == 0:
    # verify that the datanode is down
    _check_datanode_shutdown(hdfs_binary)
  else:
    # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
    if output is not None and re.search("Shutdown already in progress", output):
      Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
      return False
  return True
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 28 | Source: datanode_upgrade.py


Example 17: kill_zkfc

def kill_zkfc(zkfc_user):
  """
  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
  Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
  :param zkfc_user: User that started the ZKFC process.
  :return: Return True if ZKFC was killed, otherwise, false.
  """
  import params
  if params.dfs_ha_enabled:
    zkfc_pid_file = get_service_pid_file("zkfc", zkfc_user)
    if zkfc_pid_file:
      check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
      code, out = shell.call(check_process)
      if code == 0:
        Logger.debug("ZKFC is running and will be killed.")
        kill_command = format("kill -15 `cat {zkfc_pid_file}`")
        Execute(kill_command,
             user=zkfc_user
        )
        File(zkfc_pid_file,
             action = "delete",
        )
        return True
  return False
Developer: andreysabitov | Project: ambari-mantl | Lines: 25 | Source: utils.py


Example 18: create

def create(stack_name, package, version, dry_run = False):
  """
  Creates a config version for the specified package
  :param stack_name: the name of the stack
  :param package: the name of the package, as-used by conf-select
  :param version: the version number to create
  :return List of directories created
  """
  Logger.info("Checking if need to create versioned conf dir /etc/{0}/{1}/0".format(package, version))
  if not _valid(stack_name, package, version):
    Logger.info("Will not create it since parameters are not valid.")
    return []

  command = "dry-run-create" if dry_run else "create-conf-dir"

  code, stdout, stderr = shell.call(get_cmd(command, package, version), logoutput=False, quiet=False, sudo=True, stderr = subprocess.PIPE)

  # conf-select can set more than one directory
  # per package, so return that list, especially for dry_run
  dirs = []
  if 0 == code and stdout is not None: # just be sure we have a stdout
    for line in stdout.splitlines():
      dirs.append(line.rstrip('\n'))

  # take care of permissions
  if not code and stdout and command == "create-conf-dir":
    for d in dirs:
      Directory(d,
          mode=0755,
          cd_access='a',
          recursive=True)

  return dirs
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 33 | Source: conf_select.py


Example 19: _get_current_hiveserver_version

def _get_current_hiveserver_version():
  """
  Runs "hive --version" and parses the result in order
  to obtain the current version of hive.

  :return:  the hiveserver2 version, returned by "hive --version"
  """
  import params

  try:
    # When downgrading the source version should be the version we are downgrading from
    if "downgrade" == params.upgrade_direction:
      if not params.downgrade_from_version:
        raise Fail('The version from which we are downgrading from should be provided in \'downgrade_from_version\'')
      source_version = params.downgrade_from_version
    else:
      source_version = params.current_version
    hive_execute_path = _get_hive_execute_path(source_version)
    version_hive_bin = params.hive_bin
    formatted_source_version = format_hdp_stack_version(source_version)
    if formatted_source_version and compare_versions(formatted_source_version, "2.2") >= 0:
      version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
    command = format('{version_hive_bin}/hive --version')
    return_code, hdp_output = shell.call(command, user=params.hive_user, path=hive_execute_path)
  except Exception, e:
    Logger.error(str(e))
    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
Developer: OpenPOWER-BigData | Project: HDP-ambari | Lines: 27 | Source: hive_server_upgrade.py


Example 20: get_hdp_version

def get_hdp_version():
  try:
    command = 'hdp-select status hadoop-client'
    return_code, hdp_output = shell.call(command, timeout=20)
  except Exception, e:
    Logger.error(str(e))
    raise Fail('Unable to execute hdp-select command to retrieve the version.')
Developer: fanzhidongyzby | Project: ambari | Lines: 7 | Source: setup_spark.py



Note: The resource_management.core.shell.call examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.

