
Python functions.format function: code examples


This article collects typical usage examples of the Python function resource_management.libraries.functions.format. If you are wondering how exactly to use format, what it does, or where to find examples of it in use, the curated code samples below should help.



Twenty code examples of the format function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
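
All of the examples below call format() without passing any arguments for the placeholders: the {name} fields in the template are resolved against variables visible at the call site (locals such as pid_file, or values imported via the params module). The toy sketch below only approximates that idea so the examples are easier to read; it is not the actual resource_management implementation, and the variable names in it are purely illustrative.

import string
import sys

def toy_format(template):
    # Rough approximation of the idea behind resource_management's format():
    # fill {placeholders} from variables visible in the caller's frame.
    # Illustrative only; not the real Ambari implementation.
    caller = sys._getframe(1)
    variables = dict(caller.f_globals)
    variables.update(caller.f_locals)
    return string.Formatter().vformat(template, (), variables)

pid_file = "/var/run/spark/spark-dispatcher.pid"  # illustrative value
print(toy_format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"))
# -> ls /var/run/spark/spark-dispatcher.pid >/dev/null 2>&1 && ps -p `cat /var/run/spark/spark-dispatcher.pid` >/dev/null 2>&1

In practice the scripts simply do "import params" and then call format("...") inside the same function, as every example below demonstrates.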

Example 1: service

def service(component_name, action="start"):
    import params

    if component_name.lower() == "mesos_dispatcher":
        daemon = params.spark_dispatch_start
        pid_file = params.mesos_dispatcher_pid_file
        usr = params.spark_user
    else:
        # daemon = params.spark_history_server_start
        pass

    if action == "start":

        check_process = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
        cmd = format("export MESOS_NATIVE_JAVA_LIBRARY={mesos_native_java_library} && " "{daemon}")

        # Remove the pid file if its corresponding process is not running.
        File(pid_file, action="delete", not_if=check_process)

        # Attempt to start the process. Internally, this is skipped if the process is already running.
        Execute(cmd, user=usr, environment={"JAVA_HOME": params.java_home}, not_if=check_process)

        # Ensure that the process with the expected PID exists.
        Execute(check_process, user=usr, not_if=check_process, initial_wait=5)

    elif action == "stop":
        cmd = format("{spark_dispatch_stop}")
        Execute(cmd, user=usr, environment={"JAVA_HOME": params.java_home})

        File(pid_file, action="delete")
Author: andreysabitov, Project: ambari-mantl, Lines: 30, Source: dispatcher_service.py


Example 2: jdbc_connector

def jdbc_connector():
  import params
  from urllib2 import HTTPError
  from resource_management import Fail
  for jar_name in params.sqoop_jdbc_drivers_dict:
    if 'mysql-connector-java.jar' in jar_name:
      continue
    downloaded_custom_connector = format("{sqoop_lib}/{jar_name}")
    jdbc_symlink_remote = params.sqoop_jdbc_drivers_dict[jar_name]
    jdbc_driver_label = params.sqoop_jdbc_drivers_name_dict[jar_name]
    driver_curl_source = format("{jdk_location}/{jdbc_symlink_remote}")
    environment = {
      "no_proxy": format("{ambari_server_hostname}")
    }
    try:
      File(downloaded_custom_connector,
           content = DownloadSource(driver_curl_source),
           mode = 0644,
      )
    except HTTPError:
      error_string = format("Could not download {driver_curl_source}\n\
                 Please upload the jdbc driver to the server by running the command:\n\
                 ambari-server setup --jdbc-db={jdbc_driver_label} --jdbc-driver=<PATH TO DRIVER>\n\
                 at {ambari_server_hostname}") 
      raise Fail(error_string)
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 25, Source: sqoop.py


Example 3: _get_directory_mappings_during_upgrade

def _get_directory_mappings_during_upgrade():
  """
  Gets a dictionary of directory to archive name that represents the
  directories that need to be backed up and their output tarball archive targets
  :return:  the dictionary of directory to tarball mappings
  """
  import params

  # Must be performing an Upgrade
  if params.upgrade_direction is None or params.upgrade_direction != Direction.UPGRADE or \
          params.upgrade_from_version is None or params.upgrade_from_version == "":
    Logger.error("Function _get_directory_mappings_during_upgrade() can only be called during a Stack Upgrade in direction UPGRADE.")
    return {}

  # By default, use this for all stacks.
  knox_data_dir = '/var/lib/knox/data'

  if params.stack_name and params.stack_name.upper() == "HDP" and \
          compare_versions(format_hdp_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
    # Use the version that is being upgraded from.
    knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')

  # the trailing "/" is important here so as to not include the "conf" folder itself
  directories = {knox_data_dir: BACKUP_DATA_ARCHIVE, params.knox_conf_dir + "/": BACKUP_CONF_ARCHIVE}

  Logger.info(format("Knox directories to backup:\n{directories}"))
  return directories
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 27, Source: upgrade.py


Example 4: check_thrift_port_sasl

def check_thrift_port_sasl(address, port, hive_auth = "NOSASL", key = None, kinitcmd = None, smokeuser = 'ambari-qa',
                           transport_mode = "binary"):
  """
  Hive thrift SASL port check
  """
  BEELINE_CHECK_TIMEOUT = 30

  if kinitcmd:
    url = format("jdbc:hive2://{address}:{port}/;principal={key}")
    Execute(kinitcmd,
            user=smokeuser
    )
  else:
    url = format("jdbc:hive2://{address}:{port}")

  if hive_auth != "NOSASL" and transport_mode != "http":
    cmd = format("! beeline -u '{url}' -e '' ") + "2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL'"
    Execute(cmd,
            user=smokeuser,
            path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
            timeout=BEELINE_CHECK_TIMEOUT
    )
  else:
    s = socket.socket()
    s.settimeout(1)
    try:
      s.connect((address, port))
    except socket.error, e:
      raise
    finally:
      s.close()
Author: fanzhidongyzby, Project: ambari, Lines: 30, Source: hive_check.py


Example 5: spark_service

def spark_service(action):
  import params
  
  if action == 'start':
    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    # Spark 1.3.1.2.3 and higher, which is included in HDP 2.3, does not depend on Tez, so it does
    # not need to copy the Tez tarball; otherwise, copy it.
    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.3.0.0') < 0:
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user)
      if resource_created:
        params.HdfsResource(None, action="execute")

    no_op_test = format(
      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
    Execute(format('{spark_history_server_start}'),
            user=params.spark_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=no_op_test
    )
  elif action == 'stop':
    Execute(format('{spark_history_server_stop}'),
            user=params.spark_user,
            environment={'JAVA_HOME': params.java_home}
    )
    File(params.spark_history_server_pid_file,
         action="delete"
    )
Author: andreysabitov, Project: ambari-mantl, Lines: 30, Source: spark_service.py


Example 6: _create_file

  def _create_file(self, target, source=None, mode=""):
    """
    The PUT file command is slow; however, _get_file_status is pretty fast,
    so we should check whether the file really needs to be put before doing it.
    """
    file_status = self._get_file_status(target) if target!=self.main_resource.resource.target else self.target_status
    mode = "" if not mode else mode

    if file_status:
      if source:
        length = file_status['length']
        local_file_size = os.stat(source).st_size # TODO: os -> sudo

        # TODO: re-implement this using checksums
        if local_file_size == length:
          Logger.info(format("DFS file {target} is identical to {source}, skipping the copying"))
          return
        elif not self.main_resource.resource.replace_existing_files:
          Logger.info(format("Not replacing existing DFS file {target} which is different from {source}, due to replace_existing_files=False"))
          return
      else:
        Logger.info(format("File {target} already exists in DFS, skipping the creation"))
        return

    Logger.info(format("Creating new file {target} in DFS"))
    kwargs = {'permission': mode} if mode else {}

    self.util.run_command(target, 'CREATE', method='PUT', overwrite=True, assertable_result=False, file_to_put=source, **kwargs)

    if mode and file_status:
      file_status['permission'] = mode
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 31, Source: hdfs_resource.py


Example 7: _get_current_hiveserver_version

def _get_current_hiveserver_version():
  """
  Runs "hive --version" and parses the result in order
  to obtain the current version of hive.

  :return:  the hiveserver2 version, returned by "hive --version"
  """
  import params

  try:
    # When downgrading, the source version should be the version we are downgrading from
    if "downgrade" == params.upgrade_direction:
      if not params.downgrade_from_version:
        raise Fail('The version we are downgrading from should be provided in \'downgrade_from_version\'')
      source_version = params.downgrade_from_version
    else:
      source_version = params.current_version
    hive_execute_path = _get_hive_execute_path(source_version)
    version_hive_bin = params.hive_bin
    formatted_source_version = format_hdp_stack_version(source_version)
    if formatted_source_version and compare_versions(formatted_source_version, "2.2") >= 0:
      version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
    command = format('{version_hive_bin}/hive --version')
    return_code, hdp_output = shell.call(command, user=params.hive_user, path=hive_execute_path)
  except Exception, e:
    Logger.error(str(e))
    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 27, Source: hive_server_upgrade.py


Example 8: execute

def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running
  """

  from resource_management.libraries.functions import reload_windows_env
  from resource_management.core.resources import Execute
  reload_windows_env()
  hive_home = os.environ['HIVE_HOME']

  if configurations is None:
    return ('UNKNOWN', ['There were no configurations supplied to the script.'])

  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]

  port = THRIFT_PORT_DEFAULT
  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])

  hiveuser = HADOOPUSER_DEFAULT
  if HADOOPUSER_KEY in configurations:
    hiveuser = configurations[HADOOPUSER_KEY]

  result_code = None
  try:
    if host_name is None:
      host_name = socket.getfqdn()

    beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
    # append url according to used transport
    if transport_mode == "http":
      beeline_url.append('httpPath=cliservice')
    beeline_url_string = format(";".join(beeline_url))
    beeline_cmd = os.path.join(hive_home, "bin", "beeline.cmd")
    cmd = format("cmd /c {beeline_cmd} -u {beeline_url_string} -e '' 2>&1 | findstr Connected")

    start_time = time.time()
    try:
      Execute(cmd, user=hiveuser, timeout=30)
      total_time = time.time() - start_time
      result_code = 'OK'
      label = OK_MESSAGE.format(total_time, port)
    except:
      result_code = 'CRITICAL'
      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
  except:
    label = traceback.format_exc()
    result_code = 'UNKNOWN'

  return (result_code, [label])
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 59, Source: alert_hive_thrift_port.py


Example 9: stop

  def stop(self, env, rolling_restart=False):
    import params
    env.set_params(params)
    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh; {params.metadata_stop_script}')
    Execute(daemon_cmd,
            user=params.metadata_user,
    )
    Execute(format("rm -f {params.pid_file}"))
Author: zouzhberk, Project: ambaridemo, Lines: 8, Source: metadata_server.py


Example 10: get_check_command

def get_check_command(oozie_url, host_name, configurations):
  if OOZIE_USER in configurations:
    oozie_user = configurations[OOZIE_USER]
  else:
    raise Exception("Oozie user is required")
    
  security_enabled = False
  if SECURITY_ENABLED in configurations:
    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
  kerberos_env = None
  if security_enabled:
    if OOZIE_KEYTAB in configurations and OOZIE_PRINCIPAL in configurations:
      oozie_keytab = configurations[OOZIE_KEYTAB]
      oozie_principal = configurations[OOZIE_PRINCIPAL]

      # substitute _HOST in kerberos principal with actual fqdn
      oozie_principal = oozie_principal.replace('_HOST', host_name)
    else:
      raise KerberosPropertiesNotFound('The Oozie keytab and principal are required configurations when security is enabled.')

    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
    # when executing curl
    env = Environment.get_instance()
    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
    kerberos_env = {'KRB5CCNAME': ccache_file}

    # Get the configured Kerberos executable search paths, if any
    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
    else:
      kerberos_executable_search_paths = None

    klist_path_local = get_klist_path(kerberos_executable_search_paths)
    klist_command = format("{klist_path_local} -s {ccache_file}")

    # Determine if we need to kinit by testing to see if the relevant cache exists and has
    # non-expired tickets. Tickets are marked to expire after 5 minutes to help reduce the number
    # of kinits we do, while still recovering quickly when keytabs are regenerated.
    return_code, _ = call(klist_command, user=oozie_user)
    if return_code != 0:
      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
      kinit_command = format("{kinit_path_local} -l 5m -kt {oozie_keytab} {oozie_principal}; ")

      # kinit
      Execute(kinit_command, 
              environment=kerberos_env,
              user=oozie_user,
      )

  # oozie configuration directory uses a symlink when > HDP 2.2
  oozie_config_directory = OOZIE_CONF_DIR_LEGACY
  if os.path.exists(OOZIE_CONF_DIR):
    oozie_config_directory = OOZIE_CONF_DIR

  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
    oozie_config_directory, oozie_url)

  return (command, kerberos_env, oozie_user)
Author: andreysabitov, Project: ambari-mantl, Lines: 58, Source: alert_check_oozie_server.py


Example 11: _copy_from_local_directory

  def _copy_from_local_directory(self, target, source):
    for next_path_part in os.listdir(source):
      new_source = os.path.join(source, next_path_part)
      new_target = format("{target}/{next_path_part}")
      if os.path.isdir(new_source):
        Logger.info(format("Creating DFS directory {new_target}"))
        self._create_directory(new_target)
        self._copy_from_local_directory(new_target, new_source)
      else:
        self._create_file(new_target, new_source)
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 10, Source: hdfs_resource.py


Example 12: start

  def start(self, env, rolling_restart=False):
    import params
    env.set_params(params)
    self.configure(env)

    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh ; {params.metadata_start_script}')
    no_op_test = format('ls {params.pid_file} >/dev/null 2>&1 && ps -p `cat {params.pid_file}` >/dev/null 2>&1')
    Execute(daemon_cmd,
            user=params.metadata_user,
            not_if=no_op_test
    )
Author: zouzhberk, Project: ambaridemo, Lines: 11, Source: metadata_server.py


Example 13: startdemoldap

  def startdemoldap(self, env):
    import params
    env.set_params(params)
    self.configureldap(env)
    daemon_cmd = format('{ldap_bin} start')
    no_op_test = format('ls {ldap_pid_file} >/dev/null 2>&1 && ps -p `cat {ldap_pid_file}` >/dev/null 2>&1')
    Execute(daemon_cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=no_op_test
    )
Author: andreysabitov, Project: ambari-mantl, Lines: 11, Source: knox_gateway.py


Example 14: phoenix_service

def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
    # Note: params/status_params should already be imported before calling phoenix_service()
    pid_file = format("{pid_dir}/phoenix-{hbase_user}-server.pid")
    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")

    if action == "status":
      check_process_status(pid_file)
    else:
      env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")}
      daemon_cmd = format("{phx_daemon_script} {action}")
      if action == 'start':
        Execute(daemon_cmd,
                user=format("{hbase_user}"),
                environment=env)
  
      elif action == 'stop':
        Execute(daemon_cmd,
                timeout = 30,
                on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `cat {pid_file}`"),
                user=format("{hbase_user}"),
                environment=env
        )
        File(pid_file,
             action = "delete"
        )
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 25, Source: phoenix_service.py


Example 15: run_migration

def run_migration(env, upgrade_type):
  """
  If the acl migration script is present, then run it for either upgrade or downgrade.
  That script was introduced in HDP 2.3.4.0 and requires stopping all Kafka brokers first.
  Requires configs to be present.
  :param env: Environment.
  :param upgrade_type: "rolling" or "nonrolling"
  """
  import params

  if upgrade_type is None:
    raise Fail('Parameter "upgrade_type" is missing.')

  if params.upgrade_direction is None:
    raise Fail('Parameter "upgrade_direction" is missing.')

  if params.upgrade_direction == Direction.DOWNGRADE and params.downgrade_from_version is None:
    raise Fail('Parameter "downgrade_from_version" is missing.')

  if not params.security_enabled:
    Logger.info("Skip running the Kafka ACL migration script since cluster security is not enabled.")
    return
  
  Logger.info("Upgrade type: {0}, direction: {1}".format(str(upgrade_type), params.upgrade_direction))

  # If the schema upgrade script exists in the version upgrading to, then attempt to upgrade/downgrade it while still using the present bits.
  kafka_acls_script = None
  command_suffix = ""
  if params.upgrade_direction == Direction.UPGRADE:
    kafka_acls_script = format("/usr/hdp/{version}/kafka/bin/kafka-acls.sh")
    command_suffix = "--upgradeAcls"
  elif params.upgrade_direction == Direction.DOWNGRADE:
    kafka_acls_script = format("/usr/hdp/{downgrade_from_version}/kafka/bin/kafka-acls.sh")
    command_suffix = "--downgradeAcls"

  if kafka_acls_script is not None:
    if os.path.exists(kafka_acls_script):
      Logger.info("Found Kafka acls script: {0}".format(kafka_acls_script))
      if params.zookeeper_connect is None:
        raise Fail("Could not retrieve property kafka-broker/zookeeper.connect")

      acls_command = "{0} --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect={1} {2}".\
        format(kafka_acls_script, params.zookeeper_connect, command_suffix)

      Execute(acls_command,
              user=params.kafka_user,
              logoutput=True)
    else:
      Logger.info("Did not find Kafka acls script: {0}".format(kafka_acls_script))
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 49, Source: upgrade.py


Example 16: service_check

    def service_check(self, env):
        import params

        env.set_params(params)
        if params.security_enabled:
            Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser_principal}"), user=params.smokeuser)
        Execute("sqoop version", user=params.smokeuser, path=params.sqoop_bin_dir, logoutput=True)
Author: andreysabitov, Project: ambari-mantl, Lines: 7, Source: service_check.py


Example 17: get_live_status

def get_live_status(pid_file, flume_conf_directory):
  """
  Gets the status information of a flume agent, including source, sink, and
  channel counts.
  :param pid_file: the PID file of the agent to check
  :param flume_conf_directory: the configuration directory (i.e., /etc/flume/conf)
  :return: a dictionary of information about the flume agent
  """
  pid_file_part = pid_file.split(os.sep).pop()

  res = {}
  res['name'] = pid_file_part

  if pid_file_part.endswith(".pid"):
    res['name'] = pid_file_part[:-4]

  res['status'] = 'RUNNING' if is_flume_process_live(pid_file) else 'NOT_RUNNING'
  res['sources_count'] = 0
  res['sinks_count'] = 0
  res['channels_count'] = 0

  flume_agent_conf_dir = flume_conf_directory + os.sep + res['name']
  flume_agent_meta_file = flume_agent_conf_dir + os.sep + 'ambari-meta.json'

  try:
    with open(flume_agent_meta_file) as fp:
      meta = json.load(fp)
      res['sources_count'] = meta['sources_count']
      res['sinks_count'] = meta['sinks_count']
      res['channels_count'] = meta['channels_count']
  except:
    Logger.logger.exception(format("Error reading {flume_agent_meta_file}: "))

  return res
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 34, Source: flume_agent_helper.py


Example 18: _check_datanode_shutdown

def _check_datanode_shutdown(hdfs_binary):
  """
  Checks that a DataNode is down by running "hdfs dfsadmin getDatanodeInfo"
  several times, pausing in between runs. Once the DataNode stops responding
  this method will return, otherwise it will raise a Fail(...) and retry
  automatically.
  The stack defaults for retrying for HDFS are also way too slow for this
  command; they are set to wait about 45 seconds between client retries. As
  a result, a single execution of dfsadmin will take 45 seconds to retry and
  the DataNode may be marked as dead, causing problems with HBase.
  https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
  times for ipc.client.connect.retry.interval. In the meantime, override them
  here, but only for RU.
  :param hdfs_binary: name/path of the HDFS binary to use
  :return:
  """
  import params

  # override stock retry timeouts since after 30 seconds, the datanode is
  # marked as dead and can affect HBase during RU
  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')

  try:
    Execute(command, user=params.hdfs_user, tries=1)
  except:
    Logger.info("DataNode has successfully shutdown for upgrade.")
    return

  Logger.info("DataNode has not shutdown.")
  raise Fail('DataNode has not shutdown.')
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 31, Source: datanode_upgrade.py


Example 19: pre_rolling_restart

  def pre_rolling_restart(self, env):
    """
    Performs the tasks surrounding the Oozie startup when a rolling upgrade
    is in progress. This includes backing up the configuration, updating
    the database, preparing the WAR, and installing the sharelib in HDFS.
    :param env:
    :return:
    """
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
      return

    Logger.info("Executing Oozie Server Rolling Upgrade pre-restart")

    oozie_server_upgrade.backup_configuration()

    Execute(format("hdp-select set oozie-server {version}"))

    oozie_server_upgrade.restore_configuration()
    oozie_server_upgrade.prepare_libext_directory()
    oozie_server_upgrade.upgrade_oozie()
Author: chinpeng, Project: ambari, Lines: 25, Source: oozie_server.py


Example 20: pre_rolling_upgrade_shutdown

def pre_rolling_upgrade_shutdown(hdfs_binary):
  """
  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
  DataNode in preparation for an upgrade. This will then periodically check
  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
  This function will obtain the Kerberos ticket if security is enabled.
  :param hdfs_binary: name/path of the HDFS binary to use
  :return: True if the command ran OK (even with errors), and False if the DataNode needs to be stopped forcefully.
  """
  import params

  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
  if params.security_enabled:
    Execute(params.dn_kinit_cmd, user = params.hdfs_user)

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')

  code, output = shell.call(command, user=params.hdfs_user)
  if code == 0:
    # verify that the datanode is down
    _check_datanode_shutdown(hdfs_binary)
  else:
    # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
    if output is not None and re.search("Shutdown already in progress", output):
      Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
      return False
  return True
Author: OpenPOWER-BigData, Project: HDP-ambari, Lines: 28, Source: datanode_upgrade.py



Note: The resource_management.libraries.functions.format examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code, and do not reproduce this article without permission.

