本文整理汇总了Python中resource_management.libraries.functions.version.compare_versions函数的典型用法代码示例。如果您正苦于以下问题:Python compare_versions函数的具体用法?Python compare_versions怎么用?Python compare_versions使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了compare_versions函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: pre_upgrade_restart
def pre_upgrade_restart(self, env, upgrade_type=None):
  """
  Kafka broker hook run before restart during a stack upgrade/downgrade.

  Switches the kafka-broker binaries (HDP >= 2.2) and the kafka conf
  symlinks (HDP >= 2.3) to the target version, then runs the Kafka ACL
  migration script only when the upgrade/downgrade crosses the HDP 2.3.4.0
  boundary.

  :param env: execution environment used to publish params
  :param upgrade_type: type of the upgrade (e.g. rolling/non-rolling) — passed
    through to configure() and upgrade.run_migration()
  """
  # NOTE(review): indentation reconstructed from flattened source — confirm
  # against upstream kafka_broker.py.
  import params
  env.set_params(params)

  # hdp-select exists only from HDP 2.2 onward.
  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    hdp_select.select("kafka-broker", params.version)

  # conf-select exists only from HDP 2.3 onward.
  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
    conf_select.select(params.stack_name, "kafka", params.version)

  # This is extremely important since it should only be called if crossing the HDP 2.3.4.0 boundary.
  if params.current_version and params.version and params.upgrade_direction:
    src_version = dst_version = None
    if params.upgrade_direction == Direction.UPGRADE:
      src_version = format_hdp_stack_version(params.current_version)
      dst_version = format_hdp_stack_version(params.version)
    else:
      # These represent the original values during the UPGRADE direction
      src_version = format_hdp_stack_version(params.version)
      dst_version = format_hdp_stack_version(params.downgrade_from_version)

    # Migrate ACLs only when moving from a pre-2.3.4.0 to a 2.3.4.0+ stack.
    if compare_versions(src_version, '2.3.4.0') < 0 and compare_versions(dst_version, '2.3.4.0') >= 0:
      # Calling the acl migration script requires the configs to be present.
      self.configure(env, upgrade_type=upgrade_type)
      upgrade.run_migration(env, upgrade_type)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:25,代码来源:kafka_broker.py
示例2: _create_config_links_if_necessary
def _create_config_links_if_necessary(self, stack_id, stack_version):
  """
  Sets up the required structure for /etc/<component>/conf symlinks and /usr/hdp/current
  configuration symlinks IFF the current stack is < HDP 2.3+ and the new stack is >= HDP 2.3

  :param stack_id: stack id, ie HDP-2.3
  :param stack_version: version to set, ie 2.3.0.0-1234
  """
  if stack_id is None:
    Logger.info("Cannot create config links when stack_id is not defined")
    return

  args = stack_id.upper().split('-')
  if len(args) != 2:
    Logger.info("Unrecognized stack id {0}, cannot create config links".format(stack_id))
    return

  if args[0] != "HDP":
    Logger.info("Unrecognized stack name {0}, cannot create config links".format(args[0]))
    # BUG FIX: previously fell through and created config links for a
    # non-HDP stack despite logging that it cannot; bail out as the message says.
    return

  if compare_versions(format_hdp_stack_version(args[1]), "2.3.0.0") < 0:
    Logger.info("Configuration symlinks are not needed for {0}, only HDP-2.3+".format(stack_version))
    return

  for package_name, directories in conf_select.PACKAGE_DIRS.iteritems():
    # if already on HDP 2.3, then we should skip making conf.backup folders
    if self.current_hdp_stack_version and compare_versions(self.current_hdp_stack_version, '2.3') >= 0:
      Logger.info("The current cluster stack of {0} does not require backing up configurations; "
        "only conf-select versioned config directories will be created.".format(stack_version))
      # only link configs for all known packages
      conf_select.link_component_conf_to_versioned_config(package_name, stack_version)
    else:
      # link configs and create conf.backup folders for all known packages
      conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories,
        skip_existing_links = False, link_to = "backup")
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:35,代码来源:install_packages.py
示例3: actionexecute
def actionexecute(self, env):
  """
  Custom action that points every installed component at a new stack version
  via 'hdp-select set all', and (for HDP 2.3+) re-links /etc/<component>/conf
  to /usr/hdp/current/<component>.

  :param env: execution environment (unused beyond Script config access)
  :raises Fail: when /commandParams/version is missing
  """
  # NOTE(review): indentation reconstructed from flattened source — the HDP 2.3
  # check is assumed to be a sibling of the 2.2 check inside the HDP branch;
  # confirm against upstream ru_set_all.py.
  config = Script.get_config()

  version = default('/commandParams/version', None)
  stack_name = default('/hostLevelParams/stack_name', "")

  if not version:
    raise Fail("Value is required for '/commandParams/version'")

  # other os?
  if OSCheck.is_redhat_family():
    # Clear yum metadata so the new repo version is visible.
    cmd = ('/usr/bin/yum', 'clean', 'all')
    code, out = shell.call(cmd, sudo=True)

  min_ver = format_hdp_stack_version("2.2")
  real_ver = format_hdp_stack_version(version)
  if stack_name == "HDP":
    # hdp-select only exists from HDP 2.2 onward.
    if compare_versions(real_ver, min_ver) >= 0:
      cmd = ('hdp-select', 'set', 'all', version)
      code, out = shell.call(cmd, sudo=True)

    if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
      # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
      for k, v in conf_select.PACKAGE_DIRS.iteritems():
        for dir_def in v:
          link_config(dir_def['conf_dir'], dir_def['current_dir'])
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:26,代码来源:ru_set_all.py
示例4: check_stack_feature
def check_stack_feature(stack_feature, stack_version):
  """
  Given a stack_feature and a specific stack_version, it validates that the feature is supported by the stack_version.
  IMPORTANT, notice that the mapping of feature to version comes from cluster-env if it exists there.

  :param stack_feature: Feature name to check if it is supported by the stack. For example: "rolling_upgrade"
  :param stack_version: Version of the stack
  :return: Will return True if successful, otherwise, False.
  :raises Fail: when the cluster-env stack_features config is absent
  """
  # NOTE(review): indentation reconstructed from flattened source — the
  # 'return True' is assumed to sit inside the name-match branch (feature found
  # and within [min_version, max_version)); confirm against upstream stack_features.py.
  from resource_management.libraries.functions.default import default
  from resource_management.libraries.functions.version import compare_versions
  stack_features_config = default("/configurations/cluster-env/stack_features", None)

  if not stack_version:
    Logger.debug("Cannot determine if feature %s is supported since did not provide a stack version." % stack_feature)
    return False

  if stack_features_config:
    data = json.loads(stack_features_config)
    for feature in data["stack_features"]:
      if feature["name"] == stack_feature:
        # Feature is unsupported below its min_version...
        if "min_version" in feature:
          min_version = feature["min_version"]
          if compare_versions(stack_version, min_version, format = True) < 0:
            return False
        # ...and at/above its max_version (exclusive upper bound).
        if "max_version" in feature:
          max_version = feature["max_version"]
          if compare_versions(stack_version, max_version, format = True) >= 0:
            return False
        return True
  else:
    raise Fail("Stack features not defined by stack")

  # Feature name not present in the stack_features list.
  return False
开发者ID:maduhu,项目名称:HDP2.5-ambari,代码行数:34,代码来源:stack_features.py
示例5: pre_rolling_restart
def pre_rolling_restart(self, env):
  """Switch the Spark Thrift Server confs/binaries to the target stack version before restart."""
  import params
  env.set_params(params)

  target = params.version
  # Spark Thrift Server is only managed via conf/hdp-select from HDP 2.3.2.0 on.
  if not target or compare_versions(format_hdp_stack_version(target), '2.3.2.0') < 0:
    return

  conf_select.select(params.stack_name, "spark", target)
  hdp_select.select("spark-thriftserver", target)
开发者ID:zouzhberk,项目名称:ambaridemo,代码行数:7,代码来源:spark_thrift_server.py
示例6: pre_rolling_restart
def pre_rolling_restart(self, env):
  """Point spark-historyserver at the new stack version and push the Tez tarball to HDFS (HDP 2.2+)."""
  import params
  env.set_params(params)

  if not params.version:
    return
  if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
    return

  Execute(format("hdp-select set spark-historyserver {version}"))
  copy_tarballs_to_hdfs('tez', 'spark-historyserver', params.spark_user, params.hdfs_user, params.user_group)
开发者ID:chinpeng,项目名称:ambari,代码行数:7,代码来源:job_history_server.py
示例7: pre_upgrade_restart
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the target stack's storm configs and storm-client binaries before restart (HDP 2.2+)."""
  import params
  env.set_params(params)

  stack_supported = params.version and \
      compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0
  if stack_supported:
    conf_select.select(params.stack_name, "storm", params.version)
    hdp_select.select("storm-client", params.version)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:7,代码来源:drpc_server.py
示例8: pre_rolling_restart
def pre_rolling_restart(self, env):
  """Set the hdp-select pointer for the YARN ResourceManager to the target version (HDP 2.2+)."""
  Logger.info("Executing Rolling Upgrade post-restart")
  import params
  env.set_params(params)

  if params.version:
    normalized = format_hdp_stack_version(params.version)
    if compare_versions(normalized, '2.2.0.0') >= 0:
      Execute(format("hdp-select set hadoop-yarn-resourcemanager {version}"))
开发者ID:fanzhidongyzby,项目名称:ambari,代码行数:7,代码来源:resourcemanager.py
示例9: spark_service
def spark_service(action):
  """
  Start or stop the Spark History Server.

  On 'start': kinit as the spark user when security is enabled, copy the Tez
  tarball to HDFS for pre-2.3 stacks, then launch the history server unless
  its pid file points at a live process. On 'stop': run the stop script and
  delete the pid file.

  :param action: 'start' or 'stop'
  """
  import params

  if action == 'start':
    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
    # need to copy the tarball, otherwise, copy it.
    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.3.0.0') < 0:
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user)
      if resource_created:
        params.HdfsResource(None, action="execute")

    # Skip the start when the pid file exists and the process is alive.
    no_op_test = format(
      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
    Execute(format('{spark_history_server_start}'),
            user=params.spark_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=no_op_test
    )
  elif action == 'stop':
    Execute(format('{spark_history_server_stop}'),
            user=params.spark_user,
            environment={'JAVA_HOME': params.java_home}
    )
    File(params.spark_history_server_pid_file,
         action="delete"
    )
开发者ID:andreysabitov,项目名称:ambari-mantl,代码行数:30,代码来源:spark_service.py
示例10: pre_rolling_restart
def pre_rolling_restart(self, env):
  """
  Knox gateway rolling-upgrade pre-restart hook (HDP 2.2+).

  On an UPGRADE: backs up the current conf directory to a tar archive,
  switches the conf/binary symlinks to the new version, then extracts the
  backed-up conf contents into the new conf directory and removes the
  temporary tar copy.

  :param env: execution environment used to publish params
  """
  import params
  env.set_params(params)

  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    absolute_backup_dir = None
    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

      # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
      absolute_backup_dir = upgrade.backup_data()

    # conf-select will change the symlink to the conf folder.
    conf_select.select(params.stack_name, "knox", params.version)
    hdp_select.select("knox-server", params.version)

    # Extract the tar of the old conf folder into the new conf directory
    if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
      if os.path.exists(conf_tar_source_path):
        extract_dir = os.path.realpath(params.knox_conf_dir)
        conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
        Logger.info("Copying %s into %s file." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
        # Copy via sudo because the conf dir may not be writable by the agent user.
        Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                sudo = True,
        )

        tar_archive.untar_archive(conf_tar_source_path, extract_dir)

        # Remove the temporary tar copy after extraction.
        File(conf_tar_dest_path,
             action = "delete",
        )
开发者ID:andreysabitov,项目名称:ambari-mantl,代码行数:32,代码来源:knox_gateway.py
示例11: _get_current_hiveserver_version
def _get_current_hiveserver_version():
  """
  Runs "hive --version" and parses the result in order
  to obtain the current version of hive.

  :return: the hiveserver2 version, returned by "hive --version"
  :raises Fail: when downgrade_from_version is missing on a downgrade, or
    when the hive --version command cannot be executed
  """
  # NOTE(review): this extract appears truncated — the parsing of hdp_output
  # into a version string is not visible here; confirm against upstream
  # hive_server_upgrade.py. Python 2 'except Exception, e' syntax.
  import params

  try:
    # When downgrading the source version should be the version we are downgrading from
    if "downgrade" == params.upgrade_direction:
      if not params.downgrade_from_version:
        raise Fail('The version from which we are downgrading from should be provided in \'downgrade_from_version\'')
      source_version = params.downgrade_from_version
    else:
      source_version = params.current_version
    hive_execute_path = _get_hive_execute_path(source_version)
    version_hive_bin = params.hive_bin
    formatted_source_version = format_hdp_stack_version(source_version)
    # HDP 2.2+ installs hive under the versioned /usr/hdp/<version> tree.
    if formatted_source_version and compare_versions(formatted_source_version, "2.2") >= 0:
      version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
    command = format('{version_hive_bin}/hive --version')
    return_code, hdp_output = shell.call(command, user=params.hive_user, path=hive_execute_path)
  except Exception, e:
    Logger.error(str(e))
    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:27,代码来源:hive_server_upgrade.py
示例12: _get_directory_mappings_during_upgrade
def _get_directory_mappings_during_upgrade():
  """
  Gets a dictionary of directory to archive name that represents the
  directories that need to be backed up and their output tarball archive targets

  :return: the dictionary of directory to tarball mappings
  """
  import params

  # Must be performing an Upgrade
  direction_is_upgrade = params.upgrade_direction is not None and \
      params.upgrade_direction == Direction.UPGRADE
  has_from_version = params.upgrade_from_version is not None and \
      params.upgrade_from_version != ""
  if not direction_is_upgrade or not has_from_version:
    Logger.error("Function _get_directory_mappings_during_upgrade() can only be called during a Stack Upgrade in direction UPGRADE.")
    return {}

  # By default, use this for all stacks.
  knox_data_dir = '/var/lib/knox/data'

  on_hdp = params.stack_name and params.stack_name.upper() == "HDP"
  if on_hdp and compare_versions(format_hdp_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
    # Use the version that is being upgraded from.
    knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')

  # the trailing "/" is important here so as to not include the "conf" folder itself
  directories = {
    knox_data_dir: BACKUP_DATA_ARCHIVE,
    params.knox_conf_dir + "/": BACKUP_CONF_ARCHIVE,
  }

  Logger.info(format("Knox directories to backup:\n{directories}"))
  return directories
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:27,代码来源:upgrade.py
示例13: zookeeper_service
def zookeeper_service(action='start', upgrade_type=None):
  """
  Start or stop the ZooKeeper server daemon.

  :param action: 'start' or 'stop'
  :param upgrade_type: non-None when invoked during a stack upgrade; the
    symlink repair below is skipped in that case
  """
  import params

  # This path may be missing after Ambari upgrade. We need to create it.
  if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version \
    and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    # NOTE(review): this guard tests params.current_version but formats
    # params.version, and the two select calls also mix the two — looks
    # inconsistent; confirm intent against upstream zookeeper_service.py.
    conf_select.select(params.stack_name, "zookeeper", params.current_version)
    hdp_select.select("zookeeper-server", params.version)

  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

  if action == 'start':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
    # Skip the start when the pid file exists and the process is alive.
    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
    Execute(daemon_cmd,
            not_if=no_op_test,
            user=params.zk_user
    )

    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
      Execute(kinit_cmd,
              user=params.smokeuser
      )
  elif action == 'stop':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
    rm_pid = format("rm -f {zk_pid_file}")
    Execute(daemon_cmd,
            user=params.zk_user
    )
    Execute(rm_pid)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:33,代码来源:zookeeper_service.py
示例14: pre_upgrade_restart
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the target stack's hadoop configs and hdfs-datanode binaries before restart (HDP 2.2+)."""
  Logger.info("Executing DataNode Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  target = params.version
  if not target or compare_versions(format_hdp_stack_version(target), '2.2.0.0') < 0:
    return

  conf_select.select(params.stack_name, "hadoop", target)
  hdp_select.select("hadoop-hdfs-datanode", target)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:7,代码来源:datanode.py
示例15: pre_upgrade_restart
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Switch zookeeper-server confs/binaries to the target stack version before restart (HDP 2.2+)."""
  Logger.info("Executing Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  should_select = False
  if params.version:
    normalized = format_hdp_stack_version(params.version)
    should_select = compare_versions(normalized, '2.2.0.0') >= 0

  if should_select:
    conf_select.select(params.stack_name, "zookeeper", params.version)
    hdp_select.select("zookeeper-server", params.version)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:8,代码来源:zookeeper_server.py
示例16: pre_upgrade_restart
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Point the Spark client at the new stack version prior to restart (HDP 2.2+ only)."""
  import params
  env.set_params(params)

  new_version = params.version
  if not new_version:
    return
  if compare_versions(format_hdp_stack_version(new_version), '2.2.0.0') < 0:
    return

  Logger.info("Executing Spark Client Stack Upgrade pre-restart")
  conf_select.select(params.stack_name, "spark", new_version)
  hdp_select.select("spark-client", new_version)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:8,代码来源:spark_client.py
示例17: pre_rolling_restart
def pre_rolling_restart(self, env):
  """Select the MapReduce HistoryServer version and push its tarball to HDFS (HDP 2.2+)."""
  Logger.info("Executing Rolling Upgrade pre-restart")
  import params
  env.set_params(params)

  if not params.version:
    return
  if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
    return

  Execute(format("hdp-select set hadoop-mapreduce-historyserver {version}"))
  copy_tarballs_to_hdfs('mapreduce', 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
开发者ID:fanzhidongyzby,项目名称:ambari,代码行数:8,代码来源:historyserver.py
示例18: pre_rolling_restart
def pre_rolling_restart(self, env):
  """Move the DataNode's hadoop configs and binaries to the target stack version (HDP 2.2+)."""
  Logger.info("Executing DataNode Rolling Upgrade pre-restart")
  import params
  env.set_params(params)

  meets_minimum_stack = params.version and \
      compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0
  if meets_minimum_stack:
    conf_select.select(params.stack_name, "hadoop", params.version)
    hdp_select.select("hadoop-hdfs-datanode", params.version)
开发者ID:andreysabitov,项目名称:ambari-mantl,代码行数:8,代码来源:datanode.py
示例19: unlink_all_configs
def unlink_all_configs(self, env):
  """
  Reverses the work performed in link_config. This should only be used when downgrading from
  HDP 2.3 to 2.2 in order to under the symlink work required for 2.3.

  Applies only when: direction is DOWNGRADE, stack is HDP, both the
  downgrade-to and downgrade-from versions are supplied, the target is
  below 2.3 and the source is 2.3 or later. Otherwise logs a warning and
  returns without changes.

  :param env: execution environment (unused here)
  """
  stack_name = default('/hostLevelParams/stack_name', "").upper()
  downgrade_to_version = default('/commandParams/version', None)
  downgrade_from_version = default('/commandParams/downgrade_from_version', None)
  upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)

  # downgrade only
  if upgrade_direction != Direction.DOWNGRADE:
    Logger.warning("Unlinking configurations should only be performed on a downgrade.")
    return

  # HDP only
  if stack_name != "HDP":
    Logger.warning("Unlinking configurations should only be performed on the HDP stack.")
    return

  if downgrade_to_version is None or downgrade_from_version is None:
    Logger.warning("Both 'commandParams/version' and 'commandParams/downgrade_from_version' must be specified to unlink configs on downgrade.")
    return

  Logger.info("Unlinking all configs when downgrading from HDP 2.3 to 2.2")

  # normalize the versions
  stack_23 = format_hdp_stack_version("2.3")
  downgrade_to_version = format_hdp_stack_version(downgrade_to_version)
  downgrade_from_version = format_hdp_stack_version(downgrade_from_version)

  # downgrade-to-version must be 2.2 (less than 2.3)
  if compare_versions(downgrade_to_version, stack_23) >= 0:
    Logger.warning("Unlinking configurations should only be performed when downgrading to HDP 2.2")
    return

  # downgrade-from-version must be 2.3+
  if compare_versions(downgrade_from_version, stack_23) < 0:
    Logger.warning("Unlinking configurations should only be performed when downgrading from HDP 2.3 or later")
    return

  # iterate through all directory conf mappings and undo the symlinks
  for key, value in conf_select.PACKAGE_DIRS.iteritems():
    for directory_mapping in value:
      original_config_directory = directory_mapping['conf_dir']
      self._unlink_config(original_config_directory)
开发者ID:OpenPOWER-BigData,项目名称:HDP-ambari,代码行数:46,代码来源:ru_set_all.py
示例20: pre_rolling_restart
def pre_rolling_restart(self, env):
  """Move the ResourceManager's hadoop configs and binaries to the target stack version (HDP 2.2+)."""
  Logger.info("Executing Rolling Upgrade post-restart")
  import params
  env.set_params(params)

  target = params.version
  if not (target and compare_versions(format_hdp_stack_version(target), '2.2.0.0') >= 0):
    return

  conf_select.select(params.stack_name, "hadoop", target)
  hdp_select.select("hadoop-yarn-resourcemanager", target)
开发者ID:andreysabitov,项目名称:ambari-mantl,代码行数:8,代码来源:resourcemanager.py
注:本文中的resource_management.libraries.functions.version.compare_versions函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论