This article collects and summarizes typical usage examples of the `_` function from the Python module `sahara.i18n`. If you have been wondering what exactly `_` does, how to call it, and what it looks like in real code, the hand-picked examples below should help.
The following shows 20 code examples of the `_` function, sorted by popularity by default.
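All of the examples share one pattern: `_` marks a user-facing message template for translation, and any variables are interpolated after translation, so translators always see a single stable template. Below is a minimal sketch of that pattern; the `ConfigurationError` class here is a hypothetical stand-in, and only the `from sahara.i18n import _` import mirrors the real module.

# Minimal sketch of the translate-then-interpolate pattern.
from sahara.i18n import _


class ConfigurationError(Exception):  # hypothetical stand-in
    pass


def require_option(value, name):
    if value is None:
        # Translate the template first, then interpolate, so the message
        # catalog contains "Option %s is required but not set" verbatim.
        raise ConfigurationError(_("Option %s is required but not set") % name)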
Example 1: domain_for_proxy
def domain_for_proxy():
    '''Return the proxy domain or None

    If configured to use the proxy domain, this function will return that
    domain. If not configured to use the proxy domain, this function will
    return None. If the proxy domain can't be found this will raise an
    exception.

    :returns: A Keystone Domain object or None.
    :raises ConfigurationError: If the domain is requested but not specified.
    :raises NotFoundException: If the domain name is specified but cannot be
                               found.
    '''
if CONF.use_domain_for_proxy_users is False:
return None
if CONF.proxy_user_domain_name is None:
raise ex.ConfigurationError(_('Proxy domain requested but not '
'specified.'))
admin = k.client_for_admin()
global PROXY_DOMAIN
if not PROXY_DOMAIN:
domain_list = admin.domains.list(name=CONF.proxy_user_domain_name)
if len(domain_list) == 0:
raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
message=_('Failed to find domain %s'))
# the domain name should be globally unique in Keystone
if len(domain_list) > 1:
raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
message=_('Unexpected results found '
'when searching for domain '
'%s'))
PROXY_DOMAIN = domain_list[0]
return PROXY_DOMAIN
Developer: degorenko | Project: sahara | Lines: 35 | Source file: proxy.py
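Note the convention here: `NotFoundException` receives the translated template (with its `%s` placeholder) and the `value` separately, and interpolates them itself; Example 14 below shows the same idea with the newer `message_template` keyword. A rough sketch of what such an exception class might look like; this is a simplification for illustration, not the real sahara.exceptions code:

# Hypothetical simplification of the message/value convention.
class NotFoundException(Exception):
    def __init__(self, value, message=None):
        template = message or "Object '%s' is not found"
        super().__init__(template % value)
        self.value = value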
Example 2: validate_number_of_datanodes
def validate_number_of_datanodes(cluster, scaled_groups, default_configs):
dfs_replication = 0
for config in default_configs:
if config.name == "dfs.replication":
dfs_replication = config.default_value
conf = cluster.cluster_configs
if "HDFS" in conf and "dfs.replication" in conf["HDFS"]:
dfs_replication = conf["HDFS"]["dfs.replication"]
if not scaled_groups:
scaled_groups = {}
dn_count = 0
for ng in cluster.node_groups:
if "DATANODE" in ng.node_processes:
if ng.id in scaled_groups:
dn_count += scaled_groups[ng.id]
else:
dn_count += ng.count
if dn_count < int(dfs_replication):
raise ex.InvalidComponentCountException(
"datanode",
_("%s or more") % dfs_replication,
dn_count,
_("Number of %(dn)s instances should not be less " "than %(replication)s")
% {"dn": "DATANODE", "replication": "dfs.replication"},
)
Developer: snowind | Project: sahara | Lines: 27 | Source file: clusterspec.py
Example 3: wait_ambari_requests
def wait_ambari_requests(self, requests, cluster_name):
requests = set(requests)
failed = []
while len(requests) > 0:
completed, not_completed = set(), set()
for req_id in requests:
request = self.get_request_info(cluster_name, req_id)
status = request.get("request_status")
if status == 'COMPLETED':
completed.add(req_id)
elif status in ['IN_PROGRESS', 'PENDING']:
not_completed.add(req_id)
else:
failed.append(request)
if failed:
msg = _("Some Ambari request(s) "
"not in COMPLETED state: %(description)s.")
descrs = []
for req in failed:
descr = _(
"request %(id)d: %(name)s - in status %(status)s")
descrs.append(descr %
{'id': req.get("id"),
'name': req.get("request_context"),
'status': req.get("request_status")})
raise p_exc.HadoopProvisionError(msg % {'description': descrs})
requests = not_completed
context.sleep(5)
LOG.debug("Waiting for %d ambari request(s) to be completed",
len(not_completed))
LOG.debug("All ambari requests have been completed")
Developer: frgaudet | Project: sahara | Lines: 31 | Source file: client.py
Example 4: _install_services
def _install_services(self, cluster_name, ambari_info):
LOG.info(_LI('Installing required Hadoop services ...'))
ambari_address = ambari_info.get_address()
        install_url = ('http://{0}/api/v1/clusters/{1}'
                       '/services?ServiceInfo/state=INIT'.format(
                           ambari_address, cluster_name))
body = ('{"RequestInfo" : { "context" : "Install all services" },'
'"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
result = self._put(install_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(self._get_async_request_uri(
ambari_info, cluster_name, request_id),
ambari_info)
if success:
LOG.info(_LI("Install of Hadoop stack successful."))
self._finalize_ambari_state(ambari_info)
else:
LOG.critical(_LC('Install command failed.'))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Install command failed. {0}').format(result.text))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
Developer: degorenko | Project: sahara | Lines: 30 | Source file: versionhandler.py
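Besides `_`, this example uses the per-log-level markers (`_LI`, `_LE`, `_LC`) that older oslo.i18n guidelines prescribed for log messages. A sketch of the import and usage pattern, assuming the module layout of Sahara releases from that era (newer OpenStack dropped the `_L*` markers):

# Sketch: log-level markers from older sahara.i18n releases (assumed layout).
import logging

from sahara.i18n import _        # exceptions and other user-facing text
from sahara.i18n import _LI      # marks LOG.info messages
from sahara.i18n import _LE      # marks LOG.error messages
from sahara.i18n import _LC      # marks LOG.critical messages

LOG = logging.getLogger(__name__)
LOG.info(_LI("Installing required Hadoop services ..."))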
Example 5: _check_storm
def _check_storm(cluster):
dr_count = utils.get_instances_count(cluster, common.DRPC_SERVER)
ni_count = utils.get_instances_count(cluster, common.NIMBUS)
su_count = utils.get_instances_count(cluster, common.STORM_UI_SERVER)
sv_count = utils.get_instances_count(cluster, common.SUPERVISOR)
if dr_count > 1:
raise ex.InvalidComponentCountException(common.DRPC_SERVER,
_("0 or 1"), dr_count)
if ni_count > 1:
raise ex.InvalidComponentCountException(common.NIMBUS,
_("0 or 1"), ni_count)
if su_count > 1:
raise ex.InvalidComponentCountException(common.STORM_UI_SERVER,
_("0 or 1"), su_count)
if dr_count == 0 and ni_count == 1:
raise ex.RequiredServiceMissingException(
common.DRPC_SERVER, required_by=common.NIMBUS)
if dr_count == 1 and ni_count == 0:
raise ex.RequiredServiceMissingException(
common.NIMBUS, required_by=common.DRPC_SERVER)
if su_count == 1 and (dr_count == 0 or ni_count == 0):
raise ex.RequiredServiceMissingException(
common.NIMBUS, required_by=common.STORM_UI_SERVER)
if dr_count == 1 and sv_count == 0:
raise ex.RequiredServiceMissingException(
common.SUPERVISOR, required_by=common.DRPC_SERVER)
if sv_count > 0 and dr_count == 0:
raise ex.RequiredServiceMissingException(
common.DRPC_SERVER, required_by=common.SUPERVISOR)
Developer: crobby | Project: sahara | Lines: 29 | Source file: validation.py
Example 6: render
def render(res=None, resp_type=None, status=None, **kwargs):
if not res:
res = {}
if type(res) is dict:
res.update(kwargs)
elif kwargs:
# can't merge kwargs into the non-dict res
abort_and_log(500,
_("Non-dict and non-empty kwargs passed to render"))
status_code = getattr(flask.request, 'status_code', None)
if status:
status_code = status
if not status_code:
status_code = 200
if not resp_type:
resp_type = getattr(flask.request, 'resp_type', RT_JSON)
if not resp_type:
resp_type = RT_JSON
serializer = None
if "application/json" in resp_type:
resp_type = RT_JSON
serializer = wsgi.JSONDictSerializer()
else:
abort_and_log(400, _("Content type '%s' isn't supported") % resp_type)
body = serializer.serialize(res)
resp_type = str(resp_type)
return flask.Response(response=body, status=status_code,
mimetype=resp_type)
Developer: AlexanderYAPPO | Project: sahara | Lines: 34 | Source file: api.py
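A sketch of how a handler might call `render`; it assumes a Flask request context, since `render` reads `status_code` and `resp_type` off `flask.request`. The route and payload below are made up for illustration:

# Hypothetical Flask handler built on render().
import flask

app = flask.Flask(__name__)

@app.route('/clusters')
def list_clusters():
    clusters = [{"id": "42", "name": "demo"}]  # fake payload
    # Extra kwargs would be merged into the dict; status defaults to 200.
    return render({"clusters": clusters})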
Example 7: validate
def validate(self, cluster):
nn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "namenode")])
if nn_count != 1:
raise ex.InvalidComponentCountException("namenode", 1, nn_count)
snn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "secondarynamenode")])
if snn_count > 1:
raise ex.InvalidComponentCountException("secondarynamenode", _("0 or 1"), snn_count)
jt_count = sum([ng.count for ng in utils.get_node_groups(cluster, "jobtracker")])
if jt_count > 1:
raise ex.InvalidComponentCountException("jobtracker", _("0 or 1"), jt_count)
oozie_count = sum([ng.count for ng in utils.get_node_groups(cluster, "oozie")])
if oozie_count > 1:
raise ex.InvalidComponentCountException("oozie", _("0 or 1"), oozie_count)
hive_count = sum([ng.count for ng in utils.get_node_groups(cluster, "hiveserver")])
if jt_count == 0:
tt_count = sum([ng.count for ng in utils.get_node_groups(cluster, "tasktracker")])
if tt_count > 0:
raise ex.RequiredServiceMissingException("jobtracker", required_by="tasktracker")
if oozie_count > 0:
raise ex.RequiredServiceMissingException("jobtracker", required_by="oozie")
if hive_count > 0:
raise ex.RequiredServiceMissingException("jobtracker", required_by="hive")
if hive_count > 1:
raise ex.InvalidComponentCountException("hive", _("0 or 1"), hive_count)
Developer: metasensus | Project: sahara | Lines: 34 | Source file: versionhandler.py
Example 8: __call__
def __call__(self, req):
"""Ensures that tenants in url and token are equal.
Handle incoming request by checking tenant info prom the headers and
url ({tenant_id} url attribute).
Pass request downstream on success.
Reject request if tenant_id from headers not equals to tenant_id from
url.
"""
token_tenant = req.environ.get("HTTP_X_TENANT_ID")
if not token_tenant:
LOG.warning("Can't get tenant_id from env")
raise ex.HTTPServiceUnavailable()
path = req.environ['PATH_INFO']
if path != '/':
try:
version, possibly_url_tenant, rest = (
strutils.split_path(path, 2, 3, True)
)
except ValueError:
LOG.warning("Incorrect path: {path}".format(path=path))
raise ex.HTTPNotFound(_("Incorrect path"))
if uuidutils.is_uuid_like(possibly_url_tenant):
url_tenant = possibly_url_tenant
if token_tenant != url_tenant:
LOG.debug("Unauthorized: token tenant != requested tenant")
raise ex.HTTPUnauthorized(
_('Token tenant != requested tenant'))
return self.application
Developer: openstack | Project: sahara | Lines: 32 | Source file: auth_valid.py
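On success this middleware hands control to the downstream WSGI application (`self.application`). A rough sketch of the glue that would install it, assuming the usual constructor signature for such filters; the class name is an assumption, since the snippet above shows only `__call__`:

# Hypothetical wiring; assumes AuthValidator.__init__(self, application).
def wrap(app):
    """Wrap a WSGI application with tenant validation."""
    return AuthValidator(app)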
Example 9: node_group_template_update
def node_group_template_update(context, values, ignore_default=False):
session = get_session()
try:
with session.begin():
ngt_id = values['id']
ngt = _node_group_template_get(context, session, ngt_id)
if not ngt:
raise ex.NotFoundException(
ngt_id, _("NodeGroupTemplate id '%s' not found"))
elif not ignore_default and ngt.is_default:
raise ex.UpdateFailedException(
ngt_id,
_("NodeGroupTemplate id '%s' can not be updated. "
"It is a default template.")
)
# Check to see that the node group template to be updated is not in
# use by an existing cluster.
for template_relationship in ngt.templates_relations:
if len(template_relationship.cluster_template.clusters) > 0:
raise ex.UpdateFailedException(
ngt_id,
_("NodeGroupTemplate id '%s' can not be updated. "
"It is referenced by an existing cluster.")
)
ngt.update(values)
except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry(
_("Duplicate entry for NodeGroupTemplate: %s") % e.columns)
return ngt
Developer: shamim8888 | Project: sahara | Lines: 32 | Source file: api.py
Example 10: cluster_create
def cluster_create(context, values):
values = values.copy()
cluster = m.Cluster()
node_groups = values.pop("node_groups", [])
cluster.update(values)
session = get_session()
with session.begin():
try:
cluster.save(session=session)
except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry(
_("Duplicate entry for Cluster: %s") % e.columns)
try:
for ng in node_groups:
node_group = m.NodeGroup()
node_group.update({"cluster_id": cluster.id})
node_group.update(ng)
node_group.save(session=session)
except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry(
_("Duplicate entry for NodeGroup: %s") % e.columns)
return cluster_get(context, cluster.id)
Developer: esikachev | Project: sahara | Lines: 25 | Source file: api.py
Example 11: cluster_template_create
def cluster_template_create(context, values):
values = values.copy()
cluster_template = m.ClusterTemplate()
node_groups = values.pop("node_groups") or []
cluster_template.update(values)
session = get_session()
with session.begin():
try:
cluster_template.save(session=session)
except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry(
_("Duplicate entry for ClusterTemplate: %s") % e.columns)
try:
for ng in node_groups:
node_group = m.TemplatesRelation()
node_group.update({"cluster_template_id": cluster_template.id})
node_group.update(ng)
node_group.save(session=session)
except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry(
_("Duplicate entry for TemplatesRelation: %s") % e.columns)
return cluster_template_get(context, cluster_template.id)
Developer: esikachev | Project: sahara | Lines: 26 | Source file: api.py
Example 12: validate_number_of_datanodes
def validate_number_of_datanodes(cluster, scaled_groups, default_configs):
dfs_replication = 0
for config in default_configs:
if config.name == 'dfs.replication':
dfs_replication = config.default_value
conf = cluster.cluster_configs
if 'HDFS' in conf and 'dfs.replication' in conf['HDFS']:
dfs_replication = conf['HDFS']['dfs.replication']
if not scaled_groups:
scaled_groups = {}
dn_count = 0
for ng in cluster.node_groups:
if 'DATANODE' in ng.node_processes:
if ng.id in scaled_groups:
dn_count += scaled_groups[ng.id]
else:
dn_count += ng.count
if dn_count < int(dfs_replication):
raise ex.InvalidComponentCountException(
'datanode', _('%s or more') % dfs_replication, dn_count,
_('Number of %(dn)s instances should not be less '
'than %(replication)s')
% {'dn': 'DATANODE', 'replication': 'dfs.replication'})
Developer: AllenFromMinneapolis | Project: sahara | Lines: 25 | Source file: clusterspec.py
Example 13: _await_cldb
def _await_cldb(self, cluster_context, instances=None, timeout=600):
instances = instances or cluster_context.get_instances()
cldb_node = cluster_context.get_instance(mfs.CLDB)
start_time = timeutils.utcnow()
retry_count = 0
with cldb_node.remote() as r:
LOG.debug("Waiting {count} seconds for CLDB initialization".format(
count=timeout))
while timeutils.delta_seconds(start_time,
timeutils.utcnow()) < timeout:
ec, out = r.execute_command(NODE_LIST_CMD,
raise_when_error=False)
resp = json.loads(out)
status = resp['status']
if str(status).lower() == 'ok':
ips = [n['ip'] for n in resp['data']]
retry_count += 1
for i in instances:
if (i.management_ip not in ips
and retry_count > DEFAULT_RETRY_COUNT):
raise ex.HadoopProvisionError(_(
"Node failed to connect to CLDB: %s") %
i.management_ip)
break
else:
context.sleep(DELAY)
else:
raise ex.HadoopProvisionError(_("CLDB failed to start"))
Developer: YongchaoTIAN | Project: sahara | Lines: 28 | Source file: base_node_manager.py
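The wait loop relies on Python's `while ... else`: the `else` branch runs only when the loop condition expires without a `break`, which is what turns a timeout into `HadoopProvisionError`. The same pattern in isolation, as a small sketch:

# Minimal sketch of the poll-with-timeout pattern used above.
import time

from oslo_utils import timeutils

def wait_for(predicate, timeout=600, delay=5):
    start = timeutils.utcnow()
    while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
        if predicate():
            break
        time.sleep(delay)
    else:  # loop ended without break -> timed out
        raise RuntimeError("condition not met within %d seconds" % timeout)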
Example 14: proxy_user_delete
def proxy_user_delete(username=None, user_id=None):
    '''Delete the user from the proxy domain.

    :param username: The name of the user to delete.
    :param user_id: The id of the user to delete, if provided this overrides
                    the username.
    :raises NotFoundException: If there is an error locating the user in the
                               proxy domain.
    '''
admin = k.client_for_admin()
if not user_id:
domain = domain_for_proxy()
user_list = b.execute_with_retries(
admin.users.list, domain=domain.id, name=username)
if len(user_list) == 0:
raise ex.NotFoundException(
value=username,
message_template=_('Failed to find user %s'))
if len(user_list) > 1:
raise ex.NotFoundException(
value=username,
message_template=_('Unexpected results found when searching '
'for user %s'))
user_id = user_list[0].id
b.execute_with_retries(admin.users.delete, user_id)
LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))
Developer: Chinmoy-Dey | Project: sahara | Lines: 27 | Source file: proxy.py
Example 15: generate_key_pair
def generate_key_pair(key_length=2048):
"""Create RSA key pair with specified number of bits in key.
Returns tuple of private and public keys.
"""
with tempfiles.tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'tempkey')
args = [
'ssh-keygen',
'-q', # quiet
'-N', '', # w/o passphrase
'-t', 'rsa', # create key of rsa type
'-f', keyfile, # filename of the key file
'-C', 'Generated-by-Sahara' # key comment
]
if key_length is not None:
args.extend(['-b', key_length])
processutils.execute(*args)
if not os.path.exists(keyfile):
raise ex.SystemError(_("Private key file hasn't been created"))
private_key = open(keyfile).read()
public_key_path = keyfile + '.pub'
if not os.path.exists(public_key_path):
raise ex.SystemError(_("Public key file hasn't been created"))
public_key = open(public_key_path).read()
return private_key, public_key
Developer: crobby | Project: sahara | Lines: 27 | Source file: crypto.py
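A short usage sketch; `generate_key_pair` shells out to `ssh-keygen`, so that binary must be on PATH:

# Usage sketch; both values come back as text read from the temp files.
private_key, public_key = generate_key_pair(key_length=4096)
assert public_key.startswith('ssh-rsa')
assert 'PRIVATE KEY' in private_key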
Example 16: job_binary_internal_create
def job_binary_internal_create(context, values):
"""Returns a JobBinaryInternal that does not contain a data field
The data column uses deferred loading.
"""
values["datasize"] = len(values["data"])
datasize_KB = values["datasize"] / 1024.0
if datasize_KB > CONF.job_binary_max_KB:
raise ex.DataTooBigException(
round(datasize_KB, 1), CONF.job_binary_max_KB,
_("Size of internal binary (%(size)sKB) is greater "
"than the maximum (%(maximum)sKB)"))
job_binary_int = m.JobBinaryInternal()
job_binary_int.update(values)
session = get_session()
try:
with session.begin():
session.add(job_binary_int)
except db_exc.DBDuplicateEntry as e:
raise ex.DBDuplicateEntry(
_("Duplicate entry for JobBinaryInternal: %s") % e.columns)
return job_binary_internal_get(context, job_binary_int.id)
Developer: shamim8888 | Project: sahara | Lines: 25 | Source file: api.py
Example 17: check_mains_libs
def check_mains_libs(data, **kwargs):
mains = data.get("mains", [])
libs = data.get("libs", [])
job_type, subtype = edp.split_job_type(data.get("type"))
streaming = (job_type == edp.JOB_TYPE_MAPREDUCE and
subtype == edp.JOB_SUBTYPE_STREAMING)
# These types must have a value in mains and may also use libs
if job_type in [edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE,
edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK,
edp.JOB_TYPE_STORM]:
if not mains:
if job_type in [edp.JOB_TYPE_SPARK, edp.JOB_TYPE_STORM]:
msg = _(
"%s job requires main application jar") % data.get("type")
else:
msg = _("%s flow requires main script") % data.get("type")
raise e.InvalidDataException(msg)
# Check for overlap
if set(mains).intersection(set(libs)):
raise e.InvalidDataException(_("'mains' and 'libs' overlap"))
else:
# Java and MapReduce require libs, but MapReduce.Streaming does not
if not streaming and not libs:
raise e.InvalidDataException(_("%s flow requires libs") %
data.get("type"))
if mains:
raise e.InvalidDataException(_("%s flow does not use mains") %
data.get("type"))
# Make sure that all referenced binaries exist
_check_binaries(mains)
_check_binaries(libs)
Developer: frgaudet | Project: sahara | Lines: 35 | Source file: job.py
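Two illustrative payloads for `check_mains_libs`, assuming the EDP job-type constants keep their usual string values ('Pig', 'Java') and that the referenced binary ids exist (the ids below are made up):

# Accepted: a Pig job must carry a main script.
check_mains_libs({"type": "Pig",
                  "mains": ["pig-script-binary-id"],
                  "libs": []})

# Rejected with InvalidDataException: a Java flow requires libs.
check_mains_libs({"type": "Java",
                  "mains": [],
                  "libs": []})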
Example 18: suspend_job
def suspend_job(job_execution_id):
ctx = context.ctx()
job_execution = conductor.job_execution_get(ctx, job_execution_id)
if job_execution.info['status'] not in edp.JOB_STATUSES_SUSPENDIBLE:
        raise e.SuspendingFailed(
            _("Suspending operation can not be performed"
              " on status: {status}").format(
                status=job_execution.info['status']))
cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
engine = get_job_engine(cluster, job_execution)
job_execution = conductor.job_execution_update(
ctx, job_execution_id, {
'info': {'status': edp.JOB_STATUS_TOBESUSPENDED}})
try:
job_info = engine.suspend_job(job_execution)
except Exception as ex:
job_info = None
conductor.job_execution_update(
ctx, job_execution_id, {'info': {
'status': edp.JOB_STATUS_SUSPEND_FAILED}})
        raise e.SuspendingFailed(
            _("Error during suspending of job execution: "
              "{error}").format(error=ex))
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
LOG.info("Job execution was suspended successfully")
return job_execution
conductor.job_execution_update(
ctx, job_execution_id, {'info': {
'status': edp.JOB_STATUS_SUSPEND_FAILED}})
    raise e.SuspendingFailed(
        _("Failed to suspend job execution "
          "{jid}").format(jid=job_execution_id))
Developer: openstack | Project: sahara | Lines: 31 | Source file: job_manager.py
Example 19: _validate_existing_ng_scaling
def _validate_existing_ng_scaling(self, cluster, existing):
scalable_processes = self._get_scalable_processes()
dn_to_delete = 0
for ng in cluster.node_groups:
if ng.id in existing:
if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
dn_to_delete += ng.count - existing[ng.id]
if not set(ng.node_processes).issubset(scalable_processes):
raise ex.NodeGroupCannotBeScaled(
ng.name,
_("Vanilla plugin cannot scale nodegroup" " with processes: %s") % " ".join(ng.node_processes),
)
dn_amount = len(vu.get_datanodes(cluster))
rep_factor = c_helper.get_config_value("HDFS", "dfs.replication", cluster)
if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
raise ex.ClusterCannotBeScaled(
cluster.name,
_(
"Vanilla plugin cannot shrink cluster because "
"it would be not enough nodes for replicas "
"(replication factor is %s)"
)
% rep_factor,
)
Developer: metasensus | Project: sahara | Lines: 26 | Source file: versionhandler.py
Example 20: url_for
def url_for(service_catalog, service_type, admin=False, endpoint_type=None):
if not endpoint_type:
endpoint_type = 'publicURL'
if admin:
endpoint_type = 'adminURL'
service = _get_service_from_catalog(service_catalog, service_type)
if service:
endpoints = service['endpoints']
if CONF.os_region_name:
endpoints = [e for e in endpoints
if e['region'] == CONF.os_region_name]
try:
return _get_endpoint_url(endpoints, endpoint_type)
except Exception:
raise ex.SystemError(
_("Endpoint with type %(type)s is not found for service "
"%(service)s")
% {'type': endpoint_type,
'service': service_type})
else:
raise ex.SystemError(
_('Service "%s" not found in service catalog') % service_type)
Developer: AlexanderYAPPO | Project: sahara | Lines: 25 | Source file: base.py
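A usage sketch with a hand-built catalog in the keystone-v2 shape this helper expects (one dict per service, each with an `endpoints` list); the service entry is made up, and the result depends on the `_get_service_from_catalog` and `_get_endpoint_url` helpers not shown here:

# Hypothetical catalog; url_for should return the matching publicURL.
catalog = [{
    'type': 'object-store',
    'endpoints': [{'region': 'RegionOne',
                   'publicURL': 'http://swift.example.com:8080/v1'}],
}]

swift_url = url_for(catalog, 'object-store')  # -> the publicURL above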
Note: the sahara.i18n._ function examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.