本文整理汇总了Python中starcluster.logger.log.info函数的典型用法代码示例。如果您正苦于以下问题:Python info函数的具体用法?Python info怎么用?Python info使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了info函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: execute
def execute(self, args):
    """Create an S3-backed AMI from a running instance.

    args must be [instance-id, image-name, bucket].  Logs the new AMI id
    on success; a Ctrl-C during bundling raises CancelledS3ImageCreation.
    """
    if "createimage" in sys.argv:
        warnings.warn("createimage is deprecated and will go away in the "
                      "next release. please use the s3image/ebsimage "
                      "commands instead", DeprecationWarning)
    if len(args) != 3:
        self.parser.error(
            'you must specify an instance-id, image name, and bucket')
    # FIX: removed dead "bucket = None" that was immediately overwritten
    # by the tuple unpacking below
    instanceid, image_name, bucket = args
    self.bucket = bucket
    self.image_name = image_name
    i = self.ec2.get_instance(instanceid)
    # the keypair's private key is needed to log in and bundle the image
    key_location = self.cfg.get_key(i.key_name).get('key_location')
    aws_user_id = self.cfg.aws.get('aws_user_id')
    ec2_cert = self.cfg.aws.get('ec2_cert')
    ec2_private_key = self.cfg.aws.get('ec2_private_key')
    try:
        ami_id = self.ec2.create_s3_image(instanceid, key_location,
                                          aws_user_id, ec2_cert,
                                          ec2_private_key, bucket,
                                          image_name=image_name,
                                          **self.specified_options_dict)
        log.info("Your new AMI id is: %s" % ami_id)
    except KeyboardInterrupt:
        # user aborted mid-bundle; surface a dedicated exception so the
        # caller can report what was left behind in the bucket
        raise exception.CancelledS3ImageCreation(self.bucket,
                                                 self.image_name)
开发者ID:FinchPowers,项目名称:StarCluster,代码行数:27,代码来源:s3image.py
示例2: start_nfs_server
def start_nfs_server(self):
    """Start portmap and the NFS server on this node and re-export all
    configured shares."""
    log.info("Starting NFS server on %s" % self.alias)
    # (command, tolerate-nonzero-exit) pairs, run in order over SSH
    startup_sequence = [
        ('/etc/init.d/portmap start', False),
        # rpc_pipefs may already be mounted; a failure here is not fatal
        ('mount -t rpc_pipefs sunrpc /var/lib/nfs/rpc_pipefs/', True),
        ('/etc/init.d/nfs start', False),
        ('/usr/sbin/exportfs -fra', False),
    ]
    for command, best_effort in startup_sequence:
        if best_effort:
            self.ssh.execute(command, ignore_exit_status=True)
        else:
            self.ssh.execute(command)
示例3: _upload_image
def _upload_image(self):
    """Upload the bundled image parts from /mnt to S3 via ec2-upload-bundle
    on the volume host (can take a while for large images)."""
    log.info('Uploading bundled image: (please be patient)')
    # bucket/prefix/credentials come from the pre-built config mapping
    upload_cmd = ('ec2-upload-bundle -b %(bucket)s '
                  '-m /mnt/%(prefix)s.manifest.xml -a %(access_key)s '
                  '-s %(secret_key)s' % self.config_dict)
    self.host_ssh.execute(upload_cmd, silent=False)
开发者ID:RobertAditazz,项目名称:StarCluster,代码行数:7,代码来源:image.py
示例4: run
def run(self):
    """
    As soon as a new node is ready, run the add plugins commands over it.
    """
    interval = self.cluster.refresh_interval
    log.info("Waiting for one of the new nodes to be up "
             "(updating every {}s)".format(interval))
    while True:
        self.ready_instances = []
        # Pump each stage of the provisioning pipeline, in order:
        # spot-request propagation -> spots -> instance propagation ->
        # NRM update -> instances -> reboot management -> ready nodes.
        self.stream_unpropagated_spots()
        self.stream_spots()
        self.stream_unpropagated_instances()
        self.stream_update_nrm()
        self.stream_instances()
        self.stream_manage_reboots()
        self.stream_ready_instances()
        # Keep looping while anything is still pending in any stage.
        if any([self.unpropagated_spots, self.spots,
                self.unpropagated_instances, self.instances]):
            if self.ready_instances:
                # ready_instances means nodes were added, that took
                # time so we should loop again now
                continue
            log.info("Sleeping for {} seconds".format(interval))
            time.sleep(interval)
        else:
            # nothing pending anywhere: all new nodes have been handled
            break
示例5: root_device_name
def root_device_name(self):
    """Return the instance's root device name, working around AMIs whose
    registered root device is absent from the block device map.

    If the AMI reports /dev/sda1 but the map only contains /dev/sda,
    /dev/sda is returned instead so that detach_external_volumes() does
    not try to detach the root volume.
    """
    root_dev = self.instance.root_device_name
    bmap = self.block_device_mapping
    if bmap and root_dev not in bmap and self.is_ebs_backed():
        # Hack for misconfigured AMIs (e.g. CentOS 6.3 Marketplace) These
        # AMIs have root device name set to /dev/sda1 but no /dev/sda1 in
        # block device map - only /dev/sda. These AMIs somehow magically
        # work so check if /dev/sda exists and return that instead to
        # prevent detach_external_volumes() from trying to detach the root
        # volume on these AMIs.
        log.warn("Root device %s is not in the block device map" %
                 root_dev)
        log.warn("This means the AMI was registered with either "
                 "an incorrect root device name or an incorrect block "
                 "device mapping")
        sda, sda1 = '/dev/sda', '/dev/sda1'
        if root_dev == sda1:
            log.info("Searching for possible root device: %s" % sda)
            if sda in self.block_device_mapping:
                log.warn("Found '%s' - assuming its the real root device" %
                         sda)
                root_dev = sda
            else:
                # neither candidate is mapped; fall through with the
                # original (unmapped) root device name
                log.warn("Device %s isn't in the block device map either" %
                         sda)
    return root_dev
开发者ID:ZhuJiahui,项目名称:StarCluster,代码行数:26,代码来源:node.py
示例6: list_all_instances
def list_all_instances(self, show_terminated=False):
    """Print a plain-text summary of every EC2 instance in the account.

    show_terminated -- include instances in the 'terminated' state
    Attribute values that are empty/None are rendered as 'N/A'.
    """
    reservations = self.conn.get_all_instances()
    if not reservations:
        log.info("No instances found")
    for res in reservations:
        # security groups belong to the reservation, not the instance
        groups = ', '.join([ g.id for g in res.groups]) or 'N/A'
        for instance in res.instances:
            if instance.state == 'terminated' and not show_terminated:
                continue
            # NOTE: 'id' shadows the builtin here; kept for compatibility
            id = instance.id or 'N/A'
            dns_name = instance.dns_name or 'N/A'
            private_dns_name = instance.private_dns_name or 'N/A'
            state = instance.state or 'N/A'
            private_ip = instance.private_ip_address or 'N/A'
            public_ip = instance.ip_address or 'N/A'
            zone = instance.placement or 'N/A'
            ami = instance.image_id or 'N/A'
            keypair = instance.key_name or 'N/A'
            # Python 2 print statements (this codebase predates print())
            print "id: %s" % id
            print "dns_name: %s" % dns_name
            print "private_dns_name: %s" % private_dns_name
            print "state: %s" % state
            print "public_ip: %s" % public_ip
            print "private_ip: %s" % private_ip
            print "zone: %s" % zone
            print "ami: %s" % ami
            print "groups: %s" % groups
            print "keypair: %s" % keypair
            # blank line between instances
            print
示例7: _setup_ebs_volume
def _setup_ebs_volume(self):
    """ Mount EBS volume, if specified, in ~/.starclustercfg to /home"""
    # setup /etc/fstab on master to use block device if specified
    for vol in self._volumes:
        vol = self._volumes[vol]
        vol_id = vol.get("volume_id")
        device = vol.get("device")
        volume_partition = vol.get("partition")
        mount_path = vol.get("mount_path")
        # volume id, partition and mount path are all required;
        # incomplete entries are silently skipped
        if vol_id and volume_partition and mount_path:
            log.info("Mounting EBS volume %s on %s..." % (vol_id, mount_path))
            mconn = self._master.ssh
            if not mconn.path_exists(device):
                log.warn("Cannot find device %s for volume %s" % (device, vol))
                log.warn("Not mounting %s on %s" % (vol_id, mount_path))
                # FIX: the concatenated message fragments were missing a
                # separating space ("problemattaching")
                log.warn("This usually means there was a problem "
                         "attaching the EBS volume to the master node")
                continue
            if not mconn.path_exists(volume_partition):
                log.warn("Cannot find partition %s on volume %s" %
                         (volume_partition, vol_id))
                log.warn("Not mounting %s on %s" % (vol_id, mount_path))
                # FIX: missing spaces between fragments here as well
                # ("beenpartitioned", "specifieddoes")
                log.warn("This either means that the volume has not been "
                         "partitioned or that the partition specified "
                         "does not exist on the volume")
                continue
            # append (mode="a") an fstab entry so the mount persists
            master_fstab = mconn.remote_file("/etc/fstab", mode="a")
            print >> master_fstab, "%s %s auto noauto,defaults 0 0 " % (volume_partition, mount_path)
            master_fstab.close()
            mconn.execute("mkdir -p %s" % mount_path)
            mconn.execute("mount %s" % mount_path)
示例8: execute
def execute(self, args):
    """Create a new AMI from an existing instance.

    args must be [instance-id, image-name, bucket].  If the instance
    belongs to a StarCluster-managed security group, warn and ask for
    interactive confirmation first (unless --confirm was given).
    """
    if len(args) != 3:
        self.parser.error('you must specify an instance-id, image name, and bucket')
    instanceid, image_name, bucket = args
    self.bucket = bucket
    self.image_name = image_name
    cfg = self.cfg
    ec2 = cfg.get_easy_ec2()
    i = ec2.get_instance(instanceid)
    if not self.opts.confirm:
        for group in i.groups:
            # StarCluster-created groups share a common name prefix
            if group.id.startswith(static.SECURITY_GROUP_PREFIX):
                log.warn("Instance %s is a StarCluster instance" % i.id)
                print
                log.warn("Creating an image from a StarCluster instance " + \
                    "can lead to problems when attempting to use the resulting " + \
                    "image with StarCluster later on")
                print
                log.warn(
                    "The recommended way to re-image a StarCluster AMI is " + \
                    "to launch a single instance using either ElasticFox, the " +\
                    "EC2 command line tools, or the AWS management console. " +\
                    "Then login to the instance, modify it, and use this " + \
                    "command to create a new AMI from it.")
                print
                # Python 2 raw_input; anything but y/Y/yes aborts
                resp = raw_input("Continue anyway (y/n)? ")
                if resp not in ['y','Y','yes']:
                    log.info("Aborting...")
                    sys.exit(1)
                # one confirmation is enough regardless of group count
                break
    self.catch_ctrl_c()
    ami_id = image.create_image(instanceid, image_name, bucket, cfg,
                                **self.specified_options_dict)
    log.info("Your new AMI id is: %s" % ami_id)
示例9: update_dns
def update_dns(self, host_name, ip_address):
    """Repoint the Route53 A record for host_name at ip_address.

    The host name is qualified with this plugin's domain.  Any existing
    A record is deleted first, then a fresh record with a short TTL is
    created.  Route53 failures are logged as warnings, not raised.
    """
    ttl = 10
    host_name = ".".join([host_name, self.domain])
    conn = boto.connect_route53()
    rrsets = conn.get_all_rrsets(self.hosted_zone_id, 'A', host_name,
                                 maxitems=1)
    if len(rrsets):
        existing = rrsets[0]
        comment = "Starcluster route53 plugin deleted record for %s" % (host_name)
        delete_set = ResourceRecordSets(conn, self.hosted_zone_id, comment)
        deletion = delete_set.add_change("DELETE", host_name, 'A',
                                         existing.ttl)
        # DELETE must echo the record's current values exactly
        for current_value in existing.resource_records:
            deletion.add_value(current_value)
        try:
            delete_set.commit()
            log.info(comment)
        except Exception as e:
            log.warning(e)
    comment = "Starcluster route53 plugin updated record for %s to %s" % (host_name, ip_address)
    create_set = ResourceRecordSets(conn, self.hosted_zone_id, comment)
    creation = create_set.add_change("CREATE", host_name, 'A', ttl)
    creation.add_value(ip_address)
    try:
        create_set.commit()
        log.info(comment)
    except Exception as e:
        log.warning(e)
示例10: run
def run(self, nodes, master, user, user_shell, volumes):
    """Start an IPython parallel cluster across all nodes.

    The controller plus some engines run on the master; every non-master
    node contributes one engine per processor (started in parallel via
    the worker pool).  Optionally starts the notebook server as well.
    """
    self._check_ipython_installed(master)
    user_home = master.getpwnam(user).pw_dir
    profile_dir = posixpath.join(user_home, '.ipython', 'profile_default')
    # run the remaining setup as the cluster user, not root
    master.ssh.switch_user(user)
    self._write_config(master, user, profile_dir)
    # Start the cluster and some engines on the master (leave 1
    # processor free to handle cluster house keeping)
    cfile, n_engines_master = self._start_cluster(master, profile_dir)
    # Start engines on each of the non-master nodes
    non_master_nodes = [node for node in nodes if not node.is_master()]
    for node in non_master_nodes:
        self.pool.simple_job(
            _start_engines, (node, user, node.num_processors),
            jobid=node.alias)
    n_engines_non_master = sum(node.num_processors
                               for node in non_master_nodes)
    if len(non_master_nodes) > 0:
        log.info("Adding %d engines on %d nodes",
                 n_engines_non_master, len(non_master_nodes))
        # block until every non-master engine job has finished
        self.pool.wait(len(non_master_nodes))
    if self.enable_notebook:
        self._start_notebook(master, user, profile_dir)
    n_engines_total = n_engines_master + n_engines_non_master
    log.info(STARTED_MSG % dict(cluster=master.parent_cluster,
                                user=user, connector_file=cfile,
                                key_location=master.key_location,
                                n_engines=n_engines_total,
                                n_nodes=len(nodes)))
    # restore root for any plugins that run after this one
    master.ssh.switch_user('root')
示例11: run
def run(self, nodes, master, user, user_shell, volumes):
    """Install the project on every node, then configure it for the user.

    Pass 1 (as root): install pip dependencies, re-clone the repo, run a
    develop install, and chown the checkout to the cluster user.
    Pass 2 (as the user): force matplotlib's non-interactive Agg backend.

    FIX: removed the dead 'pass' statements and the bare trailing
    'return' that cluttered the original loops (no behavior change).
    """
    # set up some paths
    repo_dir = get_repo_dir(user)
    setup_script = get_setup_script(user)
    for node in nodes:
        # NOTE: nodes includes master
        log.info("Installing %s as root on %s" % (project_name, node.alias))
        cmd_strs = [
            # FIXME: do this somewhere else
            'pip install pyparsing==2.0.1',
            'pip install patsy',
            'pip install statsmodels',
            'rm -rf %s' % repo_dir,
            'git clone %s %s' % (repo_url, repo_dir),
            'python %s develop' % setup_script,
            # 'python %s build_ext --inplace' % setup_script,
            'chown -R %s %s' % (user, repo_dir),
        ]
        for cmd_str in cmd_strs:
            # redirect output so noisy installs don't flood the session
            node.ssh.execute(cmd_str + ' >out 2>err')
    for node in nodes:
        log.info("Setting up %s as %s on %s" % (project_name, user, node.alias))
        cmd_strs = [
            'mkdir -p ~/.matplotlib',
            'echo backend: Agg > ~/.matplotlib/matplotlibrc',
        ]
        for cmd_str in cmd_strs:
            node.shell(user=user, command=cmd_str)
示例12: run
def run(self, nodes, master, user, user_shell, volumes):
    """
    Mount NFS shares on master and all nodes
    """
    log.info("Running plugin automount")
    log.debug("automount.NfsShares.run automount.NfsShares.run(nodes, master, user, user_shell, volumes)")
    #### OPEN NFS-RELATED PORTS FOR THIS CLUSTER
    self.openNfsPorts("default")
    self.openNfsPorts('@sc-' + self.cluster)
    #### SET HEAD NODE INTERNAL IP
    self.getHeadIp();
    #### FIX mountd PORT ON head AND MASTER/NODES
    # mountd normally binds a random port; pin it so firewall rules
    # opened above stay valid
    mountdport = "32767"
    for node in nodes:
        self.setMountdOnNode(node, mountdport)
    self.setMountdOnHead(mountdport)
    self.restartServicesOnHead()
    #### MOUNT ON ALL NODES
    for node in nodes:
        self.mount(node)
    log.info("Completed plugin automount")
示例13: openNfsPorts
def openNfsPorts(self, group):
    """
    Open (fixed) NFS-related ports (portmap, nfs and mountd)
    """
    portmapport = self.portmapport
    nfsport = self.nfsport
    mountdport = self.mountdport
    log.info("Opening NFS-related ports for group: %s", group)
    log.debug("automount.openNfsPorts group; %s", group)
    log.debug("automount.openNfsPorts portmapport; %s", portmapport)
    log.debug("automount.openNfsPorts nfsport; %s", nfsport)
    log.debug("automount.openNfsPorts mountdport; %s", mountdport)
    # one tcp and one udp rule per port, in the order nfs/portmap/mountd
    permissions = [dict(group=group, port=port, type=proto)
                   for port in (nfsport, portmapport, mountdport)
                   for proto in ("tcp", "udp")]
    #### OPEN PORTS FROM HEAD NODE (NO SSH FROM MASTER)
    for command in self.setPortCommands(group, permissions):
        self.runSystemCommand(command)
示例14: create
def create(self, volume_size, volume_zone, name=None, tags=None):
    """Create, attach, format and tag a new EBS volume.

    volume_size -- size in GB
    volume_zone -- availability zone to create the volume in
    name -- optional value for the volume's "Name" tag
    tags -- optional mapping of extra tags to apply

    Returns the new volume object.  On any failure the partially
    created volume is deleted and the exception re-raised.
    """
    try:
        self.validate(volume_size, volume_zone, self._aws_block_device)
        # a temporary "volume host" instance is used to format the volume
        instance = self._request_instance(volume_zone)
        # fail early if the mkfs binary isn't available on the host
        self._validate_required_progs([self._mkfs_cmd.split()[0]])
        self._determine_device()
        vol = self._create_volume(volume_size, volume_zone)
        if tags:
            for tag in tags:
                tagval = tags.get(tag)
                tagmsg = "Adding volume tag: %s" % tag
                if tagval:
                    tagmsg += "=%s" % tagval
                log.info(tagmsg)
                vol.add_tag(tag, tagval)
        if name:
            vol.add_tag("Name", name)
        self._attach_volume(self._volume, instance.id,
                            self._aws_block_device)
        self._get_volume_device(self._aws_block_device)
        self._format_volume()
        self.shutdown()
        log.info("Your new %sGB volume %s has been created successfully" %
                 (volume_size, vol.id))
        return vol
    except Exception:
        log.error("Failed to create new volume", exc_info=True)
        # clean up the half-created volume before propagating the error
        self._delete_new_volume()
        raise
    finally:
        # always runs: remind the user about any leftover host instances
        self._warn_about_volume_hosts()
示例15: run_cmd
def run_cmd(node, cmd, user, silent=True):
    """Run cmd on node over SSH as user, restoring root afterwards.

    FIX: the log format string had been mangled into
    "%[email protected]%s: %s" (an email-obfuscation/scraping artifact),
    which raises ValueError at runtime ('[' is not a valid format
    character); restored the intended "user@host: cmd" form.
    """
    log.info("%s@%s: %s" % (user, node.alias, cmd))
    if user != 'root':
        node.ssh.switch_user(user)
    node.ssh.execute(cmd, silent=silent)
    if user != 'root':
        node.ssh.switch_user('root')
示例16: run
def run(self, nodes, master, user, user_shell, volumes):
    """Append self.var_str to each node's .bashrc (and, if configured,
    to self.envar_location), with single quotes escaped for the shell.

    FIX: the quote-escaping replace() was evaluated up to twice per node
    inside the loop; it is invariant, so it is now computed once.
    """
    if not self.var_str == "":
        # escape embedded single quotes so the echo payload survives
        escaped = self.var_str.replace('\'', '\\\'')
        for node in nodes:
            log.info("Adding vars to: %s " % node.alias)
            node.ssh.execute('echo \'' + escaped + '\' >> .bashrc')
            if self.envar_location is not None:
                node.ssh.execute('echo \'' + escaped + '\' >> ' + self.envar_location)
示例17: wrap_f
def wrap_f(func, *arg, **kargs):
    """Call func with the given args, log the elapsed wall-clock time in
    minutes (tagged with the enclosing 'prefix'), and return the result."""
    started = time.time()
    result = func(*arg, **kargs)
    elapsed = time.time() - started
    log.info('%s took %0.3f mins' % (prefix, elapsed / 60.0))
    return result
示例18: export_fs_to_nodes
def export_fs_to_nodes(self, nodes, export_paths):
    """
    Export each path in export_paths to each node in nodes via NFS
    nodes - list of nodes to export each path to
    export_paths - list of paths on this remote host to export to each node
    Example:
    # export /home and /opt/sge6 to each node in nodes
    $ node.start_nfs_server()
    $ node.export_fs_to_nodes(nodes=[node1,node2],
    export_paths=['/home', '/opt/sge6'])
    """
    # setup /etc/exports
    log.info("Configuring NFS exports path(s):\n%s" %
             ' '.join(export_paths))
    nfs_export_settings = "(async,no_root_squash,no_subtree_check,rw)"
    # read the current file first so duplicate lines aren't appended
    etc_exports = self.ssh.remote_file('/etc/exports', 'r')
    contents = etc_exports.read()
    etc_exports.close()
    # reopen in append mode to add only the missing entries
    etc_exports = self.ssh.remote_file('/etc/exports', 'a')
    for node in nodes:
        for path in export_paths:
            export_line = ' '.join(
                [path, node.alias + nfs_export_settings + '\n'])
            if export_line not in contents:
                etc_exports.write(export_line)
    etc_exports.close()
    # re-export all filesystems so the new entries take effect
    self.ssh.execute('exportfs -fra')
示例19: execute
def execute(self, args):
    """Stop (EBS-backed) or terminate one or more clusters by name.

    Unless --confirm was passed, each cluster requires interactive
    confirmation.  EBS-backed clusters are merely stopped, so the user
    is warned that stopped EBS volumes continue to accrue charges.
    """
    if not args:
        self.parser.error("please specify a cluster")
    for cluster_name in args:
        cl = self.cm.get_cluster(cluster_name)
        is_ebs = cl.is_ebs_cluster()
        if not self.opts.confirm:
            # pick the prompt wording that matches what will happen
            action = "Terminate"
            if is_ebs:
                action = "Stop EBS"
                if cl.spot_bid:
                    # spot instances can't be stopped, only terminated
                    action = "Terminate Spot EBS"
            resp = raw_input("%s cluster %s (y/n)? " %
                             (action, cluster_name))
            if resp not in ['y', 'Y', 'yes']:
                log.info("Aborting...")
                # skip this cluster but keep processing the rest
                continue
        cl.stop_cluster()
        if is_ebs and cl._nodes:
            log.warn(("All EBS-backed nodes in '%s' are now in a " + \
                      "'stopped' state") % cluster_name)
            log.warn("You can restart this cluster by passing -x " + \
                     "to the 'start' command")
            log.warn("Use the 'terminate' command to *completely* " + \
                     "terminate this cluster")
            log.warn("NOTE: Unless EBS-backed nodes are in a " + \
                     "'running' or 'terminated'")
            log.warn("state, you are charged for the EBS volumes " + \
                     "backing the nodes.")
示例20: _install_combblas
def _install_combblas(self, node):
    """Download, patch, build and install CombBLAS on node, then remove
    the build tree."""
    log.info("\tInstalling CombBLAS")
    fetch_steps = [
        "wget -O combblas.tgz %s" % self.combblas_source,
        "tar xvfz combblas.tgz",
        "rm combblas.tgz",
    ]
    self._follow_instructions(fetch_steps, node)
    # Expects the combblas.patch file to be in the same directory as this
    # source file
    patch_path = os.path.dirname(
        inspect.getsourcefile(SkylarkInstaller)) + '/combblas.patch'
    log.info(patch_path)
    node.ssh.put(patch_path, 'CombBLAS/combblas.patch')
    build_steps = [
        "cd CombBLAS",
        "yes | git apply --ignore-space-change --ignore-whitespace combblas.patch",
        "rm combblas.patch",
        "cmake .",
        "make -j %s" % self.nproc,
        "cp *.so /usr/local/lib",
        "mkdir /usr/local/include/CombBLAS",
        "cp *.h /usr/local/include/CombBLAS",
        "cp *.cpp /usr/local/include/CombBLAS",
        "cp -R SequenceHeaps /usr/local/include/CombBLAS",
        "cp -R psort-1.0 /usr/local/include/CombBLAS",
        "cp -R graph500-1.2 /usr/local/include/CombBLAS",
        "cd ..",
        "rm -r CombBLAS",
    ]
    self._follow_instructions(build_steps, node)
注:本文中的starcluster.logger.log.info函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论