This article collects typical usage examples of the Python settings.getnodes function. If you have been wondering how to call getnodes in Python, what it does, or what real-world usage looks like, the curated examples here may help.
Below are 20 code examples of the getnodes function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
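Before the examples, here is a minimal, hypothetical sketch of how settings.getnodes is used throughout CBT (the Ceph benchmarking tool these snippets come from): it takes one or more node-group names such as 'head', 'clients', or 'osds' and returns a comma-separated string of user@host entries that can be passed straight to common.pdsh. The _cluster dictionary and host names below are made up purely for illustration; the real settings module derives these groups from the loaded cluster configuration.

# Hypothetical stand-in for the real CBT settings module, only to illustrate
# the calling convention used in the examples below.
_cluster = {
    'head':    ['root@head01'],
    'clients': ['root@client01', 'root@client02'],
    'osds':    ['root@osd01', 'root@osd02'],
}

def getnodes(*groups):
    # Return a comma-separated string of user@host entries for the given
    # groups, which is the form that common.pdsh() expects.
    nodes = []
    for group in groups:
        nodes.extend(_cluster.get(group, []))
    return ','.join(nodes)

# Typical call patterns seen in the examples:
print(getnodes('clients'))                  # root@client01,root@client02
print(getnodes('clients', 'osds', 'head'))  # all nodes in one comma-separated string
for node in getnodes('clients').split(','):
    host = node.rpartition('@')[2]          # strip the user@ prefix, as several examples do
    print(host)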
Example 1: run
def run(self):
    super(KvmRbdFio, self).run()
    # We'll always drop caches for rados bench
    self.dropcaches()
    monitoring.start(self.run_dir)
    time.sleep(5)
    names = ""
    for i in xrange(self.concurrent_procs):
        names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
    out_file = '%s/output' % self.run_dir
    pre_cmd = 'sudo fio --rw=read -ioengine=sync --numjobs=1 --bs=4M --runtime=1 --size %dM %s > /dev/null' % (self.vol_size * 9/10, names)
    fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
    print 'Attempting to populate fio files...'
    common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
    print 'Running rbd fio %s test.' % self.mode
    common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
    # ps = []
    # for i in xrange(self.concurrent_procs):
    #     out_file = '%s/output.%s' % (self.run_dir, i)
    #     p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
    #     ps.append(p)
    # for p in ps:
    #     p.wait()
    monitoring.stop(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer ID: cityyard, Project: ceph-tools, Lines: 27, Source: kvmrbdfio.py
Example 2: initialize
def initialize(self):
    common.cleanup_tests()
    if not self.use_existing:
        common.setup_cluster()
        common.setup_ceph()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
    # Setup the pools
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    for i in xrange(self.concurrent_procs):
        for node in settings.getnodes('clients').split(','):
            node = node.rpartition("@")[2]
            common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rados-bench-%s-%s %d %d' % (node, i, self.pgs_per_pool, self.pgs_per_pool)).communicate()
            common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rados-bench-%s-%s size 1' % (node, i)).communicate()
    # check the health for each pool.
    print 'Checking health after pool creation.'
    common.check_health()
    monitoring.stop()
    print 'Running scrub monitoring.'
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    common.check_scrub()
    monitoring.stop()
    print 'Pausing for 60s for idle monitoring.'
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
    return True
Developer ID: cityyard, Project: ceph-tools, Lines: 35, Source: radosbench.py
Example 3: rmpool
def rmpool(self, name, profile_name):
    pool_profiles = self.config.get("pool_profiles", {"default": {}})
    profile = pool_profiles.get(profile_name, {})
    cache_profile = profile.get("cache_profile", None)
    if cache_profile:
        cache_name = "%s-cache" % name
        # flush and remove the overlay and such
        common.pdsh(
            settings.getnodes("head"),
            "sudo ceph -c %s osd tier cache-mode %s forward" % (self.tmp_conf, cache_name),
        ).communicate()
        common.pdsh(
            settings.getnodes("head"), "sudo rados -c %s -p %s cache-flush-evict-all" % (self.tmp_conf, cache_name)
        ).communicate()
        common.pdsh(
            settings.getnodes("head"), "sudo ceph -c %s osd tier remove-overlay %s" % (self.tmp_conf, name)
        ).communicate()
        common.pdsh(
            settings.getnodes("head"), "sudo ceph -c %s osd tier remove %s %s" % (self.tmp_conf, name, cache_name)
        ).communicate()
        # delete the cache pool
        self.rmpool(cache_name, cache_profile)
    common.pdsh(
        settings.getnodes("head"),
        "sudo ceph -c %s osd pool delete %s %s --yes-i-really-really-mean-it" % (self.tmp_conf, name, name),
    ).communicate()
Developer ID: athenahealth, Project: cbt, Lines: 28, Source: ceph.py
Example 4: pre
def pre(self):
    pre_time = self.config.get("pre_time", 60)
    common.pdsh(settings.getnodes('head'), self.logcmd('Starting Recovery Test Thread, waiting %s seconds.' % pre_time)).communicate()
    time.sleep(pre_time)
    lcmd = self.logcmd("Setting the ceph osd noup flag")
    common.pdsh(settings.getnodes('head'), '%s -c %s osd set noup;%s' % (self.ceph_cmd, self.cluster.tmp_conf, lcmd)).communicate()
    self.state = 'markdown'
Developer ID: rldleblanc, Project: cbt, Lines: 7, Source: ceph.py
Example 5: initialize
def initialize(self):
    pass
    # self.cleanup()
    # super(KvmRbdFio, self).initialize()
    common.setup_cluster()
    # common.setup_ceph()
    # Setup the pools
    # common.pdsh(settings.cluster.get('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
    # common.pdsh(settings.cluster.get('head'), 'sudo ceph osd pool set rbdfio size 1').communicate()
    # print 'Checking health after pool creation.'
    # common.check_health()
    # common.pdsh(settings.cluster.get('clients'), 'sudo modprobe rbd').communicate()
    # for i in xrange(self.concurrent_procs):
    names = ""
    for i in xrange(self.concurrent_procs):
        letter = string.ascii_lowercase[i+1]
        # common.pdsh(settings.cluster.get('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
        # common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d --pool rbdfio --id admin' % i).communicate()
        # common.pdsh(settings.cluster.get('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/vd%s' % letter).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/vd%s /srv/rbdfio-`hostname -s`-%d' % (letter, i)).communicate()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
Developer ID: cityyard, Project: ceph-tools, Lines: 27, Source: kvmrbdfio.py
Example 6: initialize
def initialize(self):
    super(RawFio, self).initialize()
    common.pdsh(settings.getnodes('clients'),
                'sudo rm -rf %s' % self.run_dir,
                continue_if_error=False).communicate()
    common.make_remote_dir(self.run_dir)
    clnts = settings.getnodes('clients')
    logger.info('creating mountpoints...')
    logger.info('Attempting to initialize fio files...')
    initializer_list = []
    for i in range(self.concurrent_procs):
        b = self.block_devices[i % len(self.block_devices)]
        fiopath = b
        pre_cmd = 'sudo %s --rw=write -ioengine=%s --bs=%s ' % (self.fio_cmd, self.ioengine, self.op_size)
        pre_cmd = '%s --size %dM --name=%s --output-format=%s > /dev/null' % (
            pre_cmd, self.vol_size, fiopath, self.fio_out_format)
        initializer_list.append(common.pdsh(clnts, pre_cmd,
                                            continue_if_error=False))
    for p in initializer_list:
        p.communicate()
    # Create the run directory
    common.pdsh(clnts, 'rm -rf %s' % self.run_dir,
                continue_if_error=False).communicate()
    common.make_remote_dir(self.run_dir)
Developer ID: bengland2, Project: cbt, Lines: 26, Source: rawfio.py
Example 7: mkimages
def mkimages(self):
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    for node in settings.getnodes('clients').split(','):
        node = node.rpartition("@")[2]
        common.pdsh(settings.getnodes('head'), '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s' % (node, self.vol_size, self.poolname, self.vol_order)).communicate()
    monitoring.stop()
Developer ID: FrankLikuohao, Project: ceph-tools, Lines: 8, Source: librbdfio.py
Example 8: markdown
def markdown(self):
    for osdnum in self.config.get('osds'):
        lcmd = self.logcmd("Marking OSD %s down." % osdnum)
        common.pdsh(settings.getnodes('head'), '%s -c %s osd down %s;%s' % (self.ceph_cmd, self.cluster.tmp_conf, osdnum, lcmd)).communicate()
        lcmd = self.logcmd("Marking OSD %s out." % osdnum)
        common.pdsh(settings.getnodes('head'), '%s -c %s osd out %s;%s' % (self.ceph_cmd, self.cluster.tmp_conf, osdnum, lcmd)).communicate()
    common.pdsh(settings.getnodes('head'), self.logcmd('Waiting for the cluster to break and heal')).communicate()
    self.state = 'osdout'
Developer ID: rldleblanc, Project: cbt, Lines: 9, Source: ceph.py
Example 9: mkimages
def mkimages(self):
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    common.pdsh(settings.getnodes('clients'), '/usr/bin/rbd create cbt-kernelrbdfio-`hostname -s` --size %s --pool %s' % (self.vol_size, self.poolname)).communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo rbd map cbt-kernelrbdfio-`hostname -s` --pool %s --id admin' % self.poolname).communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s`').communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir).communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s` %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir).communicate()
    monitoring.stop()
Developer ID: sirspock, Project: cbt, Lines: 10, Source: rbdfio.py
Example 10: stop
def stop(directory=None):
    nodes = settings.getnodes('clients', 'osds', 'mons', 'rgws')
    common.pdsh(nodes, 'pkill -SIGINT -f collectl').communicate()
    common.pdsh(nodes, 'sudo pkill -SIGINT -f perf_3.6').communicate()
    common.pdsh(settings.getnodes('osds'), 'sudo pkill -SIGINT -f blktrace').communicate()
    if directory:
        sc = settings.cluster
        common.pdsh(nodes, 'cd %s/perf;sudo chown %s.%s perf.data' % (directory, sc.get('user'), sc.get('user')))
        make_movies(directory)
Developer ID: JevonQ, Project: cbt, Lines: 10, Source: monitoring.py
Example 11: _run
def _run(self, mode, run_dir, out_dir):
    # We'll always drop caches for rados bench
    self.dropcaches()
    if self.concurrent_ops:
        concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
    # determine rados version
    rados_version_str, err = common.pdsh(settings.getnodes('head'), '/usr/bin/rados -v').communicate()
    m = re.findall("version (\d+)", rados_version_str)
    rados_version = int(m[0])
    if mode in ['write'] or rados_version < 9:
        op_size_str = '-b %s' % self.op_size
    else:
        op_size_str = ''
    common.make_remote_dir(run_dir)
    # dump the cluster config
    self.cluster.dump_config(run_dir)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(run_dir, recovery_callback)
    # Run rados bench
    monitoring.start(run_dir)
    logger.info('Running radosbench %s test.' % mode)
    ps = []
    for i in xrange(self.concurrent_procs):
        out_file = '%s/output.%s' % (run_dir, i)
        objecter_log = '%s/objecter.%s.log' % (run_dir, i)
        # default behavior is to use a single storage pool
        pool_name = self.pool
        run_name = '--run-name %s`hostname -s`-%s' % (self.object_set_id, i)
        if self.pool_per_proc:
            # support previous behavior of 1 storage pool per rados process
            pool_name = 'rados-bench-`hostname -s`-%s' % i
            run_name = ''
        rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
            (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
        p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(run_dir)
    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(run_dir)
    common.sync_files('%s/*' % run_dir, out_dir)
Developer ID: JevonQ, Project: cbt, Lines: 55, Source: radosbench.py
Example 12: osdin
def osdin(self):
    # Wait until the cluster is healthy.
    ret = self.cluster.check_health(self.health_checklist, "%s/recovery.log" % self.config.get('run_dir'))
    if self.inhealthtries < self.maxhealthtries and ret == 0:
        self.inhealthtries = self.inhealthtries + 1
        return  # Cluster hasn't become unhealthy yet.
    if ret == 0:
        common.pdsh(settings.getnodes('head'), self.logcmd('Cluster never went unhealthy.')).communicate()
    else:
        common.pdsh(settings.getnodes('head'), self.logcmd('Cluster appears to have healed.')).communicate()
    self.state = "post"
Developer ID: rldleblanc, Project: cbt, Lines: 12, Source: ceph.py
Example 13: mkimages
def mkimages(self):
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    for node in settings.getnodes("clients").split(","):
        for volnum in xrange(0, self.volumes_per_client):
            node = node.rpartition("@")[2]
            common.pdsh(
                settings.getnodes("head"),
                "/usr/bin/rbd create cbt-librbdfio-%s-%d --size %s --pool %s --order %s"
                % (node, volnum, self.vol_size, self.poolname, self.vol_order),
            ).communicate()
    monitoring.stop()
Developer ID: athenahealth, Project: cbt, Lines: 13, Source: librbdfio.py
Example 14: pre
def pre(self):
    pre_time = self.config.get("pre_time", 60)
    common.pdsh(settings.getnodes('head'), self.logcmd('Starting Recovery Test Thread, waiting %s seconds.' % pre_time)).communicate()
    time.sleep(pre_time)
    lcmd = self.logcmd("Setting the ceph osd noup flag")
    common.pdsh(settings.getnodes('head'), 'ceph -c %s osd set noup;%s' % (self.cluster.tmp_conf, lcmd)).communicate()
    for osdnum in self.config.get('osds'):
        lcmd = self.logcmd("Marking OSD %s down." % osdnum)
        common.pdsh(settings.getnodes('head'), 'ceph -c %s osd down %s;%s' % (self.cluster.tmp_conf, osdnum, lcmd)).communicate()
        lcmd = self.logcmd("Marking OSD %s out." % osdnum)
        common.pdsh(settings.getnodes('head'), 'ceph -c %s osd out %s;%s' % (self.cluster.tmp_conf, osdnum, lcmd)).communicate()
    common.pdsh(settings.getnodes('head'), self.logcmd('Waiting for the cluster to break and heal')).communicate()
    self.state = 'osdout'
Developer ID: hjwsm1989, Project: ceph-tools, Lines: 14, Source: ceph.py
Example 15: initialize
def initialize(self):
    self.cleanup()
    super(RbdFio, self).initialize()
    common.setup_cluster()
    common.setup_ceph()
    common.dump_config(self.run_dir)
    # Setup the pools
    common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool create rbdfio %d %d' % (self.pgs, self.pgs)).communicate()
    common.pdsh(settings.getnodes('head'), 'sudo ceph osd pool set rbdfio size 1').communicate()
    print 'Checking health after pool creation.'
    common.check_health()
    common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
    for i in xrange(self.concurrent_procs):
        common.pdsh(settings.getnodes('clients'), 'sudo rbd create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (i, self.vol_size)).communicate()
        # common.pdsh(settings.cluster.get('clients'), 'sudo rbd map rbdfio-`hostname -s`-%d --pool rbdfio --id admin' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d /srv/rbdfio-`hostname -s`-%d' % (i, i)).communicate()
    common.check_scrub()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
Developer ID: cityyard, Project: ceph-tools, Lines: 25, Source: rbdfio.py
Example 16: cleanup_tests
def cleanup_tests():
    clients = settings.getnodes('clients')
    rgws = settings.getnodes('rgws')
    nodes = settings.getnodes('clients', 'servers', 'mons', 'rgws')
    pdsh(clients, 'sudo killall -9 rados;sudo killall -9 rest-bench').communicate()
    if rgws:
        pdsh(rgws, 'sudo killall -9 radosgw-admin').communicate()
    pdsh(nodes, 'sudo killall -9 pdcp').communicate()
    # cleanup the tmp_dir
    tmp_dir = settings.cluster.get("tmp_dir")
    print 'Deleting %s' % tmp_dir
    pdsh(nodes, 'rm -rf %s' % tmp_dir).communicate()
Developer ID: cityyard, Project: ceph-tools, Lines: 14, Source: common.py
Example 17: initialize
def initialize(self):
    super(RbdFio, self).initialize()
    self.cleanup()
    if not self.use_existing:
        self.cluster.initialize()
    self.cluster.dump_config(self.run_dir)
    # Setup the pools
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool create rbdfio %d %d' % (self.tmp_conf, self.pgs, self.pgs)).communicate()
    common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool set rbdfio size 1' % self.tmp_conf).communicate()
    print 'Checking health after pool creation.'
    self.cluster.check_health()
    monitoring.stop()
    # Mount the filesystem
    common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
    for i in xrange(self.concurrent_procs):
        common.pdsh(settings.getnodes('clients'), 'sudo rbd -c %s create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (self.tmp_conf, i, self.vol_size)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/mnt/rbdfio-`hostname -s`-%d' % (self.tmp_dir, i)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d %s/mnt/rbdfio-`hostname -s`-%d' % (i, self.tmp_dir, i)).communicate()
    print 'Running scrub monitoring'
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
Developer ID: hjwsm1989, Project: ceph-tools, Lines: 32, Source: rbdfio.py
Example 18: initialize
def initialize(self):
    super(KvmRbdFio, self).initialize()
    for i in xrange(1):
        letter = string.ascii_lowercase[i+1]
        common.pdsh(settings.getnodes('clients'), 'sudo mkfs.ext4 /dev/vd%s' % letter).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkdir /srv/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mount -t ext4 -o noatime /dev/vd%s /srv/rbdfio-`hostname -s`-%d' % (letter, i)).communicate()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
    # populate the fio files
    logger.info('Attempting to populate fio files...')
    pre_cmd = 'sudo fio --rw=write -ioengine=sync --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.numjobs, self.vol_size, self.names)
    common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
Developer ID: JevonQ, Project: cbt, Lines: 15, Source: kvmrbdfio.py
Example 19: _run
def _run(self, mode, run_dir, out_dir):
    # We'll always drop caches for rados bench
    self.dropcaches()
    if self.concurrent_ops:
        concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
    op_size_str = '-b %s' % self.op_size
    common.make_remote_dir(run_dir)
    # dump the cluster config
    common.dump_config(run_dir)
    monitoring.start(run_dir)
    # Run rados bench
    print 'Running radosbench read test.'
    ps = []
    for i in xrange(self.concurrent_procs):
        out_file = '%s/output.%s' % (run_dir, i)
        objecter_log = '%s/objecter.%s.log' % (run_dir, i)
        p = common.pdsh(settings.getnodes('clients'), '/usr/bin/rados -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s' % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file))
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(run_dir)
    # Get the historic ops
    common.dump_historic_ops(run_dir)
    common.sync_files('%s/*' % run_dir, out_dir)
Developer ID: cityyard, Project: ceph-tools, Lines: 29, Source: radosbench.py
Example 20: __init__
def __init__(self, cluster, config):
    super(KvmRbdFio, self).__init__(cluster, config)
    self.concurrent_procs = config.get('concurrent_procs', 1)
    self.total_procs = self.concurrent_procs * len(settings.getnodes('clients').split(','))
    self.time = str(config.get('time', '300'))
    self.ramp = str(config.get('ramp', '0'))
    self.iodepth = config.get('iodepth', 16)
    self.numjobs = config.get('numjobs', 1)
    self.mode = config.get('mode', 'write')
    self.rwmixread = config.get('rwmixread', 50)
    self.rwmixwrite = 100 - self.rwmixread
    self.ioengine = config.get('ioengine', 'libaio')
    self.op_size = config.get('op_size', 4194304)
    self.pgs = config.get('pgs', 2048)
    self.vol_size = config.get('vol_size', 65536) * 0.9
    self.rep_size = config.get('rep_size', 1)
    self.rbdadd_mons = config.get('rbdadd_mons')
    self.rbdadd_options = config.get('rbdadd_options')
    self.client_ra = config.get('client_ra', '128')
    self.fio_cmd = config.get('fio_cmd', '/usr/bin/fio')
    # FIXME there are too many permutations, need to put results in SQLITE3
    self.run_dir = '%s/osd_ra-%08d/client_ra-%08d/op_size-%08d/concurrent_procs-%03d/iodepth-%03d/%s' % (self.run_dir, int(self.osd_ra), int(self.client_ra), int(self.op_size), int(self.total_procs), int(self.iodepth), self.mode)
    self.out_dir = '%s/osd_ra-%08d/client_ra-%08d/op_size-%08d/concurrent_procs-%03d/iodepth-%03d/%s' % (self.archive_dir, int(self.osd_ra), int(self.client_ra), int(self.op_size), int(self.total_procs), int(self.iodepth), self.mode)
    # Make the file names string
    self.names = ''
    for i in xrange(self.concurrent_procs):
        self.names += '--name=/srv/rbdfio-`hostname -s`-0/cbt-kvmrbdfio-%d ' % i
Developer ID: JevonQ, Project: cbt, Lines: 30, Source: kvmrbdfio.py
Note: The settings.getnodes examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not reproduce this article without permission.