本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection类的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection类的具体用法?Python RemoteMachineShellConnection怎么用?Python RemoteMachineShellConnection使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了RemoteMachineShellConnection类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _retrieve_cluster_cert_extended
def _retrieve_cluster_cert_extended(self, server):
    """Return (stdout, stderr) of ``couchbase-cli ssl-manage
    --cluster-cert-info --extended`` executed on *server*.

    Authenticates against localhost with the test's LDAP credentials.
    Fix over the original: the shell connection was never disconnected.
    """
    remote_client = RemoteMachineShellConnection(server)
    try:
        output, error = remote_client.execute_couchbase_cli(
            cli_command='ssl-manage',
            options="--cluster-cert-info --extended",
            cluster_host="localhost",
            user=self.ldapUser, password=self.ldapPass)
    finally:
        remote_client.disconnect()
    return output, error
开发者ID:arod1987,项目名称:testrunner,代码行数:7,代码来源:x509clitest.py
示例2: test_AuditEvent
def test_AuditEvent(self):
    """Toggle auditing via the `ops` test parameter and verify the result.

    ops == 'disable': no audit log file must exist at the audit log path.
    otherwise:        the reloaded audit configuration must match the
                      expected settings checked by self.checkConfig().
    """
    auditIns = audit(host=self.master)
    ops = self.input.param("ops", None)
    source = 'internal'
    user = 'couchbase'
    rest = RestConnection(self.master)
    #status = rest.setAuditSettings(enabled='true')
    auditIns.setAuditEnable('true')
    if (ops in ['enable', 'disable']):
        if ops == 'disable':
            #status = rest.setAuditSettings(enabled='false')
            auditIns.setAuditEnable('false')
        else:
            #status = rest.setAuditSettings(enabled='true')
            auditIns.setAuditEnable('true')
    if ops == 'disable':
        # Auditing is off: the audit log file must not have been created.
        shell = RemoteMachineShellConnection(self.master)
        try:
            result = shell.file_exists(auditIns.getAuditLogPath(), auditIns.AUDITLOGFILENAME)
        finally:
            shell.disconnect()
        self.assertFalse(result, 'Issue with file getting create in new directory')
    else:
        # Auditing is on: compare the reloaded audit config element-by-element.
        auditIns = audit(host=self.master)
        expectedResults = {"auditd_enabled":auditIns.getAuditStatus(),
                           "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                           "log_path":self.changePathWindows((auditIns.getAuditLogPath())[:-1]), "source":"internal",
                           "user":"couchbase", "rotate_interval":86400, "version":1, 'hostname':self.getHostName(self.master)}
        self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
开发者ID:arod1987,项目名称:testrunner,代码行数:30,代码来源:auditcheckconfig.py
示例3: rebalance_in_out_at_once_persistence_stopped
def rebalance_in_out_at_once_persistence_stopped(self):
    """Stop disk persistence on some initial nodes, load extra items that
    stay un-persisted, rebalance in and out at once, and verify no items
    are lost (curr_items sum must match curr_items_total).

    Fix over the original: the per-server shell connections opened to run
    cbepctl were never disconnected.
    """
    num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
    servs_init = self.servers[:self.nodes_init]
    servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
    servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
    rest = RestConnection(self.master)
    self._wait_for_stats_all_buckets(servs_init)
    for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
        shell = RemoteMachineShellConnection(server)
        try:
            for bucket in self.buckets:
                # stop disk persistence for this bucket on this node
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        finally:
            shell.disconnect()
    self.sleep(5)
    self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
    gen_extra = BlobGenerator('mike', 'mike-', self.value_size,
                              start=self.num_items / 2,
                              end=self.num_items / 2 + self.num_items_without_persistence)
    self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
    self.log.info("adding nodes {0} to cluster".format(servs_in))
    self.log.info("removing nodes {0} from cluster".format(servs_out))
    tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
    result_nodes = set(servs_init + servs_in) - set(servs_out)
    # wait timeout in 60 min because MB-7386 rebalance stuck
    self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
    for task in tasks:
        task.result()
    # persistence is stopped, so the disk-write queue must still hold ~all extra items
    self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                     ep_queue_size=self.num_items_without_persistence * 0.9,
                                     ep_queue_size_cond='>')
    self._wait_for_stats_all_buckets(servs_in)
    self._verify_all_buckets(self.master, timeout=None)
    self._verify_stats_all_buckets(result_nodes)
    #verify that curr_items_tot corresponds to sum of curr_items from all nodes
    verified = True
    for bucket in self.buckets:
        verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
    self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
开发者ID:ashvindersingh,项目名称:testrunner,代码行数:35,代码来源:rebalanceinout.py
示例4: testClusterInitNegative
def testClusterInitNegative(self):
    """Negative test for ``couchbase-cli cluster-init``: run it with missing
    or partial credentials against an ejected node and assert the expected
    400 error, then always re-initialize the node.

    Fix over the original: remote_client.disconnect() only ran on the
    success path (before the assertions' failure could skip it); it now
    runs in the finally block.
    """
    cluster_init_username = self.input.param("cluster_init_username", None)
    cluster_init_password = self.input.param("cluster_init_password", None)
    cluster_init_port = self.input.param("cluster_init_port", None)
    cluster_init_ramsize = self.input.param("cluster_init_ramsize", None)
    command_init = self.input.param("command_init", "cluster-init")
    server = self.servers[-1]
    remote_client = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    rest.force_eject_node()
    self.sleep(5)
    try:
        cli_command = command_init
        options = ""
        if cluster_init_username is not None:
            options += "--cluster-init-username={0} ".format(cluster_init_username)
        if cluster_init_password is not None:
            options += "--cluster-init-password={0} ".format(cluster_init_password)
        if cluster_init_port is not None:
            options += "--cluster-init-port={0} ".format(cluster_init_port)
        if cluster_init_ramsize is None:
            # NOTE(review): this emits "--cluster-init-ramsize=None" when no
            # ramsize is supplied — the condition looks inverted relative to
            # the checks above; confirm whether `is not None` was intended.
            options += "--cluster-init-ramsize={0} ".format(cluster_init_ramsize)
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=None, password=None)
        self.assertEqual(output[0], 'ERROR: unable to init localhost (400) Bad Request')
        self.assertTrue(output[1] == "[u'Username and password are required.']" or output[1] == "[u'The password must be at least six characters.']")
    finally:
        remote_client.disconnect()
        # always leave the node ejected-then-initialized for the next test
        rest = RestConnection(server)
        rest.force_eject_node()
        self.sleep(5)
        rest.init_cluster()
开发者ID:saigon,项目名称:testrunner,代码行数:33,代码来源:couchbase_clitest.py
示例5: test_add_remove_autofailover
def test_add_remove_autofailover(self):
    """Build a 4-node SSL (x509) cluster with autofailover enabled, restart
    one node, and verify SSL login still returns 200 on every node.

    Fixes over the original: the node names appeared as mangled
    '[email protected]' strings (email-obfuscation artifact of the page
    scraper) and are restored to the 'ns_1@<ip>' convention used by
    Couchbase node ids; the shell connection is now disconnected.
    """
    rest = RestConnection(self.master)
    serv_out = self.servers[3]
    shell = RemoteMachineShellConnection(serv_out)
    known_nodes = ['ns_1@' + self.master.ip]
    rest.create_bucket(bucket='default', ramQuotaMB=100)
    rest.update_autofailover_settings(True, 30)
    x509main(self.master).setup_master()
    x509main().setup_cluster_nodes_ssl(self.servers[1:4])
    for server in self.servers[1:4]:
        rest.add_node('Administrator', 'password', server.ip)
        known_nodes.append('ns_1@' + server.ip)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    # bounce one node so autofailover/SSL state is exercised across a restart
    shell.stop_server()
    self.sleep(60)
    shell.start_server()
    self.sleep(30)
    shell.disconnect()
    for server in self.servers:
        status = x509main(server)._validate_ssl_login()
        self.assertEqual(status, 200, "Not able to login via SSL code")
开发者ID:EricACooper,项目名称:testrunner,代码行数:25,代码来源:x509tests.py
示例6: user_manage
def user_manage(self, delete, list, set, ro_username, ro_password):
    """Run ``couchbase-cli user-manage`` to delete/list/set the read-only
    user and return (stdout, stderr, success).

    Fix over the original: the adjacent string literals in the expected
    success message were missing the separating space, producing
    "Local read-onlyuser deleted".
    """
    options = self._get_default_options()
    if delete:
        options += " --delete "
    if list:
        options += " --list "
    if set:
        options += " --set "
    if ro_username is not None:
        options += " --ro-username " + str(ro_username)
    if ro_password:
        options += " --ro-password " + str(ro_password)
    remote_client = RemoteMachineShellConnection(self.server)
    stdout, stderr = remote_client.couchbase_cli("user-manage",
                                                 self.hostname, options)
    remote_client.disconnect()
    if delete:
        return stdout, stderr, self._was_success(stdout, "Local read-only "
                                                         "user deleted")
    elif set:
        # NOTE(review): the set branch checks the same "deleted" message as
        # delete — likely a copy-paste; confirm the CLI's actual message for
        # a successful --set before changing it.
        return stdout, stderr, self._was_success(stdout, "Local read-only "
                                                         "user deleted")
    else:
        return stdout, stderr, self._no_error_in_output(stdout)
开发者ID:prasanna135,项目名称:testrunner,代码行数:26,代码来源:couchbase_cli.py
示例7: initialize
def initialize(self, params):
    """Initialize a freshly installed node: set cluster credentials and
    memory quota via REST, optionally relocate the data path, retrying for
    up to ten minutes while the server comes up.

    Raises Exception if the node could not be initialized within the window.
    """
    # log = logger.new_logger("Installer")
    start_time = time.time()
    cluster_initialized = False
    server = params["server"]
    remote_client = RemoteMachineShellConnection(params["server"])
    # retry loop: the server may not be reachable immediately after install
    while time.time() < (start_time + (10 * 60)):
        rest = RestConnection(server)
        try:
            rest.init_cluster(username=server.rest_username, password=server.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if server.data_path:
                time.sleep(3)
                # Make sure that data_path is writable by couchbase user
                #remote_client.stop_couchbase()
                remote_client.execute_command('rm -rf {0}/*'.format(server.data_path))
                remote_client.execute_command("chown -R couchbase:couchbase {0}".format(server.data_path))
                rest.set_data_path(data_path=server.data_path)
                # Symlink data-dir to custom path
                #remote_client.execute_command('mv /opt/couchbase/var {0}'.format(server.data_path))
                #remote_client.execute_command('ln -s {0}/var /opt/couchbase/var'.format(server.data_path))
                #remote_client.execute_command("chown -h couchbase:couchbase /opt/couchbase/var")
                #remote_client.start_couchbase()
            time.sleep(3)
            cluster_initialized = True
            break
        except ServerUnavailableException:
            # NOTE(review): `log` relies on a module-level logger; the local
            # one above is commented out — confirm it exists at module scope.
            log.error("error happened while initializing the cluster @ {0}".format(server.ip))
            log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
    if not cluster_initialized:
        raise Exception("unable to initialize membase node")
开发者ID:vmx,项目名称:testrunner,代码行数:32,代码来源:install.py
示例8: test_upgrade_negative
def test_upgrade_negative(self):
op = self.input.param("op", None)
error = self.input.param("error", '')
remote = RemoteMachineShellConnection(self.master)
if op is None:
self.fail("operation should be specified")
if op == "higher_version":
tmp = self.initial_version
self.initial_version = self.upgrade_versions[0]
self.upgrade_versions = [tmp, ]
info = None
if op == "wrong_arch":
info = remote.extract_remote_info()
info.architecture_type = ('x86_64', 'x86')[info.architecture_type == 'x86']
self._install([self.master])
self.operations([self.master])
try:
if op == "close_port":
RemoteUtilHelper.enable_firewall(self.master)
for upgrade_version in self.upgrade_versions:
self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
format(upgrade_version))
output, error = self._upgrade(upgrade_version, self.master, info=info)
if str(output).find(error) != -1 or str(error).find(error) != -1:
raise Exception(error)
except Exception, ex:
self.log.info("Exception %s appeared as expected" % ex)
self.log.info("Check that old version is working fine")
self.verification([self.master])
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:29,代码来源:newupgradetests.py
示例9: verify_for_recovery_type
def verify_for_recovery_type(self, chosen=None, serverMap=None, buckets=None,
                             recoveryTypeMap=None, fileMap=None,
                             deltaRecoveryBuckets=()):
    """Verify each failed-over node was recovered with the expected type.

    A marker file "check.txt" written before failover survives a delta
    recovery but not a full one, so its presence reveals which recovery
    actually ran. Skipped entirely on Windows nodes.

    Fixes over the original: mutable default arguments ([] / {}) replaced
    with None (or an immutable tuple for deltaRecoveryBuckets, preserving
    the original empty-default semantics), and `!= None` -> `is not None`.

    NOTE(review): the body iterates self.chosen, not the `chosen`
    parameter — kept as-is to preserve behavior, but confirm intent.
    """
    serverMap = serverMap if serverMap is not None else {}
    buckets = buckets if buckets is not None else []
    recoveryTypeMap = recoveryTypeMap if recoveryTypeMap is not None else {}
    fileMap = fileMap if fileMap is not None else {}
    summary = ""
    logic = True
    for server in self.chosen:
        shell = RemoteMachineShellConnection(serverMap[server.ip])
        os_type = shell.extract_remote_info()
        if os_type.type.lower() == 'windows':
            return
        for bucket in buckets:
            path = fileMap[server.ip][bucket.name]
            exists = shell.file_exists(path, "check.txt")
            if deltaRecoveryBuckets is not None:
                # only buckets listed in deltaRecoveryBuckets should be delta
                if recoveryTypeMap[server.ip] == "delta" and (bucket.name in deltaRecoveryBuckets) and not exists:
                    logic = False
                    summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip, bucket.name)
                elif recoveryTypeMap[server.ip] == "delta" and (bucket.name not in deltaRecoveryBuckets) and exists:
                    summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip, bucket.name)
                    logic = False
            else:
                # no per-bucket list: the node-wide recovery type decides
                if recoveryTypeMap[server.ip] == "delta" and not exists:
                    logic = False
                    summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip, bucket.name)
                elif recoveryTypeMap[server.ip] == "full" and exists:
                    logic = False
                    summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip, bucket.name)
        shell.disconnect()
    self.assertTrue(logic, summary)
开发者ID:lichia,项目名称:testrunner,代码行数:28,代码来源:failovertests.py
示例10: test_upgrade
def test_upgrade(self):
    """Install the initial version on the master, upgrade it through each
    version in self.upgrade_versions, force the expiry pager so expired
    items are purged, then verify the node.
    """
    self._install([self.master])
    self.operations([self.master])
    for upgrade_version in self.upgrade_versions:
        self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                   format(upgrade_version))
        upgrade_threads = self._async_update(upgrade_version, [self.master])
        #wait upgrade statuses
        for upgrade_thread in upgrade_threads:
            upgrade_thread.join()
        # each upgrade thread reports success/failure via self.queue
        success_upgrade = True
        while not self.queue.empty():
            success_upgrade &= self.queue.get()
        if not success_upgrade:
            self.fail("Upgrade failed!")
        self.sleep(self.expire_time)
        # if not self.is_linux:
        #     self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            # run the expiry pager shortly (5s) so expired items get purged
            remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        remote.disconnect()
        self.sleep(30)
        self.verification([self.master])
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:26,代码来源:newupgradetests.py
示例11: offline_cluster_upgrade_with_reinstall
def offline_cluster_upgrade_with_reinstall(self):
    """Offline-upgrade the trailing initial nodes while force-reinstalling
    the first num_nodes_reinstall nodes, then verify the cluster.

    Fix over the original: the stopped-nodes slice start was written as
    `self.nodes_init - (self.nodes_init - num_nodes_reinstall)`, which is
    simply `num_nodes_reinstall`; the local name typo "stoped" is also
    corrected.
    """
    self._install(self.servers[:self.nodes_init])
    self.operations(self.servers[:self.nodes_init])
    if self.ddocs_num:
        self.create_ddocs_and_views()
    if self.during_ops:
        for opn in self.during_ops:
            getattr(self, opn)()
    num_nodes_reinstall = self.input.param('num_nodes_reinstall', 1)
    stopped_nodes = self.servers[num_nodes_reinstall:self.nodes_init]
    nodes_reinstall = self.servers[:num_nodes_reinstall]
    for upgrade_version in self.upgrade_versions:
        self.sleep(self.sleep_time,
                   "Pre-setup of old version is done. Wait for upgrade to {0} version".format(upgrade_version))
        for server in stopped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.disconnect()
        self.sleep(self.sleep_time)
        # upgrade the stopped nodes while the others get a fresh reinstall
        upgrade_threads = self._async_update(upgrade_version, stopped_nodes)
        self.force_reinstall(nodes_reinstall)
        for upgrade_thread in upgrade_threads:
            upgrade_thread.join()
        success_upgrade = True
        while not self.queue.empty():
            success_upgrade &= self.queue.get()
        if not success_upgrade:
            self.fail("Upgrade failed!")
    self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
    self.verification(self.servers[:self.nodes_init])
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:30,代码来源:newupgradetests.py
示例12: offline_cluster_upgrade_and_rebalance
def offline_cluster_upgrade_and_rebalance(self):
    """Stop N nodes and upgrade them offline while a rebalance (expected
    to fail) is attempted, then wait for the nodes, rebalance out the
    departing ones, and verify the resulting cluster.
    """
    num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
    stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
    servs_out = self.servers[self.nodes_init - num_stoped_nodes - self.nodes_out :self.nodes_init - num_stoped_nodes]
    servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    self._install(self.servers)
    self.operations(self.servers[:self.nodes_init])
    if self.ddocs_num:
        self.create_ddocs_and_views()
    if self.during_ops:
        for opn in self.during_ops:
            getattr(self, opn)()
    for upgrade_version in self.upgrade_versions:
        self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                   format(upgrade_version))
        for server in stoped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.disconnect()
        upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
        try:
            # rebalancing while nodes are stopped is expected to fail
            self.cluster.rebalance(self.servers[:self.nodes_init], servs_in, servs_out)
        except RebalanceFailedException:
            self.log.info("rebalance failed as expected")
        for upgrade_thread in upgrade_threads:
            upgrade_thread.join()
        # each upgrade thread reports success/failure via self.queue
        success_upgrade = True
        while not self.queue.empty():
            success_upgrade &= self.queue.get()
        if not success_upgrade:
            self.fail("Upgrade failed!")
        ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
        self.cluster.rebalance(self.servers[:self.nodes_init], [], servs_out)
        self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
        self.verification(list(set(self.servers[:self.nodes_init] + servs_in) - set(servs_out)))
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:35,代码来源:newupgradetests.py
示例13: change_erlang_threads_values
def change_erlang_threads_values(servers, sync_threads=True, num_threads="16:16"):
"""Change the the type of sync erlang threads and its value
sync_threads=True means sync threads +S with default threads number equal 16:16
sync_threads=False means async threads: +A 16, for instance
Default: +S 16:16
"""
log = logger.Logger.get_logger()
for server in servers:
sh = RemoteMachineShellConnection(server)
product = "membase"
if sh.is_couchbase_installed():
product = "couchbase"
sync_type = sync_threads and "S" or "A"
command = "sed -i 's/+[A,S] .*/+%s %s \\\/g' /opt/%s/bin/%s-server" % (
sync_type,
num_threads,
product,
product,
)
o, r = sh.execute_command(command)
sh.log_command_output(o, r)
msg = "modified erlang +%s to %s for server %s"
log.info(msg % (sync_type, num_threads, server.ip))
开发者ID:ketakigangal,项目名称:cbsystest,代码行数:26,代码来源:cluster_helper.py
示例14: test_full_eviction_changed_to_value_eviction
def test_full_eviction_changed_to_value_eviction(self):
    """Switch each bucket's eviction policy to valueOnly via couchbase-cli
    bucket-edit and expect an immediate memcached set on the bucket to
    raise MemcachedError (the bucket restarts/warms up after the change).
    """
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        # flip the eviction policy; CLI must report SUCCESS
        output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                 cluster_host="localhost",
                                                 user=self.master.rest_username,
                                                 password=self.master.rest_password,
                                                 options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1, 'Eviction policy wasn\'t changed')
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        self.servers[:self.nodes_init], self,
        wait_time=self.wait_timeout, wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    #self._load_all_buckets(self.master, gen_create2, "create", 0)
    #import pdb;pdb.set_trace()
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0,0, json.dumps({'value':'value2'}))
        self.fail('Bucket is incorrectly functional')
    except MemcachedError, e:
        pass # this is the exception we are hoping for
开发者ID:EricACooper,项目名称:testrunner,代码行数:34,代码来源:eviction_change_policy.py
示例15: setting_compaction
def setting_compaction(self, db_frag_perc, db_frag_size, view_frag_perc,
                       view_frag_size, from_period, to_period,
                       abort_outside, parallel_compact, purgeint):
    """Invoke ``couchbase-cli setting-compaction`` with the given knobs.

    Arguments that are None are omitted from the command line. Returns
    (stdout, stderr, success) where success means the CLI reported
    "Compaction settings modified".
    """
    flag_values = (
        ("--compaction-db-percentage", db_frag_perc),
        ("--compaction-db-size", db_frag_size),
        ("--compaction-view-percentage", view_frag_perc),
        ("--compaction-view-size", view_frag_size),
        ("--compaction-period-from", from_period),
        ("--compaction-period-to", to_period),
        ("--enable-compaction-abort", abort_outside),
        ("--enable-compaction-parallel", parallel_compact),
        ("--metadata-purge-interval", purgeint),
    )
    options = self._get_default_options()
    for flag, value in flag_values:
        if value is not None:
            options += " %s %s" % (flag, str(value))
    shell = RemoteMachineShellConnection(self.server)
    stdout, stderr = shell.couchbase_cli("setting-compaction",
                                         self.hostname, options)
    shell.disconnect()
    return stdout, stderr, self._was_success(stdout, "Compaction "
                                                     "settings modified")
开发者ID:prasanna135,项目名称:testrunner,代码行数:29,代码来源:couchbase_cli.py
示例16: rebalance_in_out_with_auto_DB_compaction
def rebalance_in_out_with_auto_DB_compaction(self):
    """Enable threshold-based auto DB compaction, keep fragmentation
    growing while a rebalance in/out runs, then verify compaction
    completed and the cluster stats are intact.
    """
    remote_client = RemoteMachineShellConnection(self.master)
    rest = RestConnection(self.master)
    self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
                    "ERROR: Not enough nodes to do rebalance in and out")
    servs_init = self.servers[:self.nodes_init]
    servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
    servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
    result_nodes = set(servs_init + servs_in) - set(servs_out)
    self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
    self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
    rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
    # keep driving fragmentation until the rebalance finishes
    while rebalance.state != "FINISHED":
        self._monitor_DB_fragmentation()
    compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                         timeout_in_seconds=(self.wait_timeout * 5))
    rebalance.result()
    # fragmentation back at 0 means compaction already completed earlier
    monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
    result = monitor_fragm.result()
    if compact_run:
        self.log.info("auto compaction run successfully")
    elif result:
        self.log.info("Compaction is already completed")
    else:
        self.fail("auto compaction does not run")
    self.verify_cluster_stats(result_nodes)
    remote_client.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:27,代码来源:autocompaction.py
示例17: setting_index
def setting_index(self, max_rollbacks, stable_snap_interval,
                  mem_snap_interval, storage_mode, threads,
                  log_level):
    """Invoke ``couchbase-cli setting-index`` with the given indexer knobs.

    Falsy arguments (None, 0, "") are omitted from the command line.
    Returns (stdout, stderr, success) where success means the CLI reported
    "Indexer settings modified".
    """
    flag_values = (
        ("--index-max-rollback-points", max_rollbacks),
        ("--index-stable-snapshot-interval", stable_snap_interval),
        ("--index-memory-snapshot-interval", mem_snap_interval),
        ("--index-storage-setting", storage_mode),
        ("--index-threads", threads),
        ("--index-log-level", log_level),
    )
    options = self._get_default_options()
    for flag, value in flag_values:
        if value:
            options += " %s %s" % (flag, str(value))
    shell = RemoteMachineShellConnection(self.server)
    stdout, stderr = shell.couchbase_cli("setting-index",
                                         self.hostname, options)
    shell.disconnect()
    return stdout, stderr, self._was_success(stdout,
                                             "Indexer settings modified")
开发者ID:prasanna135,项目名称:testrunner,代码行数:25,代码来源:couchbase_cli.py
示例18: rebalance_in_with_DB_time_compaction
def rebalance_in_with_DB_time_compaction(self):
    """Configure auto DB compaction inside a future time window, build up
    fragmentation, then move the window to "now" and verify compaction
    runs while a rebalance-in is in progress.
    """
    remote_client = RemoteMachineShellConnection(self.master)
    rest = RestConnection(self.master)
    currTime = datetime.datetime.now()
    fromTime = currTime + datetime.timedelta(hours=1)
    toTime = currTime + datetime.timedelta(hours=24)
    # window opens an hour from now, so compaction must not start yet
    self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=fromTime.hour,
                             allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
                             allowedTimePeriodAbort="false")
    self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
    self._monitor_DB_fragmentation()
    # NOTE(review): presumably this polls that no bucket_compaction task
    # runs while outside the window — confirm semantics of wait_task=False.
    for i in xrange(10):
        active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
        for active_task in active_tasks:
            result = active_task.result()
            self.assertTrue(result)
        self.sleep(2)
    currTime = datetime.datetime.now()
    #Need to make it configurable
    newTime = currTime + datetime.timedelta(minutes=5)
    # reopen the window starting now for five minutes so compaction may run
    self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=currTime.hour,
                             allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
                             allowedTimePeriodAbort="false")
    servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
    rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
    compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                         timeout_in_seconds=(self.wait_timeout * 5))
    rebalance.result()
    if compact_run:
        self.log.info("auto compaction run successfully")
    else:
        self.fail("auto compaction does not run")
    remote_client.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:33,代码来源:autocompaction.py
示例19: _setting_cluster
def _setting_cluster(self, cmd, data_ramsize, index_ramsize, fts_ramsize,
                     cluster_name, cluster_username,
                     cluster_password, cluster_port):
    """Run the given couchbase-cli cluster-settings subcommand *cmd*.

    Credentials are appended whenever they are not None (so empty strings
    still get passed); the remaining knobs only when truthy. Returns
    (stdout, stderr, success) where success means the CLI reported
    "Cluster settings modified".
    """
    options = self._get_default_options()
    for flag, value in (("--cluster-username", cluster_username),
                        ("--cluster-password", cluster_password)):
        if value is not None:
            options += " %s %s" % (flag, str(value))
    for flag, value in (("--cluster-ramsize", data_ramsize),
                        ("--cluster-index-ramsize", index_ramsize),
                        ("--cluster-fts-ramsize", fts_ramsize),
                        ("--cluster-name", cluster_name),
                        ("--cluster-port", cluster_port)):
        if value:
            options += " %s %s" % (flag, str(value))
    shell = RemoteMachineShellConnection(self.server)
    stdout, stderr = shell.couchbase_cli(cmd, self.hostname, options)
    shell.disconnect()
    return stdout, stderr, self._was_success(stdout,
                                             "Cluster settings modified")
开发者ID:prasanna135,项目名称:testrunner,代码行数:25,代码来源:couchbase_cli.py
示例20: test_auto_compaction_with_multiple_buckets
def test_auto_compaction_with_multiple_buckets(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
for bucket in self.buckets:
if bucket.name == "default":
self.disable_compaction()
else:
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, bucket=bucket.name)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
end_time = time.time() + self.wait_timeout * 30
for bucket in self.buckets:
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.autocompaction_value, bucket.name)
while monitor_fragm.state != "FINISHED":
if end_time < time.time():
self.fail("Fragmentation level is not reached in %s sec" % self.wait_timeout * 30)
try:
self._load_all_buckets(self.servers[0], self.gen_update, "update", 0)
except Exception, ex:
self.log.error("Load cannot be performed: %s" % str(ex))
self.fail(ex)
monitor_fragm.result()
compact_run = remote_client.wait_till_compaction_end(rest, bucket.name,
timeout_in_seconds=(self.wait_timeout * 5))
if compact_run:
self.log.info("auto compaction run successfully")
开发者ID:arod1987,项目名称:testrunner,代码行数:25,代码来源:autocompaction.py
注:本文中的remote.remote_util.RemoteMachineShellConnection类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论