本文整理汇总了Python中xdcrnewbasetests.NodeHelper类的典型用法代码示例。如果您正苦于以下问题:Python NodeHelper类的具体用法?Python NodeHelper怎么用?Python NodeHelper使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了NodeHelper类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_capi_with_malformed_http_resp
def test_capi_with_malformed_http_resp(self):
    """Run a CAPI (Elasticsearch) replication with large batch settings and
    verify no 'malformed HTTP response' errors appear in goxdcr logs.
    """
    repl_id = self._start_es_replication(
        xdcr_params={'workerBatchSize': '2000',
                     'docBatchSizeKb': '8096',
                     'targetNozzlePerNode': '64'})
    rest_conn = RestConnection(self.src_master)
    # Pause the replication while the source bucket is being loaded.
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')
    doc_gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',
                                xrange(100), start=0, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(doc_gen)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')
    self._wait_for_es_replication_to_catchup()
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self.src_master) + '/goxdcr.log*'
    for node in self.src_cluster.get_nodes():
        count = NodeHelper.check_goxdcr_log(
            node,
            "malformed HTTP response",
            goxdcr_log)
        self.assertEqual(count, 0, "malformed HTTP response error message found in " + str(node.ip))
        self.log.info("malformed HTTP response error message not found in " + str(node.ip))
    self._verify_es_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:27,代码来源:capiXDCR.py
示例2: test_node_crash_cluster
def test_node_crash_cluster(self):
    """Kill Couchbase on whole clusters (C1 and/or C2, per the 'crash'
    input param), restart the servers, and verify XDCR survives.
    """
    self.setup_xdcr_and_load()
    crashed_nodes = []
    crash = self._input.param("crash", "").split('-')
    if "C1" in crash:
        crashed_nodes += self.src_cluster.get_nodes()
        self.__kill_processes(crashed_nodes)
        self.sleep(30)
    if "C2" in crash:
        crashed_nodes += self.dest_cluster.get_nodes()
        self.__kill_processes(crashed_nodes)
    # Bring every killed node back up before checking replication.
    for crashed_node in crashed_nodes:
        self.__start_cb_server(crashed_node)
    bucket_type = self._input.param("bucket_type", "membase")
    if "C1" in crash:
        if bucket_type == "ephemeral":
            # Ephemeral buckets have no warmup phase; wait a fixed interval.
            self.sleep(self._wait_timeout)
        else:
            NodeHelper.wait_warmup_completed(self.src_cluster.get_nodes())
        gen_create = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
    self.async_perform_update_delete()
    if "C2" in crash:
        if bucket_type == "ephemeral":
            self.sleep(self._wait_timeout)
        else:
            NodeHelper.wait_warmup_completed(self.dest_cluster.get_nodes())
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:35,代码来源:uniXDCR.py
示例3: replication_with_firewall_enabled
def replication_with_firewall_enabled(self):
    """Firewall the destination master mid-replication, then lift the
    firewall and verify XDCR recovers and data converges."""
    # Short restart interval so XDCR retries quickly after the outage.
    self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
    self.setup_xdcr_and_load()
    self.perform_update_delete()
    # Simulate a temporary network outage to the destination.
    NodeHelper.enable_firewall(self.dest_master)
    self.sleep(30)
    NodeHelper.disable_firewall(self.dest_master)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:9,代码来源:uniXDCR.py
示例4: replication_while_rebooting_a_non_master_destination_node
def replication_while_rebooting_a_non_master_destination_node(self):
    """Reboot a non-master destination node during replication and verify
    XDCR catches up once the node is back."""
    self.setup_xdcr_and_load()
    # Short restart interval so XDCR retries quickly while the node is down.
    self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
    self.perform_update_delete()
    self.sleep(self._wait_timeout / 2)
    rebooted_node = self.dest_cluster.reboot_one_node(self)
    # Allow extra time: the node may also go through warmup after reboot.
    NodeHelper.wait_node_restarted(rebooted_node, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:9,代码来源:uniXDCR.py
示例5: is_ssl_over_memcached
def is_ssl_over_memcached(self, master):
    """Return True when goxdcr logs on *master* show SSL-over-memcached
    connections were attempted; False when only the legacy ns_proxy SSL
    path (or neither message) is found.
    """
    # NOTE(review): original indentation was lost in transcription; the
    # 'return False' is assumed to cover the whole no-memcached branch.
    if not NodeHelper.check_goxdcr_log(master,
            "Try to create a ssl over memcached connection"):
        if NodeHelper.check_goxdcr_log(master,
                "Get or create ssl over proxy connection"):
            self.log.error("SSL still uses ns_proxy connection!")
        return False
    self.log.info("SSL uses memcached after upgrade!")
    return True
开发者ID:EricACooper,项目名称:testrunner,代码行数:9,代码来源:upgradeXDCR.py
示例6: test_checkpointing_with_full_rollback
def test_checkpointing_with_full_rollback(self):
    """Force a full DCP rollback on the source (persistence stopped, memcached
    killed on node A, node B failed over) and verify goxdcr logs show the
    rollback was received and startSeqno was rolled back to 0."""
    bucket = self.src_cluster.get_buckets()[0]
    nodes = self.src_cluster.get_nodes()
    # Stop persistence on node A & node B so mutations stay memory-only.
    for node in nodes:
        MemcachedClientHelper.direct_client(node, bucket).stop_persistence()
    self.src_cluster.pause_all_replications()
    load_gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(load_gen)
    self.src_cluster.resume_all_replications()
    self.sleep(self._checkpoint_interval * 2)
    self.get_and_validate_latest_checkpoint()
    # Perform mutations on the bucket.
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)
    # Kill memcached on node A so that node B becomes master.
    shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
    shell.kill_memcached()
    # Start persistence on node B, then fail it over.
    MemcachedClientHelper.direct_client(nodes[1], bucket).start_persistence()
    failover_task = self.src_cluster.async_failover()
    failover_task.result()
    # Wait for failover & rollback to complete.
    self.sleep(self._wait_timeout * 5)
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) + '/goxdcr.log*'
    rollback_hits = NodeHelper.check_goxdcr_log(
        nodes[0],
        "Received rollback from DCP stream",
        goxdcr_log)
    self.assertGreater(rollback_hits, 0, "full rollback not received from DCP as expected")
    self.log.info("full rollback received from DCP as expected")
    seqno_hits = NodeHelper.check_goxdcr_log(
        nodes[0],
        "Rolled back startSeqno to 0",
        goxdcr_log)
    self.assertGreater(seqno_hits, 0, "startSeqno not rolled back to 0 as expected")
    self.log.info("startSeqno rolled back to 0 as expected")
    shell.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:56,代码来源:checkpointXDCR.py
示例7: incremental_offline_upgrade
def incremental_offline_upgrade(self):
    """Offline-upgrade nodes one at a time in the order given by the
    'upgrade_seq' param (src>dest, src<dest, or interleaved src><dest),
    loading fresh data and waiting for replication after each upgrade,
    then verify the merged result.
    """
    upgrade_seq = self.input.param("upgrade_seq", "src>dest")
    self._install(self.servers[:self.src_init + self.dest_init])
    self.create_buckets()
    self._join_all_clusters()
    self.sleep(60)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
    self.sleep(self.wait_timeout)
    self._wait_for_replication_to_catchup()
    nodes_to_upgrade = []
    if upgrade_seq == "src>dest":
        nodes_to_upgrade = copy.copy(self.src_nodes)
        nodes_to_upgrade.extend(self.dest_nodes)
    elif upgrade_seq == "src<dest":
        nodes_to_upgrade = copy.copy(self.dest_nodes)
        nodes_to_upgrade.extend(self.src_nodes)
    elif upgrade_seq == "src><dest":
        # Interleave source and destination nodes pairwise.
        min_cluster = min(len(self.src_nodes), len(self.dest_nodes))
        for i in xrange(min_cluster):
            nodes_to_upgrade.append(self.src_nodes[i])
            nodes_to_upgrade.append(self.dest_nodes[i])
    for _seq, node in enumerate(nodes_to_upgrade):
        self._offline_upgrade([node])
        self.sleep(60)
        # Use a unique key prefix per upgrade round ("loadThree", "loadThreea", ...).
        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        itemPrefix = "loadThree" + _seq * 'a'
        gen_create3 = BlobGenerator(itemPrefix, itemPrefix, self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('default')
        itemPrefix = "loadFour" + _seq * 'a'
        gen_create4 = BlobGenerator(itemPrefix, itemPrefix, self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.src_master, gen_create4, 'create', exp=0)
        self._wait_for_replication_to_catchup()
    self.merge_all_buckets()
    self.verify_results()
    self.sleep(self.wait_timeout * 5, "Let clusters work for some time")
    # BUG FIX: was version[:2], which slices "3.1..." to "3." -> float 3.0,
    # so the 3.1 -> 4.1 comparison was always False and the goxdcr log
    # check below never ran. [:3] captures "3.1" / "4.1" as intended.
    if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                node,
                "Failed to repair connections to target cluster",
                goxdcr_log)
            self.assertEqual(count, 0, "Failed to repair connections to target cluster "
                                       "error message found in " + str(node.ip))
            self.log.info("Failed to repair connections to target cluster "
                          "error message not found in " + str(node.ip))
开发者ID:EricACooper,项目名称:testrunner,代码行数:55,代码来源:upgradeXDCR.py
示例8: test_retry_connections_on_errors_before_restart
def test_retry_connections_on_errors_before_restart(self):
    """
    CBQE-3373: Do not restart pipeline as soon as connection errors are
    detected, backoff and retry 5 times before trying to restart pipeline.
    """
    repair_attempted = False
    # Start data load after setting up xdcr.
    load_tasks = self.setup_xdcr_async_load()
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) + '/goxdcr.log*'
    # Block port 11210 on target to simulate a connection error.
    shell = RemoteMachineShellConnection(self.dest_master)
    out, err = shell.execute_command("/sbin/iptables -A INPUT -p tcp --dport"
                                     " 11210 -j DROP")
    shell.log_command_output(out, err)
    out, err = shell.execute_command("/sbin/iptables -L")
    shell.log_command_output(out, err)
    # Complete loading.
    for task in load_tasks:
        task.result()
    # Wait for goxdcr to detect i/o timeout and try repairing.
    self.sleep(self._wait_timeout * 5)
    # Unblock port 11210 so replication can continue.
    out, err = shell.execute_command("/sbin/iptables -D INPUT -p tcp --dport"
                                     " 11210 -j DROP")
    shell.log_command_output(out, err)
    out, err = shell.execute_command("/sbin/iptables -L")
    shell.log_command_output(out, err)
    shell.disconnect()
    # Check logs for traces of retry attempts.
    for node in self.src_cluster.get_nodes():
        repair_hits = NodeHelper.check_goxdcr_log(
            node,
            "Failed to repair connections to target cluster",
            goxdcr_log)
        setup_hits = NodeHelper.check_goxdcr_log(
            node,
            "Failed to set up connections to target cluster",
            goxdcr_log)
        if repair_hits + setup_hits > 0:
            self.log.info('SUCCESS: We tried to repair connections before'
                          ' restarting pipeline')
            repair_attempted = True
    if not repair_attempted:
        self.fail("No attempts were made to repair connections on %s before"
                  " restarting pipeline" % self.src_cluster.get_nodes())
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:54,代码来源:uniXDCR.py
示例9: is_goxdcr_migration_successful
def is_goxdcr_migration_successful(self, server):
    """Check goxdcr logs on *server* for the XDCR metadata migration.

    Returns True when no migration was attempted or when it completed
    without errors exactly once; False otherwise.
    """
    started = NodeHelper.check_goxdcr_log(server,
                                          "Starting to migrate xdcr metadata")
    if started <= 0:
        # No migration was attempted on this node; nothing to verify.
        return True
    completed = NodeHelper.check_goxdcr_log(server,
                                            "Metadata migration completed without errors")
    self.log.info(completed)
    if completed == 1:
        self.log.info("SUCCESS: Metadata migration completed without errors")
        return True
    self.log.error("ERROR: Metadata migration was unsuccessful")
    return False
开发者ID:EricACooper,项目名称:testrunner,代码行数:13,代码来源:upgradeXDCR.py
示例10: load_with_async_ops_with_warmup_master
def load_with_async_ops_with_warmup_master(self):
    """Warm up the master node of the cluster(s) named in the warmup param
    (C1/C2), wait for warmup, run async update/delete load, and verify."""
    self.setup_xdcr_and_load()
    warmupnodes = []
    if "C1" in self._warmup:
        warmupnodes.append(self.src_cluster.warmup_node(master=True))
    if "C2" in self._warmup:
        warmupnodes.append(self.dest_cluster.warmup_node(master=True))
    self.sleep(self._wait_timeout)
    NodeHelper.wait_warmup_completed(warmupnodes)
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout / 2)
    self.verify_results()
开发者ID:lichia,项目名称:testrunner,代码行数:13,代码来源:biXDCR.py
示例11: replication_while_rebooting_a_non_master_src_dest_node
def replication_while_rebooting_a_non_master_src_dest_node(self):
    """Reboot one non-master node on each cluster during bidirectional
    replication and verify data converges after both come back."""
    self.setup_xdcr_and_load()
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)
    reboot_node_dest = self.dest_cluster.reboot_one_node(self)
    # Allow extra time: nodes may also go through warmup after reboot.
    NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
    reboot_node_src = self.src_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
    self.sleep(120)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
    self.verify_results()
开发者ID:lichia,项目名称:testrunner,代码行数:15,代码来源:biXDCR.py
示例12: test_verify_mb19697
def test_verify_mb19697(self):
    """MB-19697: rebalance-out/in and master failover under load must not
    produce 'counter goes backward' errors in goxdcr logs."""
    self.setup_xdcr_and_load()
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) + '/goxdcr.log*'
    # Round 1: load with replication paused, then resume and catch up.
    self.src_cluster.pause_all_replications()
    self.src_cluster.load_all_buckets_from_generator(
        BlobGenerator("C1-", "C1-", self._value_size, end=100000))
    self.src_cluster.resume_all_replications()
    self._wait_for_replication_to_catchup()
    # Round 2: load while rebalancing a node out.
    tasks = self.src_cluster.async_load_all_buckets_from_generator(
        BlobGenerator("C1-", "C1-", self._value_size, end=100000))
    self.src_cluster.rebalance_out()
    for task in tasks:
        task.result()
    self._wait_for_replication_to_catchup()
    self.src_cluster.rebalance_in()
    # Round 3: load while failing over and rebalancing the master.
    tasks = self.src_cluster.async_load_all_buckets_from_generator(
        BlobGenerator("C1-", "C1-", self._value_size, end=100000))
    self.src_cluster.failover_and_rebalance_master()
    for task in tasks:
        task.result()
    self._wait_for_replication_to_catchup()
    for node in self.src_cluster.get_nodes():
        count = NodeHelper.check_goxdcr_log(
            node,
            "counter .+ goes backward, maybe due to the pipeline is restarted",
            goxdcr_log)
        self.assertEqual(count, 0, "counter goes backward, maybe due to the pipeline is restarted "
                                   "error message found in " + str(node.ip))
        self.log.info("counter goes backward, maybe due to the pipeline is restarted "
                      "error message not found in " + str(node.ip))
    self.sleep(300)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:47,代码来源:uniXDCR.py
示例13: test_verify_mb20463
def test_verify_mb20463(self):
    """MB-20463: replicate from a 4.5 source to a 4.1.2 target through a
    firewall blip and verify replication still catches up."""
    src_version = NodeHelper.get_cb_version(self.src_cluster.get_master_node())
    if float(src_version[:3]) != 4.5:
        self.log.info("Source cluster has to be at 4.5 for this test")
        return
    target_servers = self._input.servers[2:4]
    install_params = {
        'num_nodes': len(target_servers),
        'product': 'cb',
        'version': '4.1.2-6088',
        'vbuckets': [1024],
    }
    self.log.info("will install {0} on {1}".format('4.1.2-6088', [s.ip for s in target_servers]))
    InstallerJob().parallel_install(target_servers, install_params)
    if install_params['product'] in ["couchbase", "couchbase-server", "cb"]:
        all_installed = True
        for server in target_servers:
            all_installed &= RemoteMachineShellConnection(server).is_couchbase_installed()
        if not all_installed:
            self.fail("some nodes were not installed successfully on target cluster!")
    self.log.info("4.1.2 installed successfully on target cluster")
    conn = RestConnection(self.dest_cluster.get_master_node())
    conn.add_node(user=self._input.servers[3].rest_username,
                  password=self._input.servers[3].rest_password,
                  remoteIp=self._input.servers[3].ip)
    self.sleep(30)
    conn.rebalance(otpNodes=[node.id for node in conn.node_statuses()])
    self.sleep(30)
    conn.create_bucket(bucket='default', ramQuotaMB=512)
    load_tasks = self.setup_xdcr_async_load()
    self.sleep(30)
    # Simulate a temporary network outage to the destination master.
    NodeHelper.enable_firewall(self.dest_master)
    self.sleep(30)
    NodeHelper.disable_firewall(self.dest_master)
    for task in load_tasks:
        task.result()
    self._wait_for_replication_to_catchup(timeout=600)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:46,代码来源:uniXDCR.py
示例14: _verify_bandwidth_usage
def _verify_bandwidth_usage(self, node, nw_limit=1, no_of_nodes=2, event_time=None,
                            nw_usage="[1-9][0-9]*", end_time=None):
    """Scan goxdcr.log on *node* and assert the per-node bandwidth usage
    never exceeded the configured limit.

    Parameters:
        node        : server whose goxdcr.log is scanned.
        nw_limit    : total bandwidth limit in MB shared by the cluster.
        no_of_nodes : number of nodes sharing the limit.
        event_time  : '%Y-%m-%dT%H:%M:%S' start of the window to check;
                      defaults to the last "Success adding replication
                      specification" log entry.
        nw_usage    : regex matching the usage values to search for.
        end_time    : optional '%Y-%m-%dT%H:%M:%S' end of the window.
    """
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(node) + '/goxdcr.log'
    # Per-node byte limit. '//' keeps integer semantics on both py2 and
    # py3; '/' under py3 would yield a float and break the log pattern
    # built from str(nw_max) below.
    nw_max = (nw_limit * 1024 * 1024) // no_of_nodes
    if event_time:
        time_to_compare = time.strptime(event_time, '%Y-%m-%dT%H:%M:%S')
    else:
        # Fall back to the timestamp of the last replication-added entry.
        matches, _ = NodeHelper.check_goxdcr_log(node, "Success adding replication specification",
                                                 goxdcr_log, print_matches=True)
        time_to_compare_str = matches[-1].split(' ')[0].split('.')[0]
        time_to_compare = time.strptime(time_to_compare_str, '%Y-%m-%dT%H:%M:%S')
    # Hoisted: end_time is loop-invariant (was re-parsed on every iteration).
    end_datetime = time.strptime(end_time, '%Y-%m-%dT%H:%M:%S') if end_time else None
    matches, count = NodeHelper.check_goxdcr_log(node, "bandwidth_limit=" + str(nw_max) +
                                                 ", bandwidth_usage=" + nw_usage,
                                                 goxdcr_log, print_matches=True)
    match_count = 0
    skip_count = 0
    for item in matches:
        items = item.split(' ')
        item_time = items[0].split('.')[0]
        item_datetime = time.strptime(item_time, '%Y-%m-%dT%H:%M:%S')
        # Skip entries outside the [time_to_compare, end_time] window.
        if item_datetime < time_to_compare:
            skip_count += 1
            continue
        if end_datetime and item_datetime > end_datetime:
            skip_count += 1
            continue
        bandwidth_usage = items[-1].split('=')[-1]
        if int(bandwidth_usage) <= nw_max:
            match_count += 1
        else:
            self.fail("Bandwidth usage higher than Bandwidth limit in {0}".format(item))
    if match_count + skip_count == count:
        self.log.info("{0} stale entries skipped".format(skip_count))
    if match_count > 0:
        self.log.info("{0} entries checked - Bandwidth usage always lower than Bandwidth limit as expected".
                      format(match_count))
    else:
        if self._input.param("replication_type") == "capi":
            # CAPI replications do not support the bandwidth throttler.
            self.log.info("Bandwidth Throttler not enabled on replication as expected")
        else:
            self.fail("Bandwidth Throttler not enabled on replication")
开发者ID:arod1987,项目名称:testrunner,代码行数:46,代码来源:nwusageXDCR.py
示例15: test_node_crash_master
def test_node_crash_master(self):
    """Kill Couchbase on the master node(s) named in the 'crash' param,
    restart them, wait for warmup, and verify replication."""
    self.setup_xdcr_and_load()
    crash_targets = self._input.param("crash", "").split('-')
    crashed_nodes = []
    if "C1" in crash_targets:
        crashed_nodes.append(self.src_master)
    if "C2" in crash_targets:
        crashed_nodes.append(self.dest_master)
    self.__kill_processes(crashed_nodes)
    # Restart every killed server, then wait until all have warmed up.
    for node in crashed_nodes:
        self.__start_cb_server(node)
    NodeHelper.wait_warmup_completed(crashed_nodes)
    self.async_perform_update_delete()
    self.verify_results()
开发者ID:lichia,项目名称:testrunner,代码行数:18,代码来源:uniXDCR.py
示例16: load_with_async_ops_with_warmup
def load_with_async_ops_with_warmup(self):
    """Warm up a node on the cluster(s) named in the warmup param (C1/C2)
    while async update/delete load runs, then verify replication.

    Skipped for ephemeral buckets, which have no warmup phase.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        # BUG FIX: this message was a bare string expression (a no-op
        # statement); log it so the skipped run is visible in test output.
        self.log.info("Test case does not apply for Ephemeral buckets")
        return
    self.setup_xdcr_and_load()
    warmupnodes = []
    if "C1" in self._warmup:
        warmupnodes.append(self.src_cluster.warmup_node())
    if "C2" in self._warmup:
        warmupnodes.append(self.dest_cluster.warmup_node())
    self.sleep(self._wait_timeout)
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout / 2)
    NodeHelper.wait_warmup_completed(warmupnodes)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:19,代码来源:uniXDCR.py
示例17: test_update_to_scramsha_auth
def test_update_to_scramsha_auth(self):
    """
    Start with ordinary replication, then switch to use scram_sha_auth.
    Search goxdcr logs for the success statements on both masters.
    """
    scram_log_msg = "HttpAuthMech=ScramSha for remote cluster reference remote_cluster"
    old_count = NodeHelper.check_goxdcr_log(self.src_cluster.get_master_node(), scram_log_msg)
    self.setup_xdcr()
    # Modify every remote cluster ref on both sides to use scramsha.
    for remote_cluster in self.src_cluster.get_remote_clusters() + self.dest_cluster.get_remote_clusters():
        remote_cluster.use_scram_sha_auth()
    self.sleep(60, "wait before checking the logs for using scram-sha")
    for node in (self.src_cluster.get_master_node(), self.dest_cluster.get_master_node()):
        count = NodeHelper.check_goxdcr_log(node, scram_log_msg)
        if count <= old_count:
            self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
        self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:19,代码来源:biXDCR.py
示例18: test_verify_mb19181
def test_verify_mb19181(self):
    """MB-19181: failover + rebalance of the destination master under load
    must not produce "Can't move update state from" errors in goxdcr logs."""
    load_tasks = self.setup_xdcr_async_load()
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) + '/goxdcr.log*'
    self.dest_cluster.failover_and_rebalance_master()
    for load_task in load_tasks:
        load_task.result()
    for src_node in self.src_cluster.get_nodes():
        err_count = NodeHelper.check_goxdcr_log(
            src_node,
            "Can't move update state from",
            goxdcr_log)
        self.assertEqual(err_count, 0, "Can't move update state from - error message found in " + str(src_node.ip))
        self.log.info("Can't move update state from - error message not found in " + str(src_node.ip))
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:19,代码来源:uniXDCR.py
示例19: replication_while_rebooting_a_non_master_src_dest_node
def replication_while_rebooting_a_non_master_src_dest_node(self):
    """Reboot one non-master node on each cluster during bidirectional
    replication and verify data converges after both come back.

    Skipped for ephemeral buckets.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    self.setup_xdcr_and_load()
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)
    reboot_node_dest = self.dest_cluster.reboot_one_node(self)
    # Allow extra time: nodes may also go through warmup after reboot.
    NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
    reboot_node_src = self.src_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
    self.sleep(120)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:19,代码来源:biXDCR.py
示例20: test_verify_mb19802_2
def test_verify_mb19802_2(self):
    """MB-19802: failover + rebalance of the destination master under load
    must not cause batchGetMeta fatal-error aborts in goxdcr logs."""
    load_tasks = self.setup_xdcr_async_load()
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) + '/goxdcr.log*'
    self.dest_cluster.failover_and_rebalance_master()
    for load_task in load_tasks:
        load_task.result()
    for src_node in self.src_cluster.get_nodes():
        err_count = NodeHelper.check_goxdcr_log(
            src_node,
            "batchGetMeta received fatal error and had to abort",
            goxdcr_log)
        self.assertEqual(err_count, 0, "batchGetMeta timed out error message found in " + str(src_node.ip))
        self.log.info("batchGetMeta error message not found in " + str(src_node.ip))
    self.sleep(300)
    self.verify_results()
开发者ID:arod1987,项目名称:testrunner,代码行数:20,代码来源:uniXDCR.py
注:本文中的xdcrnewbasetests.NodeHelper类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论