This page collects typical usage examples of the Python function utils.kill_sub_process. If you are wondering what kill_sub_process does or how to call it, the hand-picked examples below should help.
Twenty code examples of kill_sub_process are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
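All of the examples treat kill_sub_process as a small helper for stopping a child process that was started in the background. For orientation, here is a minimal sketch of what such a helper might look like. This sketch is an assumption inferred from how the examples call it (a proc handle plus an optional soft flag); it is not code taken from the Vitess test utilities:

def kill_sub_process(proc, soft=False):
  """Stop a background child process (hypothetical sketch).

  Assumed semantics: proc is a subprocess.Popen handle returned by a
  run_bg-style helper. soft=True sends SIGTERM so the child can clean up
  after itself; otherwise SIGKILL stops it immediately.
  """
  if proc is None:
    return
  if soft:
    proc.terminate()  # SIGTERM: graceful shutdown
  else:
    proc.kill()       # SIGKILL: immediate stop

Note the recurring pattern in the examples: soft=True is used for daemons that manage children of their own (mysqlctld, vtworker, the automation server), while the plain hard kill is reserved for simple load generators and one-off helper processes.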
Example 1: tearDownModule
def tearDownModule():
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  if use_mysqlctld:
    # Try to terminate mysqlctld gracefully, so it kills its mysqld.
    for proc in setup_procs:
      utils.kill_sub_process(proc, soft=True)
    teardown_procs = setup_procs
  else:
    teardown_procs = [
        tablet_master.teardown_mysql(),
        tablet_replica1.teardown_mysql(),
        tablet_replica2.teardown_mysql(),
    ]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  tablet_master.remove_tree()
  tablet_replica1.remove_tree()
  tablet_replica2.remove_tree()
Author: dumbunny · Project: vitess · Lines: 25 · Source: backup.py
Example 2: test_regular_operation
def test_regular_operation(self):
  # Use a dedicated worker to run all vtworker commands.
  worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(["--cell", "test_nj"], auto_log=True)
  vtworker_endpoint = "localhost:" + str(worker_rpc_port)
  automation_server_proc, automation_server_port = utils.run_automation_server()
  source_shard_list = "0"
  dest_shard_list = "-80,80-"
  _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
  utils.run(
      environment.binary_argstr("automation_client")
      + " --server localhost:"
      + str(automation_server_port)
      + " --task HorizontalReshardingTask"
      + " --param keyspace="
      + self.KEYSPACE
      + " --param source_shard_list="
      + source_shard_list
      + " --param dest_shard_list="
      + dest_shard_list
      + " --param vtctld_endpoint="
      + vtctld_endpoint
      + " --param vtworker_endpoint="
      + vtworker_endpoint
      + " --param min_healthy_rdonly_tablets=1"
  )
  self.verify()
  utils.kill_sub_process(automation_server_proc, soft=True)
  utils.kill_sub_process(worker_proc, soft=True)
Author: Analyticalloopholes · Project: vitess · Lines: 32 · Source: automation_horizontal_resharding.py
Example 3: test_vtgate_qps
def test_vtgate_qps(self):
  # create the topology
  utils.run_vtctl('CreateKeyspace test_keyspace')
  t = tablet.Tablet(tablet_uid=1, cell="nj")
  t.init_tablet("master", "test_keyspace", "0")
  t.update_addrs()
  utils.run_vtctl('RebuildShardGraph test_keyspace/0', auto_log=True)
  utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)
  # start vtgate and the qps-er
  vtgate_proc, vtgate_port = utils.vtgate_start()
  qpser = utils.run_bg(utils.vtroot + '/bin/zkclient2 -server localhost:%u -mode qps2 test_nj test_keyspace' % vtgate_port)
  time.sleep(10)
  utils.kill_sub_process(qpser)
  # get the vtgate vars, make sure we have what we need
  v = utils.get_vars(vtgate_port)
  # some checks on performance / stats
  # a typical workstation will do 38-40k QPS, check we have more than 15k
  rpcCalls = v['TopoReaderRpcQueryCount']['test_nj']
  if rpcCalls < 150000:
    self.fail('QPS is too low: %u < 15000' % (rpcCalls / 10))
  else:
    logging.debug("Recorded qps: %u", rpcCalls / 10)
  utils.vtgate_kill(vtgate_proc)
Author: iamima · Project: vitess · Lines: 26 · Source: zkocc_test.py
Example 4: test_regular_operation
def test_regular_operation(self):
  # Use a dedicated worker to run all vtworker commands.
  worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
      ['--cell', 'test_nj'],
      auto_log=True)
  vtworker_endpoint = 'localhost:' + str(worker_rpc_port)
  automation_server_proc, automation_server_port = (
      utils.run_automation_server())
  source_shard_list = '0'
  dest_shard_list = '-80,80-'
  _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
  utils.run(
      environment.binary_argstr('automation_client') +
      ' --server localhost:' + str(automation_server_port) +
      ' --task HorizontalReshardingTask' +
      ' --param keyspace=' + self.KEYSPACE +
      ' --param source_shard_list=' + source_shard_list +
      ' --param dest_shard_list=' + dest_shard_list +
      ' --param vtctld_endpoint=' + vtctld_endpoint +
      ' --param vtworker_endpoint=' + vtworker_endpoint +
      ' --param min_healthy_rdonly_endpoints=1')
  self.verify()
  utils.kill_sub_process(automation_server_proc, soft=True)
  utils.kill_sub_process(worker_proc, soft=True)
Author: TheRealAWebb · Project: vitess · Lines: 28 · Source: automation_horizontal_resharding.py
Example 5: tearDownModule
def tearDownModule():
  if utils.options.skip_teardown:
    return
  if use_mysqlctld:
    # Try to terminate mysqlctld gracefully, so it kills its mysqld.
    for proc in setup_procs:
      utils.kill_sub_process(proc, soft=True)
    teardown_procs = setup_procs
  else:
    teardown_procs = [
        tablet_62344.teardown_mysql(),
        tablet_31981.teardown_mysql(),
    ]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  tablet_62344.remove_tree()
  tablet_31981.remove_tree()
  path = os.path.join(environment.vtdataroot, 'snapshot')
  try:
    shutil.rmtree(path)
  except OSError as e:
    logging.debug("removing snapshot %s: %s", path, str(e))
Author: Eter365 · Project: vitess · Lines: 28 · Source: clone.py
Example 6: teardown
def teardown(self):
  import utils
  for cluster in self.clusters.itervalues():
    utils.kill_sub_process(cluster.proc)
    if not utils.options.keep_logs:
      shutil.rmtree(cluster.data_dir)
Author: Eter365 · Project: vitess · Lines: 7 · Source: etcd.py
Example 7: teardown
def teardown(self):
  import utils  # pylint: disable=g-import-not-at-top
  for cluster in self.clusters.itervalues():
    utils.kill_sub_process(cluster.proc)
    if not utils.options.keep_logs:
      shutil.rmtree(cluster.data_dir)
Author: gitql · Project: vitess · Lines: 7 · Source: etcd.py
Example 8: run_test_zkocc_qps
def run_test_zkocc_qps():
  _populate_zk()
  # preload the test_nj cell
  zkocc_14850 = utils.zkocc_start()
  qpser = utils.run_bg(utils.vtroot + '/bin/zkclient2 -server localhost:%u -mode qps /zk/test_nj/zkocc1/data1 /zk/test_nj/zkocc1/data2' % utils.zkocc_port_base)
  time.sleep(10)
  utils.kill_sub_process(qpser)
  # get the zkocc vars, make sure we have what we need
  v = utils.get_vars(utils.zkocc_port_base)
  if v['ZkReader']['test_nj']['State']['Current'] != 'Connected':
    raise utils.TestError('invalid zk global state: ',
                          v['ZkReader']['test_nj']['State']['Current'])
  if v['ZkReader']['test_nj']['State']['DurationConnected'] < 9e9:
    raise utils.TestError('not enough time in Connected state',
                          v['ZkReader']['test_nj']['State']['DurationConnected'])
  # some checks on performance / stats
  # a typical workstation will do 15k QPS, check we have more than 3k
  rpcCalls = v['ZkReader']['RpcCalls']
  if rpcCalls < 30000:
    raise utils.TestError('QPS is too low: %u < 30000', rpcCalls / 10)
  cacheReads = v['ZkReader']['test_nj']['CacheReads']
  if cacheReads < 30000:
    raise utils.TestError('Cache QPS is too low: %u < 30000', cacheReads / 10)
  totalCacheReads = v['ZkReader']['total']['CacheReads']
  if cacheReads != totalCacheReads:
    raise utils.TestError('Rollup stats are wrong: %u != %u', cacheReads,
                          totalCacheReads)
  if v['ZkReader']['UnknownCellErrors'] != 0:
    raise utils.TestError('unexpected UnknownCellErrors',
                          v['ZkReader']['UnknownCellErrors'])
  utils.zkocc_kill(zkocc_14850)
Author: ShawnShoper · Project: WeShare · Lines: 33 · Source: zkocc.py
Example 9: test_regular_operation
def test_regular_operation(self):
  # Use a dedicated worker to run all vtworker commands.
  worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
      ['--cell', 'test_nj'],
      auto_log=True)
  vtworker_endpoint = "localhost:" + str(worker_rpc_port)
  automation_server_proc, automation_server_port = utils.run_automation_server()
  keyspace = 'test_keyspace'
  source_shard_list = '0'
  dest_shard_list = '-80,80-'
  _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
  utils.run(environment.binary_argstr('automation_client') +
            ' --server localhost:' + str(automation_server_port) +
            ' --task HorizontalReshardingTask' +
            ' --param keyspace=' + keyspace +
            ' --param source_shard_list=' + source_shard_list +
            ' --param dest_shard_list=' + dest_shard_list +
            ' --param source_shard_rdonly_list=' +
            worker.shard_rdonly1.tablet_alias +
            ' --param dest_shard_rdonly_list=' +
            worker.shard_0_rdonly1.tablet_alias + ',' +
            worker.shard_1_rdonly1.tablet_alias +
            ' --param vtctld_endpoint=' + vtctld_endpoint +
            ' --param vtworker_endpoint=' + vtworker_endpoint)
  self.assert_shard_data_equal(0, worker.shard_master,
                               worker.shard_0_tablets.replica)
  self.assert_shard_data_equal(1, worker.shard_master,
                               worker.shard_1_tablets.replica)
  utils.kill_sub_process(automation_server_proc, soft=True)
  utils.kill_sub_process(worker_proc, soft=True)
Author: haoqoo · Project: vitess · Lines: 34 · Source: automation_horizontal_resharding.py
Example 10: test_zkocc_qps
def test_zkocc_qps(self):
  # preload the test_nj cell
  zkocc_14850 = utils.zkocc_start()
  qpser = utils.run_bg(utils.vtroot + '/bin/zkclient2 -server localhost:%u -mode qps /zk/test_nj/vt/zkocc1/data1 /zk/test_nj/vt/zkocc1/data2' % utils.zkocc_port_base)
  time.sleep(10)
  utils.kill_sub_process(qpser)
  # get the zkocc vars, make sure we have what we need
  v = utils.get_vars(utils.zkocc_port_base)
  if v['ZkReader']['test_nj']['State'] != 'Connected':
    raise utils.TestError('invalid zk global state: ',
                          v['ZkReader']['test_nj']['State'])
  # some checks on performance / stats
  # a typical workstation will do 45-47k QPS, check we have more than 15k
  rpcCalls = v['ZkReader']['RpcCalls']
  if rpcCalls < 150000:
    self.fail('QPS is too low: %u < 15000' % (rpcCalls / 10))
  else:
    logging.debug("Recorded qps: %u", rpcCalls / 10)
  cacheReads = v['ZkReader']['test_nj']['CacheReads']
  if cacheReads < 150000:
    self.fail('Cache QPS is too low: %u < 15000' % (cacheReads / 10))
  totalCacheReads = v['ZkReader']['total']['CacheReads']
  self.assertEqual(cacheReads, totalCacheReads, 'Rollup stats are wrong')
  self.assertEqual(v['ZkReader']['UnknownCellErrors'], 0,
                   'unexpected UnknownCellErrors')
  utils.zkocc_kill(zkocc_14850)
Author: iamima · Project: vitess · Lines: 27 · Source: zkocc_test.py
Example 11: test_primecache
def test_primecache(self):
  utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
  master.init_tablet('master', 'test_keyspace', '0')
  replica.init_tablet('idle')
  utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
  master.create_db('vt_test_keyspace')
  master.start_vttablet(wait_for_state=None)
  replica.start_vttablet(wait_for_state=None)
  master.wait_for_vttablet_state('SERVING')
  replica.wait_for_vttablet_state('NOT_SERVING')  # DB doesn't exist
  self._create_data()
  # we use clone to not prime the mysql cache on the slave db
  utils.run_vtctl(['Clone', '-force', '-server-mode',
                   master.tablet_alias, replica.tablet_alias],
                  auto_log=True)
  # sync the buffer cache, and clear it. This will prompt for the user's password
  utils.run(['sync'])
  utils.run(['sudo', 'bash', '-c', 'echo 1 > /proc/sys/vm/drop_caches'])
  # we can now change data on the master for 30s, while the slave is stopped.
  # the master's binlog will be in the OS buffer cache now.
  replica.mquery('', 'slave stop')
  self._change_random_data()
  use_primecache = True  # easy to test without
  if use_primecache:
    # start vtprimecache, sleeping for a couple seconds
    args = environment.binary_args('vtprimecache') + [
        '-db-config-dba-uname', 'vt_dba',
        '-db-config-dba-charset', 'utf8',
        '-db-config-dba-dbname', 'vt_test_keyspace',
        '-db-config-app-uname', 'vt_app',
        '-db-config-app-charset', 'utf8',
        '-db-config-app-dbname', 'vt_test_keyspace',
        '-relay_logs_path', replica.tablet_dir + '/relay-logs',
        '-mysql_socket_file', replica.tablet_dir + '/mysql.sock',
        '-log_dir', environment.vtlogroot,
        '-worker_count', '4',
        '-alsologtostderr',
    ]
    vtprimecache = utils.run_bg(args)
    time.sleep(2)
  # start the slave, see how long it takes to catch up on replication
  replica.mquery('', 'slave start')
  self.catch_up()
  if use_primecache:
    # TODO(alainjobart): read and check stats
    utils.kill_sub_process(vtprimecache)
  tablet.kill_tablets([master, replica])
Author: Carney · Project: vitess · Lines: 59 · Source: primecache.py
Example 12: teardown_mysql
def teardown_mysql(self, extra_args=None):
  if self.use_mysqlctld and self.mysqlctld_process:
    # If we use mysqlctld, we just terminate it gracefully, so it kills
    # its mysqld. And we return it, so we can wait for it.
    utils.kill_sub_process(self.mysqlctld_process, soft=True)
    return self.mysqlctld_process
  if utils.options.keep_logs:
    return self.shutdown_mysql(extra_args=extra_args)
  return self.mysqlctl(['teardown', '-force'], extra_args=extra_args)
Author: gitql · Project: vitess · Lines: 9 · Source: tablet.py
Example 13: test_restart
def test_restart(self):
  zkocc_server = utils.zkocc_start()
  shard_0_master.create_db('vt_test_keyspace')
  proc1 = shard_0_master.start_vttablet(cert=cert_dir + "/vt-server-cert.pem",
                                        key=cert_dir + "/vt-server-key.pem")
  proc2 = shard_0_master.start_vttablet(cert=cert_dir + "/vt-server-cert.pem",
                                        key=cert_dir + "/vt-server-key.pem")
  time.sleep(2.0)
  proc1.poll()
  if proc1.returncode is None:
    raise utils.TestError("proc1 still running")
  shard_0_master.kill_vttablet()
  utils.kill_sub_process(zkocc_server)
  logging.debug("Done here")
Author: CERN-Stage-3 · Project: vitess · Lines: 16 · Source: secure.py
Example 14: test_restart
def test_restart(self):
  zkocc_server = utils.zkocc_start()
  shard_0_master.create_db('vt_test_keyspace')
  proc1 = shard_0_master.start_vttablet(cert=cert_dir + "/vt-server-cert.pem",
                                        key=cert_dir + "/vt-server-key.pem")
  # Takes a bit longer for vttablet to serve the pid port
  time.sleep(1.0)
  proc2 = shard_0_master.start_vttablet(cert=cert_dir + "/vt-server-cert.pem",
                                        key=cert_dir + "/vt-server-key.pem")
  time.sleep(1.0)
  proc1.poll()
  if proc1.returncode is None:
    self.fail("proc1 still running")
  shard_0_master.kill_vttablet()
  utils.kill_sub_process(zkocc_server)
  logging.debug("Done here")
Author: bigrats · Project: vitess · Lines: 18 · Source: secure.py
Example 15: run_test_vttablet_authenticated
def run_test_vttablet_authenticated():
  utils.zk_wipe()
  utils.run_vtctl('CreateKeyspace -force test_keyspace')
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  utils.run_vtctl('RebuildShardGraph test_keyspace/0')
  utils.validate_topology()
  tablet_62344.populate('vt_test_keyspace', create_vt_select_test,
                        populate_vt_select_test)
  agent = tablet_62344.start_vttablet(auth=True)
  utils.run_vtctl('SetReadWrite ' + tablet_62344.tablet_alias)
  err, out = tablet_62344.vquery('select * from vt_select_test',
                                 path='test_keyspace/0',
                                 user='ala', password=r'ma kota')
  utils.debug("Got rows: " + out)
  if 'Row count: ' not in out:
    raise utils.TestError("query didn't go through: %s, %s" % (err, out))
  utils.kill_sub_process(agent)
Author: ShawnShoper · Project: WeShare · Lines: 18 · Source: tabletmanager.py
Example 16: test_vertical_split
def test_vertical_split(self):
  # Use a dedicated worker to run all vtworker commands.
  worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
      ['--cell', 'test_nj'],
      auto_log=True)
  vtworker_endpoint = 'localhost:' + str(worker_rpc_port)
  automation_server_proc, automation_server_port = (
      utils.run_automation_server())
  _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
  params = {'source_keyspace': 'source_keyspace',
            'dest_keyspace': 'destination_keyspace',
            'shard_list': '0',
            'tables': 'moving.*,view1',
            'vtctld_endpoint': vtctld_endpoint,
            'vtworker_endpoint': vtworker_endpoint,
           }
  args = ['--server', 'localhost:' + str(automation_server_port),
          '--task', 'VerticalSplitTask']
  args.extend(['--param=' + k + '=' + v for k, v in params.items()])
  utils.run(environment.binary_args('automation_client') + args)
  # One of the two source rdonly tablets went spare after the diff.
  # Force a healthcheck on both to get them back to "rdonly".
  for t in [vertical_split.source_rdonly1, vertical_split.source_rdonly2]:
    utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
  self._check_srv_keyspace('')
  self._check_blacklisted_tables(vertical_split.source_master,
                                 ['moving.*', 'view1'])
  self._check_blacklisted_tables(vertical_split.source_replica,
                                 ['moving.*', 'view1'])
  self._check_blacklisted_tables(vertical_split.source_rdonly1,
                                 ['moving.*', 'view1'])
  self._check_blacklisted_tables(vertical_split.source_rdonly2,
                                 ['moving.*', 'view1'])
  # check the binlog player is gone now
  vertical_split.destination_master.wait_for_binlog_player_count(0)
  utils.kill_sub_process(automation_server_proc, soft=True)
  utils.kill_sub_process(worker_proc, soft=True)
Author: 32kb · Project: vitess · Lines: 43 · Source: automation_vertical_split.py
Example 17: test_successful_resharding
def test_successful_resharding(self):
  """Reshard from 1 to 2 shards by running the workflow."""
  worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
      ['--cell', 'test_nj'], auto_log=True)
  vtworker_endpoint = 'localhost:%d' % worker_rpc_port
  stdout = utils.run_vtctl(['WorkflowCreate', 'horizontal_resharding',
                            '-keyspace=test_keyspace',
                            '-vtworkers=%s' % vtworker_endpoint],
                           auto_log=True)
  workflow_uuid = re.match(r'^uuid: (.*)$', stdout[0]).group(1)
  utils.pause('Now is a good time to look at vtctld UI at: '
              '%s, workflow uuid=%s' % (utils.vtctld.port, workflow_uuid))
  utils.run_vtctl(['WorkflowWait', workflow_uuid])
  self.verify()
  utils.kill_sub_process(worker_proc, soft=True)
Author: gitql · Project: vitess · Lines: 19 · Source: horizontal_resharding_workflow.py
Example 18: test_zkocc
def test_zkocc(self):
  # preload the test_nj cell
  zkocc_14850 = utils.zkocc_start(extra_params=['-connect-timeout=2s',
                                                '-cache-refresh-interval=1s'])
  time.sleep(1)
  # create a python client. The first address is bad, will test the retry logic
  bad_port = utils.reserve_ports(3)
  zkocc_client = zkocc.ZkOccConnection(
      "localhost:%u,localhost:%u,localhost:%u" %
      (bad_port, utils.zkocc_port_base, bad_port + 1), "test_nj", 30)
  zkocc_client.dial()
  # test failure for a python client that cannot connect
  bad_zkocc_client = zkocc.ZkOccConnection(
      "localhost:%u,localhost:%u" % (bad_port + 2, bad_port), "test_nj", 30)
  try:
    bad_zkocc_client.dial()
    raise utils.TestError('exception expected')
  except zkocc.ZkOccError as e:
    if str(e) != "Cannot dial to any server":
      raise
  level = logging.getLogger().getEffectiveLevel()
  logging.getLogger().setLevel(logging.ERROR)
  # FIXME(ryszard): This can be changed into a self.assertRaises.
  try:
    bad_zkocc_client.get("/zk/test_nj/vt/zkocc1/data1")
    self.fail('exception expected')
  except zkocc.ZkOccError as e:
    if str(e) != "Cannot dial to any server":
      raise
  logging.getLogger().setLevel(level)
  # get test
  utils.prog_compile(['zkclient2'])
  out, err = utils.run(utils.vtroot + '/bin/zkclient2 -server localhost:%u /zk/test_nj/vt/zkocc1/data1' % utils.zkocc_port_base, trap_output=True)
  self.assertEqual(err, "/zk/test_nj/vt/zkocc1/data1 = Test data 1 (NumChildren=0, Version=0, Cached=false, Stale=false)\n")
  zk_data = zkocc_client.get("/zk/test_nj/vt/zkocc1/data1")
  self.assertDictContainsSubset({'Data': "Test data 1",
                                 'Cached': True,
                                 'Stale': False},
                                zk_data)
  self.assertDictContainsSubset({'NumChildren': 0, 'Version': 0},
                                zk_data['Stat'])
  # getv test
  out, err = utils.run(utils.vtroot + '/bin/zkclient2 -server localhost:%u /zk/test_nj/vt/zkocc1/data1 /zk/test_nj/vt/zkocc1/data2 /zk/test_nj/vt/zkocc1/data3' % utils.zkocc_port_base, trap_output=True)
  self.assertEqualNormalized(err, """[0] /zk/test_nj/vt/zkocc1/data1 = Test data 1 (NumChildren=0, Version=0, Cached=true, Stale=false)
[1] /zk/test_nj/vt/zkocc1/data2 = Test data 2 (NumChildren=0, Version=0, Cached=false, Stale=false)
[2] /zk/test_nj/vt/zkocc1/data3 = Test data 3 (NumChildren=0, Version=0, Cached=false, Stale=false)
""")
  zk_data = zkocc_client.getv(["/zk/test_nj/vt/zkocc1/data1",
                               "/zk/test_nj/vt/zkocc1/data2",
                               "/zk/test_nj/vt/zkocc1/data3"])['Nodes']
  self.assertEqual(len(zk_data), 3)
  for i, d in enumerate(zk_data):
    self.assertEqual(d['Data'], 'Test data %s' % (i + 1))
    self.assertTrue(d['Cached'])
    self.assertFalse(d['Stale'])
    self.assertDictContainsSubset({'NumChildren': 0, 'Version': 0}, d['Stat'])
  # children test
  out, err = utils.run(utils.vtroot + '/bin/zkclient2 -server localhost:%u -mode children /zk/test_nj/vt' % utils.zkocc_port_base, trap_output=True)
  self.assertEqualNormalized(err, """Path = /zk/test_nj/vt
Child[0] = zkocc1
Child[1] = zkocc2
NumChildren = 2
CVersion = 2
Cached = false
Stale = false
""")
  # zk command tests
  self._check_zk_output("cat /zk/test_nj/vt/zkocc1/data1", "Test data 1")
  self._check_zk_output("ls -l /zk/test_nj/vt/zkocc1", """total: 3
-rw-rw-rw- zk zk 11 %s data1
-rw-rw-rw- zk zk 11 %s data2
-rw-rw-rw- zk zk 11 %s data3
""" % (_format_time(zk_data[0]['Stat']['MTime']),
       _format_time(zk_data[1]['Stat']['MTime']),
       _format_time(zk_data[2]['Stat']['MTime'])))
  # test /zk/local is not resolved and rejected
  out, err = utils.run(utils.vtroot + '/bin/zkclient2 -server localhost:%u /zk/local/vt/zkocc1/data1' % utils.zkocc_port_base, trap_output=True, raise_on_error=False)
  self.assertIn("zkocc: cannot resolve local cell", err)
  # start a background process to query the same value over and over again
  # while we kill the zk server and restart it
  outfd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
  filename = outfd.name
  querier = utils.run_bg('/bin/bash -c "while true ; do ' + utils.vtroot + '/bin/zkclient2 -server localhost:%u /zk/test_nj/vt/zkocc1/data1 ; sleep 0.1 ; done"' % utils.zkocc_port_base, stderr=outfd.file)
  outfd.close()
  time.sleep(1)
  # kill zk server, sleep a bit, restart zk server, sleep a bit
  utils.run(utils.vtroot + '/bin/zkctl -zk.cfg 1@' + utils.hostname + ':%u:%u:%u shutdown' % (utils.zk_port_base, utils.zk_port_base + 1, utils.zk_port_base + 2))
  time.sleep(3)
  utils.run(utils.vtroot + '/bin/zkctl -zk.cfg 1@' + utils.hostname + ':%u:%u:%u start' % (utils.zk_port_base, utils.zk_port_base + 1, utils.zk_port_base + 2))
  time.sleep(3)
  utils.kill_sub_process(querier)
  logging.debug("Checking %s", filename)
  fd = open(filename, "r")
#......... (remainder of this code omitted) .........
Author: iamima · Project: vitess · Lines: 101 · Source: zkocc_test.py
Example 19: stop
def stop(self):
  import utils  # pylint: disable=g-import-not-at-top
  utils.kill_sub_process(self.proc)
  self.proc.wait()
  shutil.rmtree(self.data_dir)
Author: gitql · Project: vitess · Lines: 6 · Source: etcd2.py
Example 20: verify_successful_worker_copy_with_reparent
#......... (beginning of this code omitted) .........
# rows across shards when sorted by primary key, remove
# --chunk_count 2, --min_rows_per_chunk 1 and set
# --source_reader_count back to 1.
args.extend(['--source_reader_count', '2',
             '--chunk_count', '2',
             '--min_rows_per_chunk', '1',
             '--write_query_max_rows', '1'])
args.append('test_keyspace/0')
workerclient_proc = utils.run_vtworker_client_bg(args, worker_rpc_port)
if mysql_down:
  # If MySQL is down, we wait until vtworker retried at least once to make
  # sure it reached the point where a write failed due to MySQL being down.
  # There should be two retries at least, one for each destination shard.
  utils.poll_for_vars(
      'vtworker', worker_port,
      'WorkerRetryCount >= 2',
      condition_fn=lambda v: v.get('WorkerRetryCount') >= 2)
  logging.debug('Worker has retried at least twice, starting reparent now')
  # vtworker is blocked at this point. This is a good time to test that its
  # throttler server is reacting to RPCs.
  self.check_throttler_service('localhost:%d' % worker_rpc_port,
                               ['test_keyspace/-80', 'test_keyspace/80-'],
                               9999)
  # Bring back masters. Since we test with semi-sync now, we need at least
  # one replica for the new master. This test is already quite expensive,
  # so we bring back the old master as a replica rather than having a third
  # replica up the whole time.
  logging.debug('Restarting mysqld on destination masters')
  utils.wait_procs(
      [shard_0_master.start_mysql(),
       shard_1_master.start_mysql()])
  # Reparent away from the old masters.
  utils.run_vtctl(
      ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
       '-new_master', shard_0_replica.tablet_alias], auto_log=True)
  utils.run_vtctl(
      ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
       '-new_master', shard_1_replica.tablet_alias], auto_log=True)
else:
  # NOTE: There is a race condition around this:
  # It's possible that the SplitClone vtworker command finishes before the
  # PlannedReparentShard vtctl command, which we start below, succeeds.
  # Then the test would fail because vtworker did not have to retry.
  #
  # To workaround this, the test takes a parameter to increase the number of
  # rows that the worker has to copy (with the idea being to slow the worker
  # down).
  # You should choose a value for num_insert_rows, such that this test
  # passes for your environment (trial-and-error...)
  # Make sure that vtworker got past the point where it picked a master
  # for each destination shard ("finding targets" state).
  utils.poll_for_vars(
      'vtworker', worker_port,
      'WorkerState == cloning the data (online)',
      condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
      ' data (online)')
  logging.debug('Worker is in copy state, starting reparent now')
  utils.run_vtctl(
      ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
       '-new_master', shard_0_replica.tablet_alias], auto_log=True)
  utils.run_vtctl(
      ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
       '-new_master', shard_1_replica.tablet_alias], auto_log=True)
utils.wait_procs([workerclient_proc])
# Verify that we were forced to re-resolve and retry.
worker_vars = utils.get_vars(worker_port)
self.assertGreater(worker_vars['WorkerRetryCount'], 1,
                   "expected vtworker to retry each of the two reparented"
                   " destination masters at least once, but it didn't")
self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
                    "expected vtworker to retry, but it didn't")
utils.kill_sub_process(worker_proc, soft=True)
# Wait for the destination RDONLYs to catch up or the following offline
# clone will try to insert rows which already exist.
# TODO(mberlin): Remove this once SplitClone supports it natively.
utils.wait_for_replication_pos(shard_0_replica, shard_0_rdonly1)
utils.wait_for_replication_pos(shard_1_replica, shard_1_rdonly1)
# Run final offline clone to enable filtered replication.
_, _ = utils.run_vtworker(['-cell', 'test_nj',
                           '--use_v3_resharding_mode=false',
                           'SplitClone',
                           '--online=false',
                           '--min_healthy_rdonly_tablets', '1',
                           'test_keyspace/0'], auto_log=True)
# Make sure that everything is caught up to the same replication point
self.run_split_diff('test_keyspace/-80', all_shard_tablets, shard_0_tablets)
self.run_split_diff('test_keyspace/80-', all_shard_tablets, shard_1_tablets)
self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica)
self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica)
Author: alainjobart · Project: vitess · Lines: 101 · Source: worker.py
Note: The utils.kill_sub_process examples on this page were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before redistributing or reusing them, and do not republish without permission.