Python utils.run_vtworker_bg Function Code Examples


This article collects typical usage examples of the Python function utils.run_vtworker_bg. If you are wondering what run_vtworker_bg does, how to call it, or what real-world uses look like, the curated examples below should help.



A total of 13 code examples of run_vtworker_bg are shown below, sorted by popularity by default.
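Before diving in, here is a minimal sketch of the calling convention every example shares. It assumes the Vitess Python test environment where the utils module lives; the meaning of the three-element return value (process handle, debug/status port, gRPC port) and of the auto_log keyword is inferred from the call sites below, not from the upstream test/utils.py, so treat those details as assumptions.

    import utils

    # Start vtworker in the background; the list holds extra vtworker
    # command-line flags. auto_log=True forwards the worker's logs to the
    # test logger, as every example below does.
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)

    # worker_rpc_port is what clients connect to; worker_port appears to
    # serve the /debug/vars endpoint that utils.poll_for_vars reads in
    # Examples 7-13.
    vtworker_endpoint = 'localhost:%d' % worker_rpc_port

    # ... issue SplitClone or other commands, e.g. via
    # utils.run_vtworker_client_bg(...) as in the later examples ...

    # Every example ends by soft-killing the background process.
    utils.kill_sub_process(worker_proc, soft=True)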

Example 1: test_regular_operation

  def test_regular_operation(self):
    # Use a dedicated worker to run all vtworker commands.
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)
    vtworker_endpoint = "localhost:" + str(worker_rpc_port)

    automation_server_proc, automation_server_port = utils.run_automation_server()

    keyspace = 'test_keyspace'
    source_shard_list = '0'
    dest_shard_list = '-80,80-'
    _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
    utils.run(environment.binary_argstr('automation_client') +
              ' --server localhost:' + str(automation_server_port) +
              ' --task HorizontalReshardingTask' +
              ' --param keyspace=' + keyspace +
              ' --param source_shard_list=' + source_shard_list +
              ' --param dest_shard_list=' + dest_shard_list +
              ' --param source_shard_rdonly_list=' +
              worker.shard_rdonly1.tablet_alias +
              ' --param dest_shard_rdonly_list=' +
              worker.shard_0_rdonly1.tablet_alias + ',' +
              worker.shard_1_rdonly1.tablet_alias +
              ' --param vtctld_endpoint=' + vtctld_endpoint +
              ' --param vtworker_endpoint=' + vtworker_endpoint)

    self.assert_shard_data_equal(0, worker.shard_master,
                                 worker.shard_0_tablets.replica)
    self.assert_shard_data_equal(1, worker.shard_master,
                                 worker.shard_1_tablets.replica)

    utils.kill_sub_process(automation_server_proc, soft=True)
    utils.kill_sub_process(worker_proc, soft=True)
Author: haoqoo | Project: vitess | Lines: 34 | Source: automation_horizontal_resharding.py


Example 2: test_regular_operation

    def test_regular_operation(self):
        # Use a dedicated worker to run all vtworker commands.
        worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(["--cell", "test_nj"], auto_log=True)
        vtworker_endpoint = "localhost:" + str(worker_rpc_port)

        automation_server_proc, automation_server_port = utils.run_automation_server()

        source_shard_list = "0"
        dest_shard_list = "-80,80-"
        _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
        utils.run(
            environment.binary_argstr("automation_client")
            + " --server localhost:"
            + str(automation_server_port)
            + " --task HorizontalReshardingTask"
            + " --param keyspace="
            + self.KEYSPACE
            + " --param source_shard_list="
            + source_shard_list
            + " --param dest_shard_list="
            + dest_shard_list
            + " --param vtctld_endpoint="
            + vtctld_endpoint
            + " --param vtworker_endpoint="
            + vtworker_endpoint
            + " --param min_healthy_rdonly_tablets=1"
        )

        self.verify()

        utils.kill_sub_process(automation_server_proc, soft=True)
        utils.kill_sub_process(worker_proc, soft=True)
Author: Analyticalloopholes | Project: vitess | Lines: 32 | Source: automation_horizontal_resharding.py


Example 3: test_regular_operation

  def test_regular_operation(self):
    # Use a dedicated worker to run all vtworker commands.
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)
    vtworker_endpoint = 'localhost:' + str(worker_rpc_port)

    automation_server_proc, automation_server_port = (
        utils.run_automation_server())

    source_shard_list = '0'
    dest_shard_list = '-80,80-'
    _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
    utils.run(
        environment.binary_argstr('automation_client') +
        ' --server localhost:' + str(automation_server_port) +
        ' --task HorizontalReshardingTask' +
        ' --param keyspace=' + self.KEYSPACE +
        ' --param source_shard_list=' + source_shard_list +
        ' --param dest_shard_list=' + dest_shard_list +
        ' --param vtctld_endpoint=' + vtctld_endpoint +
        ' --param vtworker_endpoint=' + vtworker_endpoint +
        ' --param min_healthy_rdonly_endpoints=1')

    self.verify()

    utils.kill_sub_process(automation_server_proc, soft=True)
    utils.kill_sub_process(worker_proc, soft=True)
Author: TheRealAWebb | Project: vitess | Lines: 28 | Source: automation_horizontal_resharding.py


Example 4: test_vertical_split

  def test_vertical_split(self):
    # Use a dedicated worker to run all vtworker commands.
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)
    vtworker_endpoint = 'localhost:' + str(worker_rpc_port)

    automation_server_proc, automation_server_port = (
        utils.run_automation_server())

    _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
    params = {'source_keyspace': 'source_keyspace',
              'dest_keyspace': 'destination_keyspace',
              'shard_list': '0',
              'tables': 'moving.*,view1',
              'vtctld_endpoint': vtctld_endpoint,
              'vtworker_endpoint': vtworker_endpoint,
             }
    args = ['--server', 'localhost:' + str(automation_server_port),
            '--task', 'VerticalSplitTask']
    args.extend(['--param=' + k + '=' + v for k, v in params.items()])
    utils.run(environment.binary_args('automation_client') + args)

    # One of the two source rdonly tablets went spare after the diff.
    # Force a healthcheck on both to get them back to "rdonly".
    for t in [vertical_split.source_rdonly1, vertical_split.source_rdonly2]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])

    self._check_srv_keyspace('')
    self._check_blacklisted_tables(vertical_split.source_master,
                                   ['moving.*', 'view1'])
    self._check_blacklisted_tables(vertical_split.source_replica,
                                   ['moving.*', 'view1'])
    self._check_blacklisted_tables(vertical_split.source_rdonly1,
                                   ['moving.*', 'view1'])
    self._check_blacklisted_tables(vertical_split.source_rdonly2,
                                   ['moving.*', 'view1'])

    # check the binlog player is gone now
    vertical_split.destination_master.wait_for_binlog_player_count(0)

    utils.kill_sub_process(automation_server_proc, soft=True)
    utils.kill_sub_process(worker_proc, soft=True)
Author: 32kb | Project: vitess | Lines: 43 | Source: automation_vertical_split.py


Example 5: test_successful_resharding

  def test_successful_resharding(self):
    """Reshard from 1 to 2 shards by running the workflow."""
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'], auto_log=True)
    vtworker_endpoint = 'localhost:%d' % worker_rpc_port

    stdout = utils.run_vtctl(['WorkflowCreate', 'horizontal_resharding',
                              '-keyspace=test_keyspace',
                              '-vtworkers=%s' % vtworker_endpoint],
                             auto_log=True)
    workflow_uuid = re.match(r'^uuid: (.*)$', stdout[0]).group(1)

    utils.pause('Now is a good time to look at vtctld UI at: '
                '%s, workflow uuid=%s' % (utils.vtctld.port, workflow_uuid))

    utils.run_vtctl(['WorkflowWait', workflow_uuid])

    self.verify()
    utils.kill_sub_process(worker_proc, soft=True)
Author: gitql | Project: vitess | Lines: 19 | Source: horizontal_resharding_workflow.py


Example 6: setUp

  def setUp(self):
    # Run vtworker without any optional arguments to start in interactive mode.
    self.worker_proc, self.worker_port, _ = utils.run_vtworker_bg([])
Author: DalianDragon | Project: vitess | Lines: 3 | Source: worker.py


Example 7: verify_successful_worker_copy_with_reparent

  def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
    """Verifies that vtworker can successfully copy data for a SplitClone.

    Order of operations:
    1. Run a background vtworker
    2. Wait until the worker successfully resolves the destination masters.
    3. Reparent the destination tablets
    4. Wait until the vtworker copy is finished
    5. Verify that the worker was forced to reresolve topology and retry writes
      due to the reparent.
    6. Verify that the data was copied successfully to both new shards

    Args:
      mysql_down: boolean. If True, we take down the MySQL instances on the
        destination masters at first, then bring them back and reparent away.

    Raises:
      AssertionError if things didn't go as expected.
    """
    if mysql_down:
      logging.debug('Shutting down mysqld on destination masters.')
      utils.wait_procs(
          [shard_0_master.shutdown_mysql(),
           shard_1_master.shutdown_mysql()])

    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)

    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--source_reader_count', '1',
         '--destination_pack_count', '1',
         '--destination_writer_count', '1',
         'test_keyspace/0'],
        worker_rpc_port)

    if mysql_down:
      # If MySQL is down, we wait until resolving at least twice (to verify that
      # we do reresolve and retry due to MySQL being down).
      worker_vars = utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerDestinationActualResolves >= 2',
          condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 2)
      self.assertNotEqual(
          worker_vars['WorkerRetryCount'], {},
          "expected vtworker to retry, but it didn't")
      logging.debug('Worker has resolved at least twice, starting reparent now')

      # Bring back masters. Since we test with semi-sync now, we need at least
      # one replica for the new master. This test is already quite expensive,
      # so we bring back the old master as a replica rather than having a third
      # replica up the whole time.
      logging.debug('Restarting mysqld on destination masters')
      utils.wait_procs(
          [shard_0_master.start_mysql(),
           shard_1_master.start_mysql()])

      # Reparent away from the old masters.
      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/-80',
           shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/80-',
           shard_1_replica.tablet_alias], auto_log=True)

    else:
      # NOTE: There is a race condition around this:
      #   It's possible that the SplitClone vtworker command finishes before the
      #   PlannedReparentShard vtctl command, which we start below, succeeds.
      #   Then the test would fail because vtworker did not have to resolve the
      #   master tablet again (due to the missing reparent).
      #
      # To workaround this, the test takes a parameter to increase the number of
      # rows that the worker has to copy (with the idea being to slow the worker
      # down).
      # You should choose a value for num_insert_rows, such that this test
      # passes for your environment (trial-and-error...)
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerDestinationActualResolves >= 1',
          condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 1)
      logging.debug('Worker has resolved at least once, starting reparent now')

      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/-80',
           shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/80-',
           shard_1_replica.tablet_alias], auto_log=True)

    utils.wait_procs([workerclient_proc])

    # Verify that we were forced to reresolve and retry.
    worker_vars = utils.get_vars(worker_port)
    self.assertGreater(worker_vars['WorkerDestinationActualResolves'], 1)
    self.assertGreater(worker_vars['WorkerDestinationAttemptedResolves'], 1)
    self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
                        "expected vtworker to retry, but it didn't")
    utils.kill_sub_process(worker_proc, soft=True)
#......... part of the code omitted here .........
Author: DalianDragon | Project: vitess | Lines: 101 | Source: worker.py


Example 8: verify_successful_worker_copy_with_reparent

  def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
    """Verifies that vtworker can successfully copy data for a SplitClone.

    Order of operations:
    1. Run a background vtworker
    2. Wait until the worker successfully resolves the destination masters.
    3. Reparent the destination tablets
    4. Wait until the vtworker copy is finished
    5. Verify that the worker was forced to reresolve topology and retry writes
      due to the reparent.
    6. Verify that the data was copied successfully to both new shards

    Args:
      mysql_down - boolean, True iff we expect the MySQL instances on the
        destination masters to be down.

    Raises:
      AssertionError if things didn't go as expected.
    """
    worker_proc, worker_port, _ = utils.run_vtworker_bg(['--cell', 'test_nj',
                        'SplitClone',
                        '--source_reader_count', '1',
                        '--destination_pack_count', '1',
                        '--destination_writer_count', '1',
                        '--strategy=-populate_blp_checkpoint',
                        'test_keyspace/0'],
                       auto_log=True)

    if mysql_down:
      # If MySQL is down, we wait until resolving at least twice (to verify that
      # we do reresolve and retry due to MySQL being down).
      worker_vars = utils.poll_for_vars('vtworker', worker_port,
        'WorkerDestinationActualResolves >= 2',
        condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 2)
      self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
        "expected vtworker to retry, but it didn't")
      logging.debug("Worker has resolved at least twice, starting reparent now")

      # Original masters have no running MySQL, so need to force the reparent
      utils.run_vtctl(['EmergencyReparentShard', 'test_keyspace/-80',
        shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(['EmergencyReparentShard', 'test_keyspace/80-',
        shard_1_replica.tablet_alias], auto_log=True)

    else:
      utils.poll_for_vars('vtworker', worker_port,
        'WorkerDestinationActualResolves >= 1',
        condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 1)
      logging.debug("Worker has resolved at least once, starting reparent now")

      utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/-80',
        shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/80-',
        shard_1_replica.tablet_alias], auto_log=True)

    logging.debug("Polling for worker state")
    # There are a couple of race conditions around this, that we need to be careful of:
    # 1. It's possible for the reparent step to take so long that the worker will
    #   actually finish before we get to the polling step. To workaround this,
    #   the test takes a parameter to increase the number of rows that the worker
    #   has to copy (with the idea being to slow the worker down).
    # 2. If the worker has a huge number of rows to copy, it's possible for the
    #   polling to timeout before the worker has finished copying the data.
    #
    # You should choose a value for num_insert_rows, such that this test passes
    # for your environment (trial-and-error...)
    worker_vars = utils.poll_for_vars('vtworker', worker_port,
      'WorkerState == cleaning up',
      condition_fn=lambda v: v.get('WorkerState') == 'cleaning up',
      # We know that vars should already be ready, since we read them earlier
      require_vars=True,
      # We're willing to let the test run for longer to make it less flaky.
      # This should still fail fast if something goes wrong with vtworker,
      # because of the require_vars flag above.
      timeout=5*60)

    # Verify that we were forced to reresolve and retry.
    self.assertGreater(worker_vars['WorkerDestinationActualResolves'], 1)
    self.assertGreater(worker_vars['WorkerDestinationAttemptedResolves'], 1)
    self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
      "expected vtworker to retry, but it didn't")

    utils.wait_procs([worker_proc])

    # Make sure that everything is caught up to the same replication point
    self.run_split_diff('test_keyspace/-80', shard_tablets, shard_0_tablets)
    self.run_split_diff('test_keyspace/80-', shard_tablets, shard_1_tablets)

    self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica)
    self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica)
Author: haoqoo | Project: vitess | Lines: 90 | Source: worker.py


Example 9: test_resharding


#......... part of the code omitted here .........
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1)
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1)
    utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1)

    # check the Map Reduce API works correctly, should use ExecuteKeyRanges now,
    # as we are sharded (with just one shard).
    # again, we have 3 values in the database, asking for 4 splits will get us
    # a single query.
    sql = 'select id, msg from resharding1'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 1)
    self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
    # There must be one empty KeyRange which represents the full keyspace.
    self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
    self.assertEqual(s[0]['key_range_part']['key_ranges'][0], {})

    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -\n'
                             'Partitions(rdonly): -\n'
                             'Partitions(replica): -\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')

    # we need to create the schema, and the worker will do data copying
    for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
      utils.run_vtctl(['CopySchemaShard',
                       '--exclude_tables', 'unrelated',
                       shard_rdonly1.tablet_alias,
                       keyspace_shard],
                      auto_log=True)
    utils.run_vtctl(['RunHealthCheck', shard_rdonly1.tablet_alias])

    # Run vtworker as daemon for the following SplitClone commands.
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--command_display_interval', '10ms'],
        auto_log=True)

    # Initial clone (online).
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--exclude_tables', 'unrelated',
         '--min_table_size_for_split', '1',
         '--min_healthy_rdonly_tablets', '1',
         'test_keyspace/0'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        3, 0, 0)

    # Reset vtworker such that we can run the next command.
    workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
    utils.wait_procs([workerclient_proc])

    # Modify the destination shard. SplitClone will revert the changes.
    # Delete row 1 (provokes an insert).
    shard_0_master.mquery('vt_test_keyspace',
                          'delete from resharding1 where id=1', write=True)
    # Delete row 2 (provokes an insert).
    shard_1_master.mquery('vt_test_keyspace',
                          'delete from resharding1 where id=2', write=True)
    # Update row 3 (provokes an update).
    shard_1_master.mquery('vt_test_keyspace',
                          "update resharding1 set msg='msg-not-3' where id=3",
                          write=True)
    # Insert row 4 (provokes a delete).
Author: arunlodhi | Project: vitess | Lines: 67 | Source: initial_sharding.py


Example 10: test_resharding


#......... part of the code omitted here .........
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-c0',
                     shard_2_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/c0-',
                     shard_3_master.tablet_alias], auto_log=True)

    # check the shards
    shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
    for s in ['-80', '80-', '80-c0', 'c0-']:
      self.assertIn(s, shards, 'unexpected shards: %s' % str(shards))
    self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards))

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                    auto_log=True)
    utils.check_srv_keyspace(
        'test_nj', 'test_keyspace',
        'Partitions(master): -80 80-\n'
        'Partitions(rdonly): -80 80-\n'
        'Partitions(replica): -80 80-\n',
        keyspace_id_type=base_sharding.keyspace_id_type,
        sharding_column_name='custom_ksid_col')

    # disable shard_1_slave2, so we're sure filtered replication will go
    # from shard_1_slave1
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare'])
    shard_1_slave2.wait_for_vttablet_state('NOT_SERVING')

    # we need to create the schema, and the worker will do data copying
    for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
      utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
                       shard_1_rdonly1.tablet_alias, keyspace_shard],
                      auto_log=True)

    # Run vtworker as daemon for the following SplitClone commands.
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--command_display_interval', '10ms'],
        auto_log=True)

    # Copy the data from the source to the destination shards.
    # min_table_size_for_split is set to 1 as to force a split even on the
    # small table we have.
    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    #
    # Initial clone (online).
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--exclude_tables', 'unrelated',
         '--min_table_size_for_split', '1',
         '--min_healthy_rdonly_tablets', '1',
         '--max_tps', '9999',
         'test_keyspace/80-'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        2, 0, 0)

    # Reset vtworker such that we can run the next command.
    workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
    utils.wait_procs([workerclient_proc])

    # Modify the destination shard. SplitClone will revert the changes.
    # Delete row 2 (provokes an insert).
    shard_2_master.mquery('vt_test_keyspace',
                          'delete from resharding1 where id=2', write=True)
Author: CowLeo | Project: vitess | Lines: 67 | Source: resharding.py


Example 11: test_merge_sharding

  def test_merge_sharding(self):
    utils.run_vtctl(['CreateKeyspace',
                     '--sharding_column_name', 'custom_ksid_col',
                     '--sharding_column_type', base_sharding.keyspace_id_type,
                     'test_keyspace'])

    shard_0_master.init_tablet('replica', 'test_keyspace', '-40')
    shard_0_replica.init_tablet('replica', 'test_keyspace', '-40')
    shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '-40')
    shard_1_master.init_tablet('replica', 'test_keyspace', '40-80')
    shard_1_replica.init_tablet('replica', 'test_keyspace', '40-80')
    shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '40-80')
    shard_2_master.init_tablet('replica', 'test_keyspace', '80-')
    shard_2_replica.init_tablet('replica', 'test_keyspace', '80-')
    shard_2_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')

    # rebuild and check SrvKeyspace
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')

    # create databases so vttablet can start behaving normally
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
              shard_1_master, shard_1_replica, shard_1_rdonly,
              shard_2_master, shard_2_replica, shard_2_rdonly]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None,
                       binlog_use_v3_resharding_mode=False)

    # won't be serving, no replication state
    for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
              shard_1_master, shard_1_replica, shard_1_rdonly,
              shard_2_master, shard_2_replica, shard_2_rdonly]:
      t.wait_for_vttablet_state('NOT_SERVING')

    # reparent to make the tablets work
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-40',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/40-80',
                     shard_1_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
                     shard_2_master.tablet_alias], auto_log=True)

    # create the tables
    self._create_schema()
    self._insert_startup_values()

    # run a health check on source replicas so they respond to discovery
    # (for binlog players) and on the source rdonlys (for workers)
    for t in [shard_0_replica, shard_1_replica]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    for t in [shard_0_rdonly, shard_1_rdonly]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])

    # create the merge shards
    shard_dest_master.init_tablet('replica', 'test_keyspace', '-80')
    shard_dest_replica.init_tablet('replica', 'test_keyspace', '-80')
    shard_dest_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')

    # start vttablet on the destination shard (no db created,
    # so they're all not serving)
    for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
      t.start_vttablet(wait_for_state=None,
                       binlog_use_v3_resharding_mode=False)
    for t in [shard_dest_master, shard_dest_replica, shard_dest_rdonly]:
      t.wait_for_vttablet_state('NOT_SERVING')

    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
                     shard_dest_master.tablet_alias], auto_log=True)

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                    auto_log=True)
    utils.check_srv_keyspace(
        'test_nj', 'test_keyspace',
        'Partitions(master): -40 40-80 80-\n'
        'Partitions(rdonly): -40 40-80 80-\n'
        'Partitions(replica): -40 40-80 80-\n',
        keyspace_id_type=base_sharding.keyspace_id_type,
        sharding_column_name='custom_ksid_col')

    # copy the schema
    utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
                     'test_keyspace/-80'], auto_log=True)

    # copy the data (will also start filtered replication), reset source
    # Run vtworker as daemon for the following SplitClone commands.
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--command_display_interval', '10ms',
          '--use_v3_resharding_mode=false'],
        auto_log=True)

    # Initial clone (online).
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         'test_keyspace/-80'],
        worker_rpc_port)
#......... part of the code omitted here .........
Author: alainjobart | Project: vitess | Lines: 101 | Source: merge_sharding.py


Example 12: verify_successful_worker_copy_with_reparent

  def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
    """Verifies that vtworker can successfully copy data for a SplitClone.

    Order of operations:
    1. Run a background vtworker
    2. Wait until the worker successfully resolves the destination masters.
    3. Reparent the destination tablets
    4. Wait until the vtworker copy is finished
    5. Verify that the worker was forced to reresolve topology and retry writes
      due to the reparent.
    6. Verify that the data was copied successfully to both new shards

    Args:
      mysql_down: boolean. If True, we take down the MySQL instances on the
        destination masters at first, then bring them back and reparent away.

    Raises:
      AssertionError if things didn't go as expected.
    """
    if mysql_down:
      logging.debug('Shutting down mysqld on destination masters.')
      utils.wait_procs(
          [shard_0_master.shutdown_mysql(),
           shard_1_master.shutdown_mysql()])

    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--use_v3_resharding_mode=false'],
        auto_log=True)

    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    # --chunk_count is 2 because rows are currently ordered by primary key such
    # that all rows of the first shard come first and then the second shard.
    # TODO(mberlin): Remove --offline=false once vtworker ensures that the
    #                destination shards are not behind the master's replication
    #                position.
    args = ['SplitClone',
            '--offline=false',
            '--destination_writer_count', '1',
            '--min_healthy_rdonly_tablets', '1',
            '--max_tps', '9999']
    if not mysql_down:
      # Make the clone as slow as necessary such that there is enough time to
      # run PlannedReparent in the meantime.
      # TODO(mberlin): Once insert_values is fixed to uniformly distribute the
      #                rows across shards when sorted by primary key, remove
      #                --chunk_count 2, --min_rows_per_chunk 1 and set
      #                --source_reader_count back to 1.
      args.extend(['--source_reader_count', '2',
                   '--chunk_count', '2',
                   '--min_rows_per_chunk', '1',
                   '--write_query_max_rows', '1'])
    args.append('test_keyspace/0')
    workerclient_proc = utils.run_vtworker_client_bg(args, worker_rpc_port)

    if mysql_down:
      # If MySQL is down, we wait until vtworker retried at least once to make
      # sure it reached the point where a write failed due to MySQL being down.
      # There should be two retries at least, one for each destination shard.
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerRetryCount >= 2',
          condition_fn=lambda v: v.get('WorkerRetryCount') >= 2)
      logging.debug('Worker has retried at least twice, starting reparent now')

      # vtworker is blocked at this point. This is a good time to test that its
      # throttler server is reacting to RPCs.
      self.check_throttler_service('localhost:%d' % worker_rpc_port,
                                   ['test_keyspace/-80', 'test_keyspace/80-'],
                                   9999)

      # Bring back masters. Since we test with semi-sync now, we need at least
      # one replica for the new master. This test is already quite expensive,
      # so we bring back the old master as a replica rather than having a third
      # replica up the whole time.
      logging.debug('Restarting mysqld on destination masters')
      utils.wait_procs(
          [shard_0_master.start_mysql(),
           shard_1_master.start_mysql()])

      # Reparent away from the old masters.
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
           '-new_master', shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
           '-new_master', shard_1_replica.tablet_alias], auto_log=True)

    else:
      # NOTE: There is a race condition around this:
      #   It's possible that the SplitClone vtworker command finishes before the
      #   PlannedReparentShard vtctl command, which we start below, succeeds.
      #   Then the test would fail because vtworker did not have to retry.
      #
      # To workaround this, the test takes a parameter to increase the number of
      # rows that the worker has to copy (with the idea being to slow the worker
      # down).
      # You should choose a value for num_insert_rows, such that this test
      # passes for your environment (trial-and-error...)
#......... part of the code omitted here .........
Author: alainjobart | Project: vitess | Lines: 101 | Source: worker.py


Example 13: verify_successful_worker_copy_with_reparent

  def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
    """Verifies that vtworker can successfully copy data for a SplitClone.

    Order of operations:
    1. Run a background vtworker
    2. Wait until the worker successfully resolves the destination masters.
    3. Reparent the destination tablets
    4. Wait until the vtworker copy is finished
    5. Verify that the worker was forced to reresolve topology and retry writes
      due to the reparent.
    6. Verify that the data was copied successfully to both new shards

    Args:
      mysql_down: boolean. If True, we take down the MySQL instances on the
        destination masters at first, then bring them back and reparent away.

    Raises:
      AssertionError if things didn't go as expected.
    """
    if mysql_down:
      logging.debug('Shutting down mysqld on destination masters.')
      utils.wait_procs(
          [shard_0_master.shutdown_mysql(),
           shard_1_master.shutdown_mysql()])

    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)

    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--source_reader_count', '1',
         '--destination_pack_count', '1',
         '--destination_writer_count', '1',
         '--min_healthy_rdonly_tablets', '1',
         '--max_tps', '9999',
         'test_keyspace/0'],
        worker_rpc_port)

    if mysql_down:
      # If MySQL is down, we wait until vtworker retried at least once to make
      # sure it reached the point where a write failed due to MySQL being down.
      # There should be two retries at least, one for each destination shard.
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerRetryCount >= 2',
          condition_fn=lambda v: v.get('WorkerRetryCount') >= 2)
      logging.debug('Worker has retried at least twice, starting reparent now')

      # vtworker is blocked at this point. This is a good time to test that its
      # throttler server is reacting to RPCs.
      self.check_binlog_throttler('localhost:%d' % worker_rpc_port,
                                  ['test_keyspace/-80', 'test_keyspace/80-'],
                                  9999)

      # Bring back masters. Since we test with semi-sync now, we need at least
      # one replica for the new master. This test is already quite expensive,
      # so we bring back the old master as a replica rather than having a third
      # replica up the whole time.
      logging.debug('Restarting mysqld on destination masters')
      utils.wait_procs(
          [shard_0_master.start_mysql(),
           shard_1_master.start_mysql()])

      # Reparent away from the old masters.
      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/-80',
           shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/80-',
           shard_1_replica.tablet_alias], auto_log=True)

    else:
      # NOTE: There is a race condition around this:
      #   It's possible that the SplitClone vtworker command finishes before the
      #   PlannedReparentShard vtctl command, which we start below, succeeds.
      #   Then the test would fail because vtworker did not have to retry.
      #
      # To workaround this, the test takes a parameter to increase the number of
      # rows that the worker has to copy (with the idea being to slow the worker
      # down).
      # You should choose a value for num_insert_rows, such that this test
      # passes for your environment (trial-and-error...)
      # Make sure that vtworker got past the point where it picked a master
      # for each destination shard ("finding targets" state).
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerState == copying the data',
          condition_fn=lambda v: v.get('WorkerState') == 'copying the data')
      logging.debug('Worker is in copy state, starting reparent now')

      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/-80',
           shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', 'test_keyspace/80-',
           shard_1_replica.tablet_alias], auto_log=True)
#......... part of the code omitted here .........
Author: Analyticalloopholes | Project: vitess | Lines: 101 | Source: worker.py



Note: The utils.run_vtworker_bg examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not reproduce without permission.

