This article collects typical usage examples of the Golang function github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice.RegisterForTest. If you have been wondering what exactly the Golang RegisterForTest function does, how to use it, or where to find examples of it in use, then congratulations: the hand-picked function code examples here may be just what you need.
Shown below are 14 code examples of the RegisterForTest function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Golang code examples.
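Before the examples, here is a minimal sketch of the pattern they all share: start an in-process gRPC server on a random port, register a fake query service with RegisterForTest, and let the test talk to it over the wire. The helper name startFakeQueryServer is made up for illustration, and the fake-service constructor and signatures are inferred from the examples below, so treat this as a sketch rather than a definitive API reference.

// Minimal sketch (illustrative only): serve a fake query service over gRPC for a test.
// Assumes the same imports the examples below use (net, testing, google.golang.org/grpc,
// tabletconntest, grpcqueryservice).
func startFakeQueryServer(t *testing.T) (host string, port int, cleanup func()) {
	// The fake implements the query service interface that RegisterForTest expects.
	service := tabletconntest.CreateFakeServer(t)

	// Listen on a random free port so parallel tests do not collide.
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	addr := listener.Addr().(*net.TCPAddr)

	// Register the fake on a fresh gRPC server and start serving in the background.
	server := grpc.NewServer()
	grpcqueryservice.RegisterForTest(server, service)
	go server.Serve(listener)

	return addr.IP.String(), addr.Port, server.Stop
}

A test would then hand the returned host and port to whatever client it exercises (a tabletconn dial, a discovery gateway, and so on) and call cleanup when it is done. The examples below follow this same shape inline.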
Example 1: TestGoRPCTabletConn
// This test makes sure the go rpc service works
func TestGoRPCTabletConn(t *testing.T) {
	// fake service
	service := tabletconntest.CreateFakeServer(t)
	// listen on a random port
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	host := listener.Addr().(*net.TCPAddr).IP.String()
	port := listener.Addr().(*net.TCPAddr).Port
	// Create a gRPC server and listen on the port
	server := grpc.NewServer()
	grpcqueryservice.RegisterForTest(server, service)
	go server.Serve(listener)
	// run the test suite
	tabletconntest.TestSuite(t, protocolName, &pb.EndPoint{
		Host: host,
		PortMap: map[string]int32{
			"grpc": int32(port),
		},
	}, service)
}
Developer ID: ruiaylin, Project: vitess, Lines of code: 26, Source file: conn_test.go

Example 2: TestGoRPCTabletConn
// This test makes sure the go rpc service works
func TestGoRPCTabletConn(t *testing.T) {
	// fake service
	service := tabletconntest.CreateFakeServer(t)
	// listen on a random port
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	host := listener.Addr().(*net.TCPAddr).IP.String()
	port := listener.Addr().(*net.TCPAddr).Port
	// Create a gRPC server and listen on the port
	server := grpc.NewServer()
	grpcqueryservice.RegisterForTest(server, service)
	go server.Serve(listener)
	// Create a gRPC client connecting to the server
	ctx := context.Background()
	client, err := DialTablet(ctx, &pb.EndPoint{
		Host: host,
		PortMap: map[string]int32{
			"grpc": int32(port),
		},
	}, tabletconntest.TestKeyspace, tabletconntest.TestShard, 30*time.Second)
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	// run the test suite
	tabletconntest.TestSuite(t, client, service)
	// and clean up
	client.Close()
}
Developer ID: afrolovskiy, Project: vitess, Lines of code: 36, Source file: conn_test.go

Example 3: TestTabletData
func TestTabletData(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}
	tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	tablet1.StartActionLoop(t, wr)
	defer tablet1.StopActionLoop(t)
	shsq := newStreamHealthTabletServer(t)
	grpcqueryservice.RegisterForTest(tablet1.RPCServer, shsq)
	thc := newTabletHealthCache(ts)
	stats := &querypb.RealtimeStats{
		HealthError: "testHealthError",
		SecondsBehindMaster: 72,
		CpuUsage: 1.1,
	}
	// Keep broadcasting until the first result goes through.
	stop := make(chan struct{})
	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				shsq.BroadcastHealth(42, stats)
			}
		}
	}()
	// Start streaming and wait for the first result.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	result, err := thc.Get(ctx, tablet1.Tablet.Alias)
	cancel()
	close(stop)
	if err != nil {
		t.Fatalf("thc.Get failed: %v", err)
	}
	if got, want := result.RealtimeStats, stats; !proto.Equal(got, want) {
		t.Errorf("RealtimeStats = %#v, want %#v", got, want)
	}
}
Developer ID: ChrisYangLiu, Project: vitess, Lines of code: 52, Source file: tablet_data_test.go

Example 4: TestGRPCDiscovery
func TestGRPCDiscovery(t *testing.T) {
	flag.Set("tablet_protocol", "grpc")
	flag.Set("gateway_implementation", "discoverygateway")
	// Fake services for the tablet, topo server.
	service, ts, cell := CreateFakeServers(t)
	// Listen on a random port.
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	host := listener.Addr().(*net.TCPAddr).IP.String()
	port := listener.Addr().(*net.TCPAddr).Port
	// Create a gRPC server and listen on the port.
	server := grpc.NewServer()
	grpcqueryservice.RegisterForTest(server, service)
	go server.Serve(listener)
	// Create the discovery healthcheck, and the gateway.
	// Wait for the right tablets to be present.
	hc := discovery.NewHealthCheck(30*time.Second, 10*time.Second, 2*time.Minute)
	hc.AddTablet(cell, "test_tablet", &topodatapb.Tablet{
		Alias: &topodatapb.TabletAlias{
			Cell: cell,
		},
		Keyspace: tabletconntest.TestTarget.Keyspace,
		Shard: tabletconntest.TestTarget.Shard,
		Type: tabletconntest.TestTarget.TabletType,
		Hostname: host,
		PortMap: map[string]int32{
			"grpc": int32(port),
		},
	})
	dg := gateway.GetCreator()(hc, ts, ts, cell, 2, []topodatapb.TabletType{tabletconntest.TestTarget.TabletType})
	// and run the test suite.
	TestSuite(t, "discovery-grpc", dg, service)
}
Developer ID: CowLeo, Project: vitess, Lines of code: 40, Source file: grpc_discovery_test.go

Example 5: TestVerticalSplitDiff
func TestVerticalSplitDiff(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	ctx := context.Background()
	wi := NewInstance(ctx, ts, "cell1", time.Second)
	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	// Create the destination keyspace with the appropriate ServedFromMap
	ki := &pbt.Keyspace{
		ServedFroms: []*pbt.Keyspace_ServedFrom{
			&pbt.Keyspace_ServedFrom{
				TabletType: pbt.TabletType_MASTER,
				Keyspace: "source_ks",
			},
			&pbt.Keyspace_ServedFrom{
				TabletType: pbt.TabletType_REPLICA,
				Keyspace: "source_ks",
			},
			&pbt.Keyspace_ServedFrom{
				TabletType: pbt.TabletType_RDONLY,
				Keyspace: "source_ks",
			},
		},
	}
	wi.wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki)
	destMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 12,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly1, destRdonly2} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}
	wi.wr.SetSourceShards(ctx, "destination_ks", "0", []*pbt.TabletAlias{sourceRdonly1.Tablet.Alias}, []string{"moving.*", "view1"})
	// add the topo and schema data we'll need
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "source_ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "destination_ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	// We need to use FakeTabletManagerClient because we don't
	// have a good way to fake the binlog player yet, which is
	// necessary for synchronizing replication.
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, faketmclient.NewFakeTabletManagerClient())
	excludedTable := "excludedTable1"
	subFlags := flag.NewFlagSet("VerticalSplitDiff", flag.ContinueOnError)
	gwrk, err := commandVerticalSplitDiff(wi, wr, subFlags, []string{
		"-exclude_tables", excludedTable,
		"destination_ks/0",
	})
	if err != nil {
		t.Fatalf("commandVerticalSplitDiff failed: %v", err)
	}
	wrk := gwrk.(*VerticalSplitDiffWorker)
	for _, rdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2, destRdonly1, destRdonly2} {
		// both source and destination should be identical (for schema and data returned)
		rdonly.FakeMysqlDaemon.Schema = &myproto.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*myproto.TableDefinition{
				&myproto.TableDefinition{
					Name: "moving1",
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: myproto.TableBaseTable,
				},
				&myproto.TableDefinition{
					Name: excludedTable,
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: myproto.TableBaseTable,
				},
				&myproto.TableDefinition{
					Name: "view1",
					Type: myproto.TableView,
				},
			},
		}
		grpcqueryservice.RegisterForTest(rdonly.RPCServer, &verticalDiffTabletServer{t: t, excludedTable: excludedTable})
	}
	err = wrk.Run(ctx)
	status := wrk.StatusAsText()
	t.Logf("Got status: %v", status)
	if err != nil || wrk.State != WorkerStateDone {
		t.Errorf("Worker run failed")
//......... part of the code is omitted here .........
Developer ID: hadmagic, Project: vitess, Lines of code: 101, Source file: vertical_split_diff_test.go

Example 6: testWaitForDrain
func testWaitForDrain(t *testing.T, desc, cells string, drain drainDirective, expectedErrors []string) {
	const keyspace = "ks"
	const shard = "-80"
	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	flag.Set("vtctl_healthcheck_timeout", "0.25s")
	vp := NewVtctlPipe(t, ts)
	defer vp.Close()
	// Create keyspace.
	if err := ts.CreateKeyspace(context.Background(), keyspace, &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}
	t1 := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, db,
		TabletKeyspaceShard(t, keyspace, shard))
	t2 := NewFakeTablet(t, wr, "cell2", 1, topodatapb.TabletType_REPLICA, db,
		TabletKeyspaceShard(t, keyspace, shard))
	for _, ft := range []*FakeTablet{t1, t2} {
		ft.StartActionLoop(t, wr)
		defer ft.StopActionLoop(t)
	}
	target := querypb.Target{
		Keyspace: keyspace,
		Shard: shard,
		TabletType: topodatapb.TabletType_REPLICA,
	}
	fqs1 := newFakeQueryService(target)
	fqs2 := newFakeQueryService(target)
	grpcqueryservice.RegisterForTest(t1.RPCServer, fqs1)
	grpcqueryservice.RegisterForTest(t2.RPCServer, fqs2)
	// Run vtctl WaitForDrain and react depending on its output.
	timeout := "0.5s"
	if len(expectedErrors) == 0 {
		// Tests with a positive outcome should have a more generous timeout to
		// avoid flakiness.
		timeout = "30s"
	}
	stream, err := vp.RunAndStreamOutput(
		[]string{"WaitForDrain", "-cells", cells, "-retry_delay", "100ms", "-timeout", timeout,
			keyspace + "/" + shard, topodatapb.TabletType_REPLICA.String()})
	if err != nil {
		t.Fatalf("VtctlPipe.RunAndStreamOutput() failed: %v", err)
	}
	// QPS = 1.0. Tablets are not drained yet.
	fqs1.addHealthResponse(1.0)
	fqs2.addHealthResponse(1.0)
	var le *logutilpb.Event
	for {
		le, err = stream.Recv()
		if err != nil {
			break
		}
		line := logutil.EventString(le)
		t.Logf(line)
		if strings.Contains(line, "for all healthy tablets to be drained") {
			t.Log("Successfully waited for WaitForDrain to be blocked because tablets have a QPS rate > 0.0")
			break
		} else {
			t.Log("waiting for WaitForDrain to see a QPS rate > 0.0")
		}
	}
	if drain&DrainCell1 != 0 {
		fqs1.addHealthResponse(0.0)
	} else {
		fqs1.addHealthResponse(2.0)
	}
	if drain&DrainCell2 != 0 {
		fqs2.addHealthResponse(0.0)
	} else {
		fqs2.addHealthResponse(2.0)
	}
	// If a cell was drained, its QPS rate should drop to 0.0 now.
	// If not all selected cells were drained, this will end after "-timeout".
	for {
		le, err = stream.Recv()
		if err == nil {
			vp.t.Logf(logutil.EventString(le))
		} else {
			break
		}
	}
	if len(expectedErrors) == 0 {
		if err != io.EOF {
			t.Fatalf("TestWaitForDrain: %v: no error expected but got: %v", desc, err)
		}
		// else: Success.
	} else {
//......... part of the code is omitted here .........
Developer ID: aaijazi, Project: vitess, Lines of code: 101, Source file: wait_for_drain_test.go

Example 7: TestSplitDiff
func TestSplitDiff(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	ctx := context.Background()
	wi := NewInstance(ctx, ts, "cell1", time.Second)
	if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}
	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	leftMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 12,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly1, leftRdonly2} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}
	// add the topo and schema data we'll need
	if err := ts.CreateShard(ctx, "ks", "80-"); err != nil {
		t.Fatalf("CreateShard(\"-80\") failed: %v", err)
	}
	wi.wr.SetSourceShards(ctx, "ks", "-40", []*topodatapb.TabletAlias{sourceRdonly1.Tablet.Alias}, nil)
	if err := wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, 4, false); err != nil {
		t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	subFlags := flag.NewFlagSet("SplitDiff", flag.ContinueOnError)
	// We need to use FakeTabletManagerClient because we don't
	// have a good way to fake the binlog player yet, which is
	// necessary for synchronizing replication.
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, faketmclient.NewFakeTabletManagerClient())
	excludedTable := "excludedTable1"
	gwrk, err := commandSplitDiff(wi, wr, subFlags, []string{
		"-exclude_tables", excludedTable,
		"ks/-40",
	})
	if err != nil {
		t.Fatalf("commandSplitDiff failed: %v", err)
	}
	wrk := gwrk.(*SplitDiffWorker)
	for _, rdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2, leftRdonly1, leftRdonly2} {
		// In reality, the destinations *shouldn't* have identical data to the source - instead, we should see
		// the data split into left and right. However, if we do that in this test, we would really just be
		// testing our fake SQL logic, since we do the data filtering in SQL.
		// To simplify things, just assume that both sides have identical data.
		rdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "table1",
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
				{
					Name: excludedTable,
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
				{
					Name: "view1",
					Type: tmutils.TableView,
				},
			},
		}
	}
	grpcqueryservice.RegisterForTest(leftRdonly1.RPCServer, &destinationTabletServer{t: t, excludedTable: excludedTable})
	grpcqueryservice.RegisterForTest(leftRdonly2.RPCServer, &destinationTabletServer{t: t, excludedTable: excludedTable})
	grpcqueryservice.RegisterForTest(sourceRdonly1.RPCServer, &sourceTabletServer{t: t, excludedTable: excludedTable})
	grpcqueryservice.RegisterForTest(sourceRdonly2.RPCServer, &sourceTabletServer{t: t, excludedTable: excludedTable})
	err = wrk.Run(ctx)
	status := wrk.StatusAsText()
	t.Logf("Got status: %v", status)
	if err != nil || wrk.State != WorkerStateDone {
		t.Errorf("Worker run failed")
	}
}
Developer ID: ChrisYangLiu, Project: vitess, Lines of code: 99, Source file: split_diff_test.go

Example 8: testSplitClone
func testSplitClone(t *testing.T, strategy string) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	wi := NewInstance(ts, "cell1", time.Second, time.Second)
	if err := ts.CreateKeyspace(context.Background(), "ks", &pbt.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: pbt.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}
	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	leftMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	rightMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 20,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "40-80"))
	rightRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 21,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "40-80"))
	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly, rightMaster, rightRdonly} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}
	// add the topo and schema data we'll need
	ctx := context.Background()
	if err := ts.CreateShard(ctx, "ks", "80-"); err != nil {
		t.Fatalf("CreateShard(\"-80\") failed: %v", err)
	}
	if err := wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", pbt.KeyspaceIdType_UINT64, 4, false); err != nil {
		t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	subFlags := flag.NewFlagSet("SplitClone", flag.ContinueOnError)
	gwrk, err := commandSplitClone(wi, wi.wr, subFlags, []string{
		"-strategy", strategy,
		"-source_reader_count", "10",
		"-destination_pack_count", "4",
		"-min_table_size_for_split", "1",
		"-destination_writer_count", "10",
		"ks/-80",
	})
	if err != nil {
		t.Errorf("Worker creation failed: %v", err)
	}
	wrk := gwrk.(*SplitCloneWorker)
	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		sourceRdonly.FakeMysqlDaemon.Schema = &myproto.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*myproto.TableDefinition{
				&myproto.TableDefinition{
					Name: "table1",
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: myproto.TableBaseTable,
					// This informs how many rows we can pack into a single insert
					DataLength: 2048,
				},
			},
		}
		sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = SourceRdonlyFactory(t)
		sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{
			GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
		}
		sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
			"STOP SLAVE",
			"START SLAVE",
		}
		grpcqueryservice.RegisterForTest(sourceRdonly.RPCServer, &testQueryService{t: t})
	}
	// We read 100 source rows. sourceReaderCount is set to 10, so
	// we'll have 100/10=10 rows per table chunk.
	// destinationPackCount is set to 4, so we take 4 source rows
	// at once. So we'll process 4 + 4 + 2 rows to get to 10.
	// That means 3 insert statements on each target (each
	// containing half of the rows, i.e. 2 + 2 + 1 rows). So 3 * 10
	// = 30 insert statements on each destination.
	leftMaster.FakeMysqlDaemon.DbAppConnectionFactory = DestinationsFactory(t, 30)
	leftRdonly.FakeMysqlDaemon.DbAppConnectionFactory = DestinationsFactory(t, 30)
	rightMaster.FakeMysqlDaemon.DbAppConnectionFactory = DestinationsFactory(t, 30)
	rightRdonly.FakeMysqlDaemon.DbAppConnectionFactory = DestinationsFactory(t, 30)
	// Only wait 1 ms between retries, so that the test passes faster
	*executeFetchRetryTime = (1 * time.Millisecond)
//......... part of the code is omitted here .........
Developer ID: khanchan, Project: vitess, Lines of code: 101, Source file: split_clone_test.go

Example 9: testVerticalSplitClone
func testVerticalSplitClone(t *testing.T, strategy string) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	wi := NewInstance(ts, "cell1", time.Second, time.Second)
	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	// Create the destination keyspace with the appropriate ServedFromMap
	ki := &pbt.Keyspace{
		ServedFroms: []*pbt.Keyspace_ServedFrom{
			&pbt.Keyspace_ServedFrom{
				TabletType: pbt.TabletType_MASTER,
				Keyspace: "source_ks",
			},
			&pbt.Keyspace_ServedFrom{
				TabletType: pbt.TabletType_REPLICA,
				Keyspace: "source_ks",
			},
			&pbt.Keyspace_ServedFrom{
				TabletType: pbt.TabletType_RDONLY,
				Keyspace: "source_ks",
			},
		},
	}
	ctx := context.Background()
	wi.wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki)
	destMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}
	// add the topo and schema data we'll need
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "source_ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "destination_ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	subFlags := flag.NewFlagSet("SplitClone", flag.ContinueOnError)
	gwrk, err := commandVerticalSplitClone(wi, wi.wr, subFlags, []string{
		"-tables", "moving.*,view1",
		"-strategy", strategy,
		"-source_reader_count", "10",
		"-destination_pack_count", "4",
		"-min_table_size_for_split", "1",
		"-destination_writer_count", "10",
		"destination_ks/0",
	})
	if err != nil {
		t.Errorf("Worker creation failed: %v", err)
	}
	wrk := gwrk.(*VerticalSplitCloneWorker)
	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		sourceRdonly.FakeMysqlDaemon.Schema = &myproto.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*myproto.TableDefinition{
				&myproto.TableDefinition{
					Name: "moving1",
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: myproto.TableBaseTable,
					// This informs how many rows we can pack into a single insert
					DataLength: 2048,
				},
				&myproto.TableDefinition{
					Name: "view1",
					Type: myproto.TableView,
				},
			},
		}
		sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = VerticalSourceRdonlyFactory(t)
		sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = myproto.ReplicationPosition{
			GTIDSet: myproto.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
		}
		sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
			"STOP SLAVE",
			"START SLAVE",
		}
		grpcqueryservice.RegisterForTest(sourceRdonly.RPCServer, &verticalTabletServer{t: t})
	}
	// We read 100 source rows. sourceReaderCount is set to 10, so
	// we'll have 100/10=10 rows per table chunk.
	// destinationPackCount is set to 4, so we take 4 source rows
	// at once. So we'll process 4 + 4 + 2 rows to get to 10.
	// That means 3 insert statements on the target. So 3 * 10
	// = 30 insert statements on the destination.
//......... part of the code is omitted here .........
Developer ID: bayannur, Project: vitess, Lines of code: 101, Source file: vertical_split_clone_test.go

Example 10: TestSqlDiffer
// TODO(aaijazi): Create a test in which source and destination data does not match
// TODO(aaijazi): This test is really slow; investigate why.
func TestSqlDiffer(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	// We need to use FakeTabletManagerClient because we don't have a good way to fake the binlog player yet,
	// which is necessary for synchronizing replication.
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, faketmclient.NewFakeTabletManagerClient(), time.Second)
	ctx := context.Background()
	supersetMaster := testlib.NewFakeTablet(t, wr, "cell1", 0,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	supersetRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 1,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	supersetRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 2,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	subsetMaster := testlib.NewFakeTablet(t, wr, "cell1", 10,
		pbt.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	subsetRdonly1 := testlib.NewFakeTablet(t, wr, "cell1", 11,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	subsetRdonly2 := testlib.NewFakeTablet(t, wr, "cell1", 12,
		pbt.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	for _, ft := range []*testlib.FakeTablet{supersetMaster, supersetRdonly1, supersetRdonly2, subsetMaster, subsetRdonly1, subsetRdonly2} {
		ft.StartActionLoop(t, wr)
		defer ft.StopActionLoop(t)
	}
	wr.SetSourceShards(ctx, "destination_ks", "0", []*pbt.TabletAlias{supersetRdonly1.Tablet.Alias}, []string{"moving.*", "view1"})
	// add the topo and schema data we'll need
	if err := wr.RebuildKeyspaceGraph(ctx, "source_ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	if err := wr.RebuildKeyspaceGraph(ctx, "destination_ks", nil, true); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	supersetSourceSpec := SourceSpec{"source_ks", "0", "SELECT *", supersetRdonly1.Tablet.Alias}
	subsetSourceSpec := SourceSpec{"destination_ks", "0", "SELECT *", subsetRdonly1.Tablet.Alias}
	gwrk := NewSQLDiffWorker(wr, "cell1", supersetSourceSpec, subsetSourceSpec)
	wrk := gwrk.(*SQLDiffWorker)
	for _, rdonly := range []*testlib.FakeTablet{supersetRdonly1, supersetRdonly2, subsetRdonly1, subsetRdonly2} {
		rdonly.FakeMysqlDaemon.Schema = &myproto.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*myproto.TableDefinition{
				&myproto.TableDefinition{
					Name: "moving1",
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: myproto.TableBaseTable,
				},
				&myproto.TableDefinition{
					Name: "view1",
					Type: myproto.TableView,
				},
			},
		}
		grpcqueryservice.RegisterForTest(rdonly.RPCServer, &sqlDifferTabletServer{t: t})
	}
	err := wrk.Run(ctx)
	status := wrk.StatusAsText()
	t.Logf("Got status: %v", status)
	if err != nil || wrk.State != WorkerStateDone {
		t.Errorf("Worker run failed")
	}
}
Developer ID: richarwu, Project: vitess, Lines of code: 71, Source file: sqldiffer_test.go

Example 11: waitForFilteredReplication
func waitForFilteredReplication(t *testing.T, expectedErr string, initialStats *pbq.RealtimeStats, broadcastStatsFunc func() *pbq.RealtimeStats) {
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), time.Second)
	vp := NewVtctlPipe(t, ts)
	defer vp.Close()
	// source of the filtered replication. We don't start its loop because we don't connect to it.
	source := NewFakeTablet(t, wr, "cell1", 0, pbt.TabletType_MASTER,
		TabletKeyspaceShard(t, keyspace, "0"))
	// dest is the master of the dest shard which receives filtered replication events.
	dest := NewFakeTablet(t, wr, "cell1", 1, pbt.TabletType_MASTER,
		TabletKeyspaceShard(t, keyspace, destShard))
	dest.StartActionLoop(t, wr)
	defer dest.StopActionLoop(t)
	// Build topology state as we would expect it when filtered replication is enabled.
	ctx := context.Background()
	wr.SetSourceShards(ctx, keyspace, destShard, []*pbt.TabletAlias{source.Tablet.GetAlias()}, nil)
	// Set a BinlogPlayerMap to avoid a nil panic when the explicit RunHealthCheck
	// is called by WaitForFilteredReplication.
	// Note that for this test we don't mock the BinlogPlayerMap i.e. although
	// its state says no filtered replication is running, the code under test will
	// observe otherwise because we call TabletServer.BroadcastHealth() directly and
	// skip going through the tabletmanager's agent.
	dest.Agent.BinlogPlayerMap = tabletmanager.NewBinlogPlayerMap(ts, nil, nil)
	// Use real, but trimmed down QueryService.
	testConfig := tabletserver.DefaultQsConfig
	testConfig.EnablePublishStats = false
	testConfig.DebugURLPrefix = fmt.Sprintf("TestWaitForFilteredReplication-%d-", rand.Int63())
	qs := tabletserver.NewTabletServer(testConfig)
	grpcqueryservice.RegisterForTest(dest.RPCServer, qs)
	qs.BroadcastHealth(42, initialStats)
	// run vtctl WaitForFilteredReplication
	stopBroadcasting := make(chan struct{})
	go func() {
		defer close(stopBroadcasting)
		err := vp.Run([]string{"WaitForFilteredReplication", "-max_delay", "10s", dest.Tablet.Keyspace + "/" + dest.Tablet.Shard})
		if expectedErr == "" {
			if err != nil {
				t.Fatalf("WaitForFilteredReplication must not fail: %v", err)
			}
		} else {
			if err == nil || !strings.Contains(err.Error(), expectedErr) {
				t.Fatalf("WaitForFilteredReplication wrong error. got: %v want substring: %v", err, expectedErr)
			}
		}
	}()
	// Broadcast health record as long as vtctl is running.
	for {
		// Give vtctl a head start to consume the initial stats.
		// (We do this because there's unfortunately no way to explicitly
		// synchronize with the point where conn.StreamHealth() has started.)
		// (Tests won't break if vtctl misses the initial stats. Only coverage
		// will be impacted.)
		timer := time.NewTimer(1 * time.Millisecond)
		select {
		case <-stopBroadcasting:
			timer.Stop()
			return
		case <-timer.C:
			qs.BroadcastHealth(42, broadcastStatsFunc())
			// Pace the flooding broadcasting to waste less CPU.
			timer.Reset(1 * time.Millisecond)
		}
	}
	// vtctl WaitForFilteredReplication returned.
}
Developer ID: ruiaylin, Project: vitess, Lines of code: 74, Source file: wait_for_filtered_replication_test.go

Example 12: testSplitDiff
//......... part of the code is omitted here .........
		if err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{
			ShardingColumnName: "keyspace_id",
			ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
		}); err != nil {
			t.Fatalf("CreateKeyspace failed: %v", err)
		}
	}
	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	leftMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 12,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly1, leftRdonly2} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}
	// add the topo and schema data we'll need
	if err := ts.CreateShard(ctx, "ks", "80-"); err != nil {
		t.Fatalf("CreateShard(\"-80\") failed: %v", err)
	}
	wi.wr.SetSourceShards(ctx, "ks", "-40", []*topodatapb.TabletAlias{sourceRdonly1.Tablet.Alias}, nil)
	if err := wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, false); err != nil {
		t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	excludedTable := "excludedTable1"
	for _, rdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2, leftRdonly1, leftRdonly2} {
		// The destination only has half the data.
		// For v2, we do filtering at the SQL level.
		// For v3, we do it in the client.
		// So in any case, we need real data.
		rdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "table1",
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
				{
					Name: excludedTable,
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
			},
		}
	}
	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		qs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.RegisterForTest(sourceRdonly.RPCServer, &sourceTabletServer{
			t: t,
			StreamHealthQueryService: qs,
			excludedTable: excludedTable,
			v3: v3,
		})
	}
	for _, destRdonly := range []*testlib.FakeTablet{leftRdonly1, leftRdonly2} {
		qs := fakes.NewStreamHealthQueryService(destRdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.RegisterForTest(destRdonly.RPCServer, &destinationTabletServer{
			t: t,
			StreamHealthQueryService: qs,
			excludedTable: excludedTable,
		})
	}
	// Run the vtworker command.
	args := []string{
		"SplitDiff",
		"-exclude_tables", excludedTable,
		"ks/-40",
	}
	// We need to use FakeTabletManagerClient because we don't
	// have a good way to fake the binlog player yet, which is
	// necessary for synchronizing replication.
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, newFakeTMCTopo(ts))
	if err := runCommand(t, wi, wr, args); err != nil {
		t.Fatal(err)
	}
}
Developer ID: CowLeo, Project: vitess, Lines of code: 101, Source file: split_diff_test.go

Example 13: setUpWithConcurreny
//......... part of the code is omitted here .........
		tc.t.Fatalf("CreateShard(\"-80\") failed: %v", err)
	}
	if err := tc.wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, false); err != nil {
		tc.t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := tc.wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil); err != nil {
		tc.t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "table1",
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
					// Set the table size to a value higher than --min_table_size_for_split.
					DataLength: 2048,
				},
			},
		}
		sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
			tc.t, "vt_ks.table1", splitCloneTestMin, splitCloneTestMax)
		sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
			GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
		}
		sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
			"STOP SLAVE",
			"START SLAVE",
		}
		shqs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
		shqs.AddDefaultHealthResponse()
		qs := newTestQueryService(tc.t, sourceRdonly.Target(), shqs, 0, 1, sourceRdonly.Tablet.Alias.Uid)
		qs.addGeneratedRows(100, 100+rowsTotal)
		grpcqueryservice.RegisterForTest(sourceRdonly.RPCServer, qs)
		tc.sourceRdonlyQs = append(tc.sourceRdonlyQs, qs)
	}
	// Set up destination rdonlys which will be used as input for the diff during the clone.
	for i, destRdonly := range []*testlib.FakeTablet{leftRdonly1, rightRdonly1, leftRdonly2, rightRdonly2} {
		shqs := fakes.NewStreamHealthQueryService(destRdonly.Target())
		shqs.AddDefaultHealthResponse()
		qs := newTestQueryService(tc.t, destRdonly.Target(), shqs, i%2, 2, destRdonly.Tablet.Alias.Uid)
		grpcqueryservice.RegisterForTest(destRdonly.RPCServer, qs)
		if i%2 == 0 {
			tc.leftRdonlyQs = append(tc.leftRdonlyQs, qs)
		} else {
			tc.rightRdonlyQs = append(tc.rightRdonlyQs, qs)
		}
	}
	tc.leftMasterFakeDb = NewFakePoolConnectionQuery(tc.t, "leftMaster")
	tc.leftReplicaFakeDb = NewFakePoolConnectionQuery(tc.t, "leftReplica")
	tc.rightMasterFakeDb = NewFakePoolConnectionQuery(tc.t, "rightMaster")
	// In the default test case there will be 30 inserts per destination shard
	// because 10 writer threads will insert 5 rows on each destination shard.
	// (100 source rows / 10 writers / 2 shards = 5 rows.)
	// Due to --write_query_max_rows=2 there will be 3 inserts for 5 rows.
	rowsPerDestinationShard := rowsTotal / 2
	rowsPerThread := rowsPerDestinationShard / concurrency
	insertsPerThread := math.Ceil(float64(rowsPerThread) / float64(writeQueryMaxRows))
	insertsTotal := int(insertsPerThread) * concurrency
	for i := 1; i <= insertsTotal; i++ {
		tc.leftMasterFakeDb.addExpectedQuery("INSERT INTO `vt_ks`.table1 (id, msg, keyspace_id) VALUES (*", nil)
		// leftReplica is unused by default.
		tc.rightMasterFakeDb.addExpectedQuery("INSERT INTO `vt_ks`.table1 (id, msg, keyspace_id) VALUES (*", nil)
	}
	expectBlpCheckpointCreationQueries(tc.leftMasterFakeDb)
	expectBlpCheckpointCreationQueries(tc.rightMasterFakeDb)
	leftMaster.FakeMysqlDaemon.DbAppConnectionFactory = tc.leftMasterFakeDb.getFactory()
	leftReplica.FakeMysqlDaemon.DbAppConnectionFactory = tc.leftReplicaFakeDb.getFactory()
	rightMaster.FakeMysqlDaemon.DbAppConnectionFactory = tc.rightMasterFakeDb.getFactory()
	// Fake stream health responses because vtworker needs them to find the master.
	tc.leftMasterQs = fakes.NewStreamHealthQueryService(leftMaster.Target())
	tc.leftMasterQs.AddDefaultHealthResponse()
	tc.leftReplicaQs = fakes.NewStreamHealthQueryService(leftReplica.Target())
	tc.leftReplicaQs.AddDefaultHealthResponse()
	tc.rightMasterQs = fakes.NewStreamHealthQueryService(rightMaster.Target())
	tc.rightMasterQs.AddDefaultHealthResponse()
	grpcqueryservice.RegisterForTest(leftMaster.RPCServer, tc.leftMasterQs)
	grpcqueryservice.RegisterForTest(leftReplica.RPCServer, tc.leftReplicaQs)
	grpcqueryservice.RegisterForTest(rightMaster.RPCServer, tc.rightMasterQs)
	tc.defaultWorkerArgs = []string{
		"SplitClone",
		"-online=false",
		// --max_tps is only specified to enable the throttler and ensure that the
		// code is executed. But the intent here is not to throttle the test, hence
		// the rate limit is set very high.
		"-max_tps", "9999",
		"-write_query_max_rows", strconv.Itoa(writeQueryMaxRows),
		"-source_reader_count", strconv.Itoa(concurrency),
		"-min_table_size_for_split", "1",
		"-destination_writer_count", strconv.Itoa(concurrency),
		"ks/-80"}
}
Developer ID: CowLeo, Project: vitess, Lines of code: 101, Source file: split_clone_test.go

Example 14: setUp
//......... part of the code is omitted here .........
	// leftReplica is used by the reparent test.
	leftReplica := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 12,
		topodatapb.TabletType_REPLICA, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	tc.leftReplica = leftReplica
	rightMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 20,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))
	rightRdonly := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 21,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))
	tc.tablets = []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly, tc.leftReplica, rightMaster, rightRdonly}
	for _, ft := range tc.tablets {
		ft.StartActionLoop(tc.t, tc.wi.wr)
	}
	// add the topo and schema data we'll need
	if err := tc.ts.CreateShard(ctx, "ks", "80-"); err != nil {
		tc.t.Fatalf("CreateShard(\"-80\") failed: %v", err)
	}
	if err := tc.wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, false); err != nil {
		tc.t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
//......... part of the code is omitted here .........