This article collects typical usage examples of the Go function DispatchUpdate from the github.com/youtube/vitess/go/event package. If you are wondering what DispatchUpdate does, how to call it, or where to find real-world usage, the curated examples below should help.
Listed below are 20 code examples of DispatchUpdate, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Go code examples.
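Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: event.DispatchUpdate first calls Update() on the event, then dispatches it to every listener registered for that event's type (the listener/updater contract is visible in Example 14 below). The statusEvent type and the status strings here are illustrative assumptions, not part of the vitess API.

package main

import (
    "fmt"

    "github.com/youtube/vitess/go/event"
)

// statusEvent is a hypothetical event type. Any type with an
// Update(interface{}) method can be passed to event.DispatchUpdate.
type statusEvent struct {
    Status string
}

// Update records the most recent status string on the event.
func (ev *statusEvent) Update(status interface{}) {
    ev.Status = status.(string)
}

func main() {
    // Listeners fire for every dispatched event of the matching type.
    event.AddListener(func(ev *statusEvent) {
        fmt.Println("status changed to:", ev.Status)
    })

    ev := &statusEvent{}
    // Sets ev.Status to "starting", then dispatches ev to the listener.
    event.DispatchUpdate(ev, "starting")
    // The same event can be reused as the operation progresses.
    event.DispatchUpdate(ev, "finished")
}

This is exactly how the Wrangler methods below use it: they build one event struct up front, then call DispatchUpdate at each phase ("start", "updating keyspace", "failed: ...", "finished") so listeners can track long-running topology operations.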
Example 1: replicaMigrateServedFrom
// replicaMigrateServedFrom handles the slave (replica, rdonly) migration.
func (wr *Wrangler) replicaMigrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, servedType topodatapb.TabletType, cells []string, reverse bool, tables []string, ev *events.MigrateServedFrom) error {
    // Save the destination keyspace (its ServedFrom has been changed)
    event.DispatchUpdate(ev, "updating keyspace")
    if err := wr.ts.UpdateKeyspace(ctx, ki); err != nil {
        return err
    }
    // Save the source shard (its blacklisted tables field has changed)
    event.DispatchUpdate(ev, "updating source shard")
    if err := sourceShard.UpdateSourceBlacklistedTables(servedType, cells, reverse, tables); err != nil {
        return fmt.Errorf("UpdateSourceBlacklistedTables(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
    }
    if err := wr.ts.UpdateShard(ctx, sourceShard); err != nil {
        return fmt.Errorf("UpdateShard(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
    }
    // Now refresh the source servers so they reload their
    // blacklisted table list
    event.DispatchUpdate(ev, "refreshing sources tablets state so they update their blacklisted tables")
    if err := wr.RefreshTablesByShard(ctx, sourceShard, servedType, cells); err != nil {
        return err
    }
    return nil
}
Author: littleyang | Project: vitess | Lines: 26 | Source: keyspace.go
Example 2: MultiRestore
func (wr *Wrangler) MultiRestore(dstTabletAlias topo.TabletAlias, sources []topo.TabletAlias, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount int, strategy string) (err error) {
    var ti *topo.TabletInfo
    ti, err = wr.ts.GetTablet(dstTabletAlias)
    if err != nil {
        return
    }
    args := &actionnode.MultiRestoreArgs{SrcTabletAliases: sources, Concurrency: concurrency, FetchConcurrency: fetchConcurrency, InsertTableConcurrency: insertTableConcurrency, FetchRetryCount: fetchRetryCount, Strategy: strategy}
    ev := &events.MultiRestore{
        Tablet: *ti.Tablet,
        Args:   *args,
    }
    event.DispatchUpdate(ev, "starting")
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    actionPath, err := wr.ai.MultiRestore(dstTabletAlias, args)
    if err != nil {
        return err
    }
    if err := wr.WaitForCompletion(actionPath); err != nil {
        return err
    }
    event.DispatchUpdate(ev, "finished")
    return nil
}
Author: chinna1986 | Project: vitess | Lines: 31 | Source: split.go
Example 3: replicaMigrateServedFrom
// replicaMigrateServedFrom handles the slave (replica, rdonly) migration.
func (wr *Wrangler) replicaMigrateServedFrom(ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, reverse bool, tables []string, ev *events.MigrateServedFrom) error {
    // Save the destination keyspace (its ServedFrom has been changed)
    event.DispatchUpdate(ev, "updating keyspace")
    if err := topo.UpdateKeyspace(wr.ts, ki); err != nil {
        return err
    }
    // Save the source shard (its blacklisted tables field has changed)
    event.DispatchUpdate(ev, "updating source shard")
    if sourceShard.BlacklistedTablesMap == nil {
        sourceShard.BlacklistedTablesMap = make(map[topo.TabletType][]string)
    }
    if reverse {
        delete(sourceShard.BlacklistedTablesMap, servedType)
    } else {
        sourceShard.BlacklistedTablesMap[servedType] = tables
    }
    if err := topo.UpdateShard(wr.ts, sourceShard); err != nil {
        return err
    }
    // Now refresh the source servers so they reload their
    // blacklisted table list
    event.DispatchUpdate(ev, "refreshing sources tablets state so they update their blacklisted tables")
    if err := wr.RefreshTablesByShard(sourceShard.Keyspace(), sourceShard.ShardName(), servedType); err != nil {
        return err
    }
    return nil
}
Author: nangong92t | Project: go_src | Lines: 31 | Source: keyspace.go
Example 4: reparentShardExternal
// reparentShardExternal handles an external reparent.
//
// The ev parameter is an event struct prefilled with information that the
// caller has on hand, which would be expensive for us to re-query.
func (wr *Wrangler) reparentShardExternal(ev *events.Reparent, slaveTabletMap, masterTabletMap map[topo.TabletAlias]*topo.TabletInfo, masterElectTablet *topo.TabletInfo) error {
    event.DispatchUpdate(ev, "starting external")
    // we fix the new master in the replication graph
    event.DispatchUpdate(ev, "checking if new master was promoted")
    err := wr.slaveWasPromoted(masterElectTablet)
    if err != nil {
        // This suggests that the master-elect is dead. This is bad.
        return fmt.Errorf("slaveWasPromoted(%v) failed: %v", masterElectTablet, err)
    }
    // Once the slave is promoted, remove it from our maps
    delete(slaveTabletMap, masterElectTablet.Alias)
    delete(masterTabletMap, masterElectTablet.Alias)
    // Then fix all the slaves, including the old master. This
    // last step is very likely to time out for some tablets (one
    // random guy is dead, the old master is dead, ...). We
    // execute them all in parallel until we get to
    // wr.ActionTimeout(). After this, no other action with a
    // timeout is executed, so even if we got to the timeout,
    // we're still good.
    event.DispatchUpdate(ev, "restarting slaves")
    topotools.RestartSlavesExternal(wr.ts, wr.logger, slaveTabletMap, masterTabletMap, masterElectTablet.Alias, wr.slaveWasRestarted)
    return nil
}
Author: plobsing | Project: vitess | Lines: 30 | Source: reparent_external.go
Example 5: migrateServedFrom
func (wr *Wrangler) migrateServedFrom(ki *topo.KeyspaceInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, reverse bool) (err error) {
    // re-read and update keyspace info record
    ki, err = wr.ts.GetKeyspace(ki.KeyspaceName())
    if err != nil {
        return err
    }
    if reverse {
        if _, ok := ki.ServedFrom[servedType]; ok {
            return fmt.Errorf("Destination Keyspace %s is not serving type %v", ki.KeyspaceName(), servedType)
        }
        ki.ServedFrom[servedType] = destinationShard.SourceShards[0].Keyspace
    } else {
        if _, ok := ki.ServedFrom[servedType]; !ok {
            return fmt.Errorf("Destination Keyspace %s is already serving type %v", ki.KeyspaceName(), servedType)
        }
        delete(ki.ServedFrom, servedType)
    }
    // re-read and check the destination shard
    destinationShard, err = wr.ts.GetShard(destinationShard.Keyspace(), destinationShard.ShardName())
    if err != nil {
        return err
    }
    if len(destinationShard.SourceShards) != 1 {
        return fmt.Errorf("Destination shard %v/%v is not a vertical split target", destinationShard.Keyspace(), destinationShard.ShardName())
    }
    tables := destinationShard.SourceShards[0].Tables
    // read the source shard, we'll need its master, and we'll need to
    // update the blacklisted tables.
    var sourceShard *topo.ShardInfo
    sourceShard, err = wr.ts.GetShard(destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard)
    if err != nil {
        return err
    }
    ev := &events.MigrateServedFrom{
        Keyspace:         *ki,
        SourceShard:      *sourceShard,
        DestinationShard: *destinationShard,
        ServedType:       servedType,
        Reverse:          reverse,
    }
    event.DispatchUpdate(ev, "start")
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    if servedType == topo.TYPE_MASTER {
        err = wr.masterMigrateServedFrom(ki, sourceShard, destinationShard, servedType, tables, ev)
    } else {
        err = wr.replicaMigrateServedFrom(ki, sourceShard, destinationShard, servedType, reverse, tables, ev)
    }
    event.DispatchUpdate(ev, "finished")
    return
}
Author: nangong92t | Project: go_src | Lines: 59 | Source: keyspace.go
Example 6: migrateServedFrom
func (wr *Wrangler) migrateServedFrom(ctx context.Context, ki *topo.KeyspaceInfo, destinationShard *topo.ShardInfo, servedType topodatapb.TabletType, cells []string, reverse bool, filteredReplicationWaitTime time.Duration) (err error) {
    // re-read and update keyspace info record
    ki, err = wr.ts.GetKeyspace(ctx, ki.KeyspaceName())
    if err != nil {
        return err
    }
    if reverse {
        ki.UpdateServedFromMap(servedType, cells, destinationShard.SourceShards[0].Keyspace, false, nil)
    } else {
        ki.UpdateServedFromMap(servedType, cells, destinationShard.SourceShards[0].Keyspace, true, destinationShard.Cells)
    }
    // re-read and check the destination shard
    destinationShard, err = wr.ts.GetShard(ctx, destinationShard.Keyspace(), destinationShard.ShardName())
    if err != nil {
        return err
    }
    if len(destinationShard.SourceShards) != 1 {
        return fmt.Errorf("Destination shard %v/%v is not a vertical split target", destinationShard.Keyspace(), destinationShard.ShardName())
    }
    tables := destinationShard.SourceShards[0].Tables
    // read the source shard, we'll need its master, and we'll need to
    // update the blacklisted tables.
    var sourceShard *topo.ShardInfo
    sourceShard, err = wr.ts.GetShard(ctx, destinationShard.SourceShards[0].Keyspace, destinationShard.SourceShards[0].Shard)
    if err != nil {
        return err
    }
    ev := &events.MigrateServedFrom{
        KeyspaceName:     ki.KeyspaceName(),
        SourceShard:      *sourceShard,
        DestinationShard: *destinationShard,
        ServedType:       servedType,
        Reverse:          reverse,
    }
    event.DispatchUpdate(ev, "start")
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    if servedType == topodatapb.TabletType_MASTER {
        err = wr.masterMigrateServedFrom(ctx, ki, sourceShard, destinationShard, tables, ev, filteredReplicationWaitTime)
    } else {
        err = wr.replicaMigrateServedFrom(ctx, ki, sourceShard, destinationShard, servedType, cells, reverse, tables, ev)
    }
    event.DispatchUpdate(ev, "finished")
    return
}
Author: littleyang | Project: vitess | Lines: 53 | Source: keyspace.go
Example 7: setState
func (vscw *VerticalSplitCloneWorker) setState(state string) {
    vscw.mu.Lock()
    vscw.state = state
    vscw.mu.Unlock()
    event.DispatchUpdate(vscw.ev, state)
}
Author: nangong92t | Project: go_src | Lines: 7 | Source: vertical_split_clone.go
Example 8: setState
func (scw *SplitCloneWorker) setState(state string) {
    scw.mu.Lock()
    scw.state = state
    scw.mu.Unlock()
    event.DispatchUpdate(scw.ev, state)
}
Author: plobsing | Project: vitess | Lines: 7 | Source: split_clone.go
Example 9: MultiSnapshot
func (wr *Wrangler) MultiSnapshot(keyRanges []key.KeyRange, tabletAlias topo.TabletAlias, concurrency int, tables, excludeTables []string, forceMasterSnapshot, skipSlaveRestart bool, maximumFilesize uint64) (manifests []string, parent topo.TabletAlias, err error) {
    var ti *topo.TabletInfo
    ti, err = wr.ts.GetTablet(tabletAlias)
    if err != nil {
        return
    }
    args := &actionnode.MultiSnapshotArgs{KeyRanges: keyRanges, Concurrency: concurrency, Tables: tables, ExcludeTables: excludeTables, SkipSlaveRestart: skipSlaveRestart, MaximumFilesize: maximumFilesize}
    ev := &events.MultiSnapshot{
        Tablet: *ti.Tablet,
        Args:   *args,
    }
    if len(tables) > 0 {
        event.DispatchUpdate(ev, "starting table")
    } else {
        event.DispatchUpdate(ev, "starting keyrange")
    }
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    restoreAfterSnapshot, err := wr.prepareToSnapshot(ti, forceMasterSnapshot)
    if err != nil {
        return
    }
    defer func() {
        err = replaceError(err, restoreAfterSnapshot())
    }()
    actionPath, err := wr.ai.MultiSnapshot(tabletAlias, args)
    if err != nil {
        return
    }
    results, err := wr.WaitForCompletionReply(actionPath)
    if err != nil {
        return
    }
    reply := results.(*actionnode.MultiSnapshotReply)
    event.DispatchUpdate(ev, "finished")
    return reply.ManifestPaths, reply.ParentAlias, nil
}
Author: chinna1986 | Project: vitess | Lines: 46 | Source: split.go
Example 10: recordError
func (vscw *VerticalSplitCloneWorker) recordError(err error) {
    vscw.mu.Lock()
    vscw.state = stateVSCError
    vscw.err = err
    vscw.mu.Unlock()
    event.DispatchUpdate(vscw.ev, "error: "+err.Error())
}
Author: nangong92t | Project: go_src | Lines: 8 | Source: vertical_split_clone.go
Example 11: recordError
func (scw *SplitCloneWorker) recordError(err error) {
    scw.mu.Lock()
    scw.state = stateSCError
    scw.err = err
    scw.mu.Unlock()
    event.DispatchUpdate(scw.ev, "error: "+err.Error())
}
Author: plobsing | Project: vitess | Lines: 8 | Source: split_clone.go
Example 12: EmergencyReparentShard
// EmergencyReparentShard will make the provided tablet the master for
// the shard, when the old master is completely unreachable.
func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, masterElectTabletAlias *topodatapb.TabletAlias, waitSlaveTimeout time.Duration) (err error) {
    // lock the shard
    ctx, unlock, lockErr := wr.ts.LockShard(ctx, keyspace, shard, fmt.Sprintf("EmergencyReparentShard(%v)", topoproto.TabletAliasString(masterElectTabletAlias)))
    if lockErr != nil {
        return lockErr
    }
    defer unlock(&err)
    // Create reusable Reparent event with available info
    ev := &events.Reparent{}
    // do the work
    err = wr.emergencyReparentShardLocked(ctx, ev, keyspace, shard, masterElectTabletAlias, waitSlaveTimeout)
    if err != nil {
        event.DispatchUpdate(ev, "failed EmergencyReparentShard: "+err.Error())
    } else {
        event.DispatchUpdate(ev, "finished EmergencyReparentShard")
    }
    return err
}
Author: dumbunny | Project: vitess | Lines: 22 | Source: reparent.go
Example 13: EmergencyReparentShard
// EmergencyReparentShard will make the provided tablet the master for
// the shard, when the old master is completely unreachable.
func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, masterElectTabletAlias *pb.TabletAlias, waitSlaveTimeout time.Duration) error {
    // lock the shard
    actionNode := actionnode.ReparentShard(emergencyReparentShardOperation, masterElectTabletAlias)
    lockPath, err := wr.lockShard(ctx, keyspace, shard, actionNode)
    if err != nil {
        return err
    }
    // Create reusable Reparent event with available info
    ev := &events.Reparent{}
    // do the work
    err = wr.emergencyReparentShardLocked(ctx, ev, keyspace, shard, masterElectTabletAlias, waitSlaveTimeout)
    if err != nil {
        event.DispatchUpdate(ev, "failed EmergencyReparentShard: "+err.Error())
    } else {
        event.DispatchUpdate(ev, "finished EmergencyReparentShard")
    }
    // and unlock
    return wr.unlockShard(ctx, keyspace, shard, actionNode, lockPath, err)
}
Author: richarwu | Project: vitess | Lines: 24 | Source: reparent.go
Example 14: TestUpdateDispatch
func TestUpdateDispatch(t *testing.T) {
    triggered := false
    event.AddListener(func(ev *testEvent) {
        triggered = true
    })
    want := "status"
    ev := &testEvent{}
    event.DispatchUpdate(ev, "status")
    if ev.Status != want {
        t.Errorf("ev.Status = %#v, want %#v", ev.Status, want)
    }
    if !triggered {
        t.Errorf("listener wasn't triggered on Dispatch()")
    }
}
Author: CowLeo | Project: vitess | Lines: 17 | Source: status_test.go
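The test above references a testEvent type that the excerpt does not show. Judging from the assertions (ev.Status ends up holding the dispatched string), a minimal compatible definition would look like the sketch below; the actual declaration in vitess's test files may differ.

// testEvent is a minimal event type compatible with the test above:
// its Update method lets event.DispatchUpdate record the status
// string before dispatching the event to listeners.
type testEvent struct {
    Status string
}

func (ev *testEvent) Update(status interface{}) {
    ev.Status = status.(string)
}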
Example 15: migrateServedFrom
func (wr *Wrangler) migrateServedFrom(ki *topo.KeyspaceInfo, si *topo.ShardInfo, servedType topo.TabletType, reverse bool) (err error) {
    // re-read and update keyspace info record
    ki, err = wr.ts.GetKeyspace(ki.KeyspaceName())
    if err != nil {
        return err
    }
    if reverse {
        if _, ok := ki.ServedFrom[servedType]; ok {
            return fmt.Errorf("Destination Keyspace %s is not serving type %v", ki.KeyspaceName(), servedType)
        }
        ki.ServedFrom[servedType] = si.SourceShards[0].Keyspace
    } else {
        if _, ok := ki.ServedFrom[servedType]; !ok {
            return fmt.Errorf("Destination Keyspace %s is already serving type %v", ki.KeyspaceName(), servedType)
        }
        delete(ki.ServedFrom, servedType)
    }
    // re-read and check the destination shard
    si, err = wr.ts.GetShard(si.Keyspace(), si.ShardName())
    if err != nil {
        return err
    }
    if len(si.SourceShards) != 1 {
        return fmt.Errorf("Destination shard %v/%v is not a vertical split target", si.Keyspace(), si.ShardName())
    }
    tables := si.SourceShards[0].Tables
    // read the source shard, we'll need its master
    sourceShard, err := wr.ts.GetShard(si.SourceShards[0].Keyspace, si.SourceShards[0].Shard)
    if err != nil {
        return err
    }
    ev := &events.MigrateServedFrom{
        Keyspace:         *ki,
        SourceShard:      *sourceShard,
        DestinationShard: *si,
        ServedType:       servedType,
        Reverse:          reverse,
    }
    event.DispatchUpdate(ev, "start")
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    // For master type migration, need to:
    // - switch the source shard to read-only
    // - gather the replication point
    // - wait for filtered replication to catch up before we continue
    // - disable filtered replication after the fact
    var sourceMasterTabletInfo *topo.TabletInfo
    if servedType == topo.TYPE_MASTER {
        // set master to read-only
        event.DispatchUpdate(ev, "setting source shard master to read-only")
        actionPath, err := wr.ai.SetReadOnly(sourceShard.MasterAlias)
        if err != nil {
            return err
        }
        if err := wr.WaitForCompletion(actionPath); err != nil {
            return err
        }
        // get the position
        event.DispatchUpdate(ev, "getting master position")
        sourceMasterTabletInfo, err = wr.ts.GetTablet(sourceShard.MasterAlias)
        if err != nil {
            return err
        }
        masterPosition, err := wr.ai.MasterPosition(sourceMasterTabletInfo, wr.ActionTimeout())
        if err != nil {
            return err
        }
        // wait for it
        event.DispatchUpdate(ev, "waiting for destination master to catch up to source master")
        if err := wr.ai.WaitBlpPosition(si.MasterAlias, blproto.BlpPosition{
            Uid:      0,
            Position: masterPosition,
        }, wr.ActionTimeout()); err != nil {
            return err
        }
        // and clear the shard record
        si.SourceShards = nil
    }
    // All is good, we can save the keyspace and shard (if needed) now
    event.DispatchUpdate(ev, "updating keyspace")
    if err = topo.UpdateKeyspace(wr.ts, ki); err != nil {
        return err
    }
    event.DispatchUpdate(ev, "updating destination shard")
    if servedType == topo.TYPE_MASTER {
        if err := topo.UpdateShard(wr.ts, si); err != nil {
            return err
        }
        //... (remainder of code omitted) ...
Author: chinna1986 | Project: vitess | Lines: 101 | Source: keyspace.go
Example 16: migrateServedTypes
// migrateServedTypes operates with all concerned shards locked.
func (wr *Wrangler) migrateServedTypes(keyspace string, sourceShards, destinationShards []*topo.ShardInfo, servedType topo.TabletType, reverse bool, shardCache map[string]*topo.ShardInfo) (err error) {
    // re-read all the shards so we are up to date
    for i, si := range sourceShards {
        if sourceShards[i], err = wr.ts.GetShard(si.Keyspace(), si.ShardName()); err != nil {
            return err
        }
        shardCache[si.ShardName()] = sourceShards[i]
    }
    for i, si := range destinationShards {
        if destinationShards[i], err = wr.ts.GetShard(si.Keyspace(), si.ShardName()); err != nil {
            return err
        }
        shardCache[si.ShardName()] = destinationShards[i]
    }
    ev := &events.MigrateServedTypes{
        Keyspace:          *topo.NewKeyspaceInfo(keyspace, nil, -1),
        SourceShards:      sourceShards,
        DestinationShards: destinationShards,
        ServedType:        servedType,
        Reverse:           reverse,
    }
    event.DispatchUpdate(ev, "start")
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    // check and update all shard records, in memory only
    for _, si := range sourceShards {
        if reverse {
            // need to add to source
            if topo.IsTypeInList(servedType, si.ServedTypes) {
                return fmt.Errorf("Source shard %v/%v is already serving type %v", si.Keyspace(), si.ShardName(), servedType)
            }
            si.ServedTypes = append(si.ServedTypes, servedType)
        } else {
            // need to remove from source
            var found bool
            if si.ServedTypes, found = removeType(servedType, si.ServedTypes); !found {
                return fmt.Errorf("Source shard %v/%v is not serving type %v", si.Keyspace(), si.ShardName(), servedType)
            }
        }
    }
    for _, si := range destinationShards {
        if reverse {
            // need to remove from destination
            var found bool
            if si.ServedTypes, found = removeType(servedType, si.ServedTypes); !found {
                return fmt.Errorf("Destination shard %v/%v is not serving type %v", si.Keyspace(), si.ShardName(), servedType)
            }
        } else {
            // need to add to destination
            if topo.IsTypeInList(servedType, si.ServedTypes) {
                return fmt.Errorf("Destination shard %v/%v is already serving type %v", si.Keyspace(), si.ShardName(), servedType)
            }
            si.ServedTypes = append(si.ServedTypes, servedType)
        }
    }
    // For master type migration, need to:
    // - switch the source shards to read-only
    // - gather all replication points
    // - wait for filtered replication to catch up before we continue
    // - disable filtered replication after the fact
    if servedType == topo.TYPE_MASTER {
        event.DispatchUpdate(ev, "setting all source masters read-only")
        err := wr.makeMastersReadOnly(sourceShards)
        if err != nil {
            return err
        }
        event.DispatchUpdate(ev, "getting positions of source masters")
        masterPositions, err := wr.getMastersPosition(sourceShards)
        if err != nil {
            return err
        }
        event.DispatchUpdate(ev, "waiting for destination masters to catch up")
        if err := wr.waitForFilteredReplication(masterPositions, destinationShards); err != nil {
            return err
        }
        for _, si := range destinationShards {
            si.SourceShards = nil
        }
    }
    // All is good, we can save the shards now
    event.DispatchUpdate(ev, "updating source shards")
    for _, si := range sourceShards {
        if err := topo.UpdateShard(wr.ts, si); err != nil {
            return err
        }
        shardCache[si.ShardName()] = si
    }
    event.DispatchUpdate(ev, "updating destination shards")
    //... (remainder of code omitted) ...
Author: chinna1986 | Project: vitess | Lines: 101 | Source: keyspace.go
Example 17: setErrorState
func (scw *SplitCloneWorker) setErrorState(err error) {
    scw.SetState(WorkerStateError)
    event.DispatchUpdate(scw.ev, "error: "+err.Error())
}
Author: strogo | Project: vitess | Lines: 4 | Source: split_clone.go
Example 18: setState
func (scw *SplitCloneWorker) setState(state StatusWorkerState) {
    scw.SetState(state)
    event.DispatchUpdate(scw.ev, state.String())
}
Author: strogo | Project: vitess | Lines: 4 | Source: split_clone.go
Example 19: tabletExternallyReparentedLocked
func tabletExternallyReparentedLocked(ts topo.Server, tablet *topo.TabletInfo, actionTimeout, lockTimeout time.Duration, interrupted chan struct{}) (err error) {
    // read the shard, make sure again the master is not already good.
    // critical read, we want up to date info (and the shard is locked).
    shardInfo, err := ts.GetShardCritical(tablet.Keyspace, tablet.Shard)
    if err != nil {
        return err
    }
    if shardInfo.MasterAlias == tablet.Alias {
        return fmt.Errorf("this tablet is already the master")
    }
    // Read the tablets, make sure the master elect is known to the shard
    // (it's this tablet, so it better be!).
    // Note we will keep going with a partial tablet map, which usually
    // happens when a cell is not reachable. After these checks, the
    // guarantees we'll have are:
    // - global cell is reachable (we just locked and read the shard)
    // - the local cell that contains the new master is reachable
    //   (as we're going to check the new master is in the list)
    // That should be enough.
    tabletMap, err := topo.GetTabletMapForShard(ts, tablet.Keyspace, tablet.Shard)
    switch err {
    case nil:
        // keep going
    case topo.ErrPartialResult:
        log.Warningf("Got topo.ErrPartialResult from GetTabletMapForShard, may need to re-init some tablets")
    default:
        return err
    }
    masterElectTablet, ok := tabletMap[tablet.Alias]
    if !ok {
        return fmt.Errorf("this master-elect tablet %v not found in replication graph %v/%v %v", tablet.Alias, tablet.Keyspace, tablet.Shard, topotools.MapKeys(tabletMap))
    }
    // Create reusable Reparent event with available info
    ev := &events.Reparent{
        ShardInfo: *shardInfo,
        NewMaster: *tablet.Tablet,
    }
    if oldMasterTablet, ok := tabletMap[shardInfo.MasterAlias]; ok {
        ev.OldMaster = *oldMasterTablet.Tablet
    }
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    // sort the tablets, and handle them
    slaveTabletMap, masterTabletMap := topotools.SortedTabletMap(tabletMap)
    event.DispatchUpdate(ev, "starting external from tablet")
    // we fix the new master in the replication graph
    event.DispatchUpdate(ev, "mark ourself as new master")
    err = updateReplicationGraphForPromotedSlave(ts, tablet)
    if err != nil {
        // This suggests we can't talk to topo server. This is bad.
        return fmt.Errorf("updateReplicationGraphForPromotedSlave failed: %v", err)
    }
    // Once this tablet is promoted, remove it from our maps
    delete(slaveTabletMap, tablet.Alias)
    delete(masterTabletMap, tablet.Alias)
    // Then fix all the slaves, including the old master. This
    // last step is very likely to time out for some tablets (one
    // random guy is dead, the old master is dead, ...). We
    // execute them all in parallel until we get to
    // wr.ActionTimeout(). After this, no other action with a
    // timeout is executed, so even if we got to the timeout,
    // we're still good.
    event.DispatchUpdate(ev, "restarting slaves")
    logger := logutil.NewConsoleLogger()
    ai := initiator.NewActionInitiator(ts)
    topotools.RestartSlavesExternal(ts, logger, slaveTabletMap, masterTabletMap, masterElectTablet.Alias, func(ti *topo.TabletInfo, swrd *actionnode.SlaveWasRestartedArgs) error {
        return ai.RpcSlaveWasRestarted(ti, swrd, actionTimeout)
    })
    // Compute the list of Cells we need to rebuild: old master and
    // all other cells if reparenting to another cell.
    cells := []string{shardInfo.MasterAlias.Cell}
    if shardInfo.MasterAlias.Cell != tablet.Alias.Cell {
        cells = nil
    }
    // now update the master record in the shard object
    event.DispatchUpdate(ev, "updating shard record")
    log.Infof("Updating Shard's MasterAlias record")
    shardInfo.MasterAlias = tablet.Alias
    if err = topo.UpdateShard(ts, shardInfo); err != nil {
        return err
    }
    // and rebuild the shard serving graph
    event.DispatchUpdate(ev, "rebuilding shard serving graph")
    log.Infof("Rebuilding shard serving graph data")
    if err = topotools.RebuildShard(logger, ts, tablet.Keyspace, tablet.Shard, cells, lockTimeout, interrupted); err != nil {
        return err
        //... (remainder of code omitted) ...
Author: chinna1986 | Project: vitess | Lines: 101 | Source: actor.go
Example 20: TabletExternallyReparented
// TabletExternallyReparented updates all topo records so the current
// tablet is the new master for this shard.
// Should be called under RPCWrapLock.
func (agent *ActionAgent) TabletExternallyReparented(ctx context.Context, externalID string) error {
    startTime := time.Now()
    // If there is a finalize step running, wait for it to finish or time out
    // before checking the global shard record again.
    if agent.finalizeReparentCtx != nil {
        select {
        case <-agent.finalizeReparentCtx.Done():
            agent.finalizeReparentCtx = nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }
    tablet := agent.Tablet()
    // Check the global shard record.
    si, err := agent.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard)
    if err != nil {
        log.Warningf("fastTabletExternallyReparented: failed to read global shard record for %v/%v: %v", tablet.Keyspace, tablet.Shard, err)
        return err
    }
    if topoproto.TabletAliasEqual(si.MasterAlias, tablet.Alias) {
        // We may get called on the current master even when nothing has changed.
        // If the global shard record is already updated, it means we successfully
        // finished a previous reparent to this tablet.
        return nil
    }
    // Remember when we were first told we're the master.
    // If another tablet claims to be master and offers a more recent time,
    // that tablet will be trusted over us.
    agent.mutex.Lock()
    agent._tabletExternallyReparentedTime = startTime
    agent._replicationDelay = 0
    agent.mutex.Unlock()
    // Create a reusable Reparent event with available info.
    ev := &events.Reparent{
        ShardInfo: *si,
        NewMaster: *tablet,
        OldMaster: topodatapb.Tablet{
            Alias: si.MasterAlias,
            Type:  topodatapb.TabletType_MASTER,
        },
        ExternalID: externalID,
    }
    defer func() {
        if err != nil {
            event.DispatchUpdate(ev, "failed: "+err.Error())
        }
    }()
    event.DispatchUpdate(ev, "starting external from tablet (fast)")
    var wg sync.WaitGroup
    var errs concurrency.AllErrorRecorder
    // Execute state change to master by force-updating only the local copy of the
    // tablet record. The actual record in topo will be updated later.
    log.Infof("fastTabletExternallyReparented: executing change callback for state change to MASTER")
    oldTablet := proto.Clone(tablet).(*topodatapb.Tablet)
    tablet.Type = topodatapb.TabletType_MASTER
    tablet.HealthMap = nil
    agent.setTablet(tablet)
    wg.Add(1)
    go func() {
        defer wg.Done()
        // This is where updateState will block for gracePeriod, while it gives
        // vtgate a chance to stop sending replica queries.
        if err := agent.updateState(ctx, oldTablet, "fastTabletExternallyReparented"); err != nil {
            errs.RecordError(fmt.Errorf("fastTabletExternallyReparented: failed to change tablet state to MASTER: %v", err))
        }
    }()
    wg.Add(1)
    go func() {
        defer wg.Done()
        // Directly write the new master endpoint in the serving graph.
        // We will do a true rebuild in the background soon, but in the meantime,
        // this will be enough for clients to re-resolve the new master.
        event.DispatchUpdate(ev, "writing new master endpoint")
        log.Infof("fastTabletExternallyReparented: writing new master endpoint to serving graph")
        ep, err := topo.TabletEndPoint(tablet)
        if err != nil {
            errs.RecordError(fmt.Errorf("fastTabletExternallyReparented: failed to generate EndPoint for tablet %v: %v", tablet.Alias, err))
            return
        }
        err = topo.UpdateEndPoints(ctx, agent.TopoServer, tablet.Alias.Cell,
            si.Keyspace(), si.ShardName(), topodatapb.TabletType_MASTER,
            &topodatapb.EndPoints{Entries: []*topodatapb.EndPoint{ep}}, -1)
        if err != nil {
            errs.RecordError(fmt.Errorf("fastTabletExternallyReparented: failed to update master endpoint: %v", err))
            return
        }
        //... (remainder of code omitted) ...
Author: littleyang | Project: vitess | Lines: 101 | Source: reparent.go
Note: The github.com/youtube/vitess/go/event.DispatchUpdate function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's License before redistributing or reusing the code; do not republish without permission.