// WaitForCluster waits until the node observes that the cluster-wide config is
// committed to raft. This ensures that we can see and serve information
// related to the cluster.
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
defer cancel()
var clusters []*api.Cluster
n.MemoryStore().View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
})
if err != nil {
return nil, err
}
if len(clusters) == 1 {
cluster = clusters[0]
} else {
select {
case e := <-watch:
cluster = e.(state.EventCreateCluster).Cluster
case <-ctx.Done():
return nil, ctx.Err()
}
}
return cluster, nil
}
Developer: amitshukla, Project: docker, Lines: 29, Source: util.go
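The ordering in WaitForCluster matters: the watch is registered before the initial read, so a cluster object created between the two steps is still observed on the event channel. Below is a minimal, self-contained sketch of the same read-or-wait fallback using plain channels; the item and event types are hypothetical stand-ins, not the swarmkit APIs.

// waitForItem mirrors the structure of WaitForCluster: return the existing
// item if the initial read found exactly one, otherwise block until either
// a create event arrives or the context is cancelled.
package main

import (
    "context"
    "fmt"
    "time"
)

type item struct{ Name string }

type createdEvent struct{ Item *item }

func waitForItem(ctx context.Context, existing []*item, watch <-chan createdEvent) (*item, error) {
    if len(existing) == 1 {
        return existing[0], nil
    }
    select {
    case e := <-watch:
        return e.Item, nil
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}

func main() {
    watch := make(chan createdEvent, 1)
    go func() {
        // Simulate the cluster object being committed shortly after startup.
        time.Sleep(50 * time.Millisecond)
        watch <- createdEvent{Item: &item{Name: "default"}}
    }()

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    it, err := waitForItem(ctx, nil, watch) // empty initial read, so we wait on the watch
    fmt.Println(it, err)
}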
Example 2: TestKeyManagerDefaultSubsystem
// Verify the key generation and rotation for default subsystems
func TestKeyManagerDefaultSubsystem(t *testing.T) {
st := store.NewMemoryStore(nil)
defer st.Close()
createCluster(t, st, "default", "default")
k := New(st, DefaultConfig())
ctx := context.Background()
go k.Run(ctx)
time.Sleep(250 * time.Millisecond)
// verify the number of keys allocated matches the keyring size.
var (
clusters []*api.Cluster
err error
)
k.store.View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
})
assert.NoError(t, err)
assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), len(k.config.Subsystems)*keyringSize)
key1 := clusters[0].NetworkBootstrapKeys[0].Key
k.rotateKey(ctx)
// verify that after a rotation the oldest key has been removed from the keyring
assert.Equal(t, len(k.keyRing.keys), len(k.config.Subsystems)*keyringSize)
for _, key := range k.keyRing.keys {
match := bytes.Equal(key.Key, key1)
assert.False(t, match)
}
}
// ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in
// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`.
// - Returns an error if listing fails.
func (s *Server) ListSecrets(ctx context.Context, request *api.ListSecretsRequest) (*api.ListSecretsResponse, error) {
var (
secrets []*api.Secret
respSecrets []*api.Secret
err error
byFilters []store.By
by store.By
labels map[string]string
)
// return all secrets that match either any of the names or any of the name prefixes (why would you give both?)
if request.Filters != nil {
for _, name := range request.Filters.Names {
byFilters = append(byFilters, store.ByName(name))
}
for _, prefix := range request.Filters.NamePrefixes {
byFilters = append(byFilters, store.ByNamePrefix(prefix))
}
for _, prefix := range request.Filters.IDPrefixes {
byFilters = append(byFilters, store.ByIDPrefix(prefix))
}
labels = request.Filters.Labels
}
switch len(byFilters) {
case 0:
by = store.All
case 1:
by = byFilters[0]
default:
by = store.Or(byFilters...)
}
s.store.View(func(tx store.ReadTx) {
secrets, err = store.FindSecrets(tx, by)
})
if err != nil {
return nil, err
}
// strip secret data from the secret, filter by label, and filter out all internal secrets
for _, secret := range secrets {
if secret.Internal || !filterMatchLabels(secret.Spec.Annotations.Labels, labels) {
continue
}
secret.Spec.Data = nil // clean the actual secret data so it's never returned
respSecrets = append(respSecrets, secret)
}
return &api.ListSecretsResponse{Secrets: respSecrets}, nil
}
Developer: harche, Project: docker, Lines: 56, Source: secret.go
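The label filtering step above hinges on filterMatchLabels. A plausible reading of its semantics, sketched below as a standalone helper (not the actual swarmkit implementation): every requested label key must be present on the secret, and an empty filter value is treated as a presence-only check.

package main

import "fmt"

// matchLabels is an illustrative reimplementation of the label-match
// semantics used when filtering secrets; an empty or nil filter matches
// everything.
func matchLabels(have, want map[string]string) bool {
    for k, v := range want {
        got, ok := have[k]
        if !ok {
            return false
        }
        if v != "" && v != got {
            return false
        }
    }
    return true
}

func main() {
    secretLabels := map[string]string{"env": "prod", "team": "infra"}
    fmt.Println(matchLabels(secretLabels, map[string]string{"env": "prod"})) // true
    fmt.Println(matchLabels(secretLabels, map[string]string{"env": ""}))     // true: key presence only
    fmt.Println(matchLabels(secretLabels, map[string]string{"env": "dev"}))  // false
    fmt.Println(matchLabels(secretLabels, nil))                              // true
}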
Example 6: initCluster
func (r *ReplicatedOrchestrator) initCluster(readTx store.ReadTx) error {
clusters, err := store.FindClusters(readTx, store.ByName("default"))
if err != nil {
return err
}
if len(clusters) != 1 {
// we'll just pick it when it is created.
return nil
}
r.cluster = clusters[0]
return nil
}
func (k *KeyManager) rotateKey(ctx context.Context) error {
log := log.G(ctx).WithField("module", "keymanager")
var (
clusters []*api.Cluster
err error
)
k.store.View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
})
if err != nil {
log.Errorf("reading cluster config failed, %v", err)
return err
}
cluster := clusters[0]
if len(cluster.NetworkBootstrapKeys) == 0 {
panic(fmt.Errorf("no key in the cluster config"))
}
subsysKeys := map[string][]*api.EncryptionKey{}
for _, key := range k.keyRing.keys {
subsysKeys[key.Subsystem] = append(subsysKeys[key.Subsystem], key)
}
k.keyRing.keys = []*api.EncryptionKey{}
// We maintain the latest key and the one before in the key ring to allow
// agents to communicate without disruption on key change.
for subsys, keys := range subsysKeys {
if len(keys) == keyringSize {
min := 0
for i, key := range keys[1:] {
if key.LamportTime < keys[min].LamportTime {
min = i + 1 // index i in keys[1:] corresponds to keys[i+1]
}
}
keys = append(keys[0:min], keys[min+1:]...)
}
keys = append(keys, k.allocateKey(ctx, subsys))
subsysKeys[subsys] = keys
}
for _, keys := range subsysKeys {
k.keyRing.keys = append(k.keyRing.keys, keys...)
}
return k.updateKey(cluster)
}
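The pruning step inside rotateKey is easy to miss: when a subsystem's ring already holds keyringSize keys, the entry with the smallest LamportTime is dropped before the freshly allocated key is appended. Below is a self-contained sketch of just that step; the key struct, the keyringSize value of 2, and the subsystem string are simplified assumptions for illustration, not the swarmkit definitions.

package main

import "fmt"

// encryptionKey is a simplified stand-in for api.EncryptionKey.
type encryptionKey struct {
    Subsystem   string
    LamportTime uint64
}

// keyringSize is assumed to be 2 here (latest key plus the previous one);
// the real constant lives in the keymanager package.
const keyringSize = 2

// rotate drops the oldest key when the ring is full, then appends the
// freshly allocated key.
func rotate(keys []encryptionKey, fresh encryptionKey) []encryptionKey {
    if len(keys) == keyringSize {
        oldest := 0
        for i := 1; i < len(keys); i++ {
            if keys[i].LamportTime < keys[oldest].LamportTime {
                oldest = i
            }
        }
        keys = append(keys[:oldest], keys[oldest+1:]...)
    }
    return append(keys, fresh)
}

func main() {
    ring := []encryptionKey{
        {Subsystem: "networking:ipsec", LamportTime: 1},
        {Subsystem: "networking:ipsec", LamportTime: 2},
    }
    ring = rotate(ring, encryptionKey{Subsystem: "networking:ipsec", LamportTime: 3})
    fmt.Println(ring) // [{networking:ipsec 2} {networking:ipsec 3}]
}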
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
st := store.NewMemoryStore(nil)
defer st.Close()
createCluster(t, st, "default", "default")
config := &Config{
ClusterName: store.DefaultClusterName,
Keylen: DefaultKeyLen,
RotationInterval: DefaultKeyRotationInterval,
Subsystems: []string{SubsystemIPSec},
}
k := New(st, config)
ctx := context.Background()
go k.Run(ctx)
time.Sleep(250 * time.Millisecond)
// verify the number of keys allocated matches the keyring size.
var (
clusters []*api.Cluster
err error
)
k.store.View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
})
assert.NoError(t, err)
assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), keyringSize)
key1 := clusters[0].NetworkBootstrapKeys[0].Key
k.rotateKey(ctx)
// verify that after a rotation the oldest key has been removed from the keyring
// also verify that all keys are for the right subsystem
assert.Equal(t, len(k.keyRing.keys), keyringSize)
for _, key := range k.keyRing.keys {
match := bytes.Equal(key.Key, key1)
assert.False(t, match)
match = key.Subsystem == SubsystemIPSec
assert.True(t, match)
}
}
// Verify that the initial key is allocated for the IPsec subsystem and that
// the very first key is dropped after two rotations
func TestKeyManagerCustomSubsystem(t *testing.T) {
st := store.NewMemoryStore(nil)
defer st.Close()
createCluster(t, st, "default", "default")
config := &Config{
ClusterName: store.DefaultClusterName,
Keylen: DefaultKeyLen,
RotationInterval: DefaultKeyRotationInterval,
Subsystems: []string{SubsystemIPSec},
}
k := New(st, config)
ctx := context.Background()
go k.Run(ctx)
time.Sleep(250 * time.Millisecond)
// verify the first key has been allocated and updated in the
// store
var (
clusters []*api.Cluster
err error
)
k.store.View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
})
assert.NoError(t, err)
assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), 1)
key1 := clusters[0].NetworkBootstrapKeys[0].Key
k.rotateKey(ctx)
k.rotateKey(ctx)
// verify that after two rotations the keyring has two keys and the very
// first key allocated has been removed
assert.Equal(t, len(k.keyRing.keys), 2)
for _, key := range k.keyRing.keys {
match := bytes.Equal(key.Key, key1)
assert.False(t, match)
}
}
func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
na, err := networkallocator.New()
if err != nil {
return err
}
nc := &networkContext{
nwkAllocator: na,
unallocatedTasks: make(map[string]*api.Task),
unallocatedServices: make(map[string]*api.Service),
unallocatedNetworks: make(map[string]*api.Network),
ingressNetwork: newIngressNetwork(),
}
a.netCtx = nc
defer func() {
// Clear a.netCtx if initialization was unsuccessful.
if err != nil {
a.netCtx = nil
}
}()
// Check if we have the ingress network. If not found create
// it before reading all network objects for allocation.
var networks []*api.Network
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
if len(networks) > 0 {
nc.ingressNetwork = networks[0]
}
})
if err != nil {
return errors.Wrap(err, "failed to find ingress network during init")
}
// If ingress network is not found, create one right away
// using the predefined template.
if len(networks) == 0 {
if err := a.store.Update(func(tx store.Tx) error {
nc.ingressNetwork.ID = identity.NewID()
if err := store.CreateNetwork(tx, nc.ingressNetwork); err != nil {
return err
}
return nil
}); err != nil {
return errors.Wrap(err, "failed to create ingress network")
}
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
if len(networks) > 0 {
nc.ingressNetwork = networks[0]
}
})
if err != nil {
return errors.Wrap(err, "failed to find ingress network after creating it")
}
}
// Try to complete ingress network allocation before anything else so
// that we can get the preferred subnet for the ingress
// network.
if !na.IsAllocated(nc.ingressNetwork) {
if err := a.allocateNetwork(ctx, nc.ingressNetwork); err != nil {
log.G(ctx).WithError(err).Error("failed allocating ingress network during init")
} else if _, err := a.store.Batch(func(batch *store.Batch) error {
if err := a.commitAllocatedNetwork(ctx, batch, nc.ingressNetwork); err != nil {
log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init")
}
return nil
}); err != nil {
log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init")
}
}
// Allocate networks that were already in the store before we started
// watching.
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.All)
})
if err != nil {
return errors.Wrap(err, "error listing all networks in store while trying to allocate during init")
}
var allocatedNetworks []*api.Network
for _, n := range networks {
if na.IsAllocated(n) {
continue
}
if err := a.allocateNetwork(ctx, n); err != nil {
log.G(ctx).WithError(err).Errorf("failed allocating network %s during init", n.ID)
continue
}
allocatedNetworks = append(allocatedNetworks, n)
}
if _, err := a.store.Batch(func(batch *store.Batch) error {
for _, n := range allocatedNetworks {
// ... (part of the code omitted here) ...
Developer: Mic92, Project: docker, Lines: 101, Source: network.go
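The first half of doNetworkInit is a check-then-create dance: read the ingress network by name, create it inside a store update if it does not exist, then read it back. The sketch below distills that pattern, with a mutex-guarded map standing in for the MemoryStore's View/Update transactions; all names and the generated ID are illustrative.

package main

import (
    "fmt"
    "sync"
)

type network struct{ ID, Name string }

// store is a toy stand-in for the MemoryStore, keyed by network name.
type store struct {
    mu       sync.Mutex
    networks map[string]*network
}

func (s *store) view(fn func(map[string]*network)) {
    s.mu.Lock()
    defer s.mu.Unlock()
    fn(s.networks)
}

func (s *store) update(fn func(map[string]*network) error) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    return fn(s.networks)
}

// ensureIngress reads the ingress network, creates it if missing, and reads
// it back, mirroring the shape of the init code above.
func ensureIngress(s *store) (*network, error) {
    const ingressName = "ingress"
    var found *network
    s.view(func(nets map[string]*network) { found = nets[ingressName] })
    if found != nil {
        return found, nil
    }
    if err := s.update(func(nets map[string]*network) error {
        nets[ingressName] = &network{ID: "net-1", Name: ingressName} // illustrative ID
        return nil
    }); err != nil {
        return nil, fmt.Errorf("failed to create ingress network: %v", err)
    }
    s.view(func(nets map[string]*network) { found = nets[ingressName] })
    return found, nil
}

func main() {
    s := &store{networks: map[string]*network{}}
    n, err := ensureIngress(s)
    fmt.Println(n, err)
}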
Example 14: Run
// Run is the main loop for a Raft node, it goes along the state machine,
// acting on the messages received from other Raft nodes in the cluster.
//
// Before running the main loop, it first starts the raft node based on saved
// cluster state. If no saved state exists, it starts a single-node cluster.
func (n *Node) Run(ctx context.Context) error {
ctx = log.WithLogger(ctx, logrus.WithField("raft_id", fmt.Sprintf("%x", n.Config.ID)))
ctx, cancel := context.WithCancel(ctx)
// nodeRemoved indicates that the node was stopped due to its removal.
nodeRemoved := false
defer func() {
cancel()
n.stop(ctx)
if nodeRemoved {
// Move WAL and snapshot out of the way, since
// they are no longer usable.
if err := n.moveWALAndSnap(); err != nil {
log.G(ctx).WithError(err).Error("failed to move wal after node removal")
}
}
n.done()
}()
wasLeader := false
for {
select {
case <-n.ticker.C():
n.raftNode.Tick()
n.cluster.Tick()
case rd := <-n.raftNode.Ready():
raftConfig := DefaultRaftConfig()
n.memoryStore.View(func(readTx store.ReadTx) {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err == nil && len(clusters) == 1 {
raftConfig = clusters[0].Spec.Raft
}
})
// Save entries to storage
if err := n.saveToStorage(&raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil {
log.G(ctx).WithError(err).Error("failed to save entries to storage")
}
if len(rd.Messages) != 0 {
// Send raft messages to peers
if err := n.send(ctx, rd.Messages); err != nil {
log.G(ctx).WithError(err).Error("failed to send message to members")
}
}
// Apply snapshot to memory store. The snapshot
// was applied to the raft store in
// saveToStorage.
if !raft.IsEmptySnap(rd.Snapshot) {
// Load the snapshot data into the store
if err := n.restoreFromSnapshot(rd.Snapshot.Data, false); err != nil {
log.G(ctx).WithError(err).Error("failed to restore from snapshot")
}
n.appliedIndex = rd.Snapshot.Metadata.Index
n.snapshotIndex = rd.Snapshot.Metadata.Index
n.confState = rd.Snapshot.Metadata.ConfState
}
// If we cease to be the leader, we must cancel any
// proposals that are currently waiting for a quorum to
// acknowledge them. It is still possible for these to
// become committed, but if that happens we will apply
// them as any follower would.
// It is important that we cancel these proposals before
// calling processCommitted, so processCommitted does
// not deadlock.
if rd.SoftState != nil {
if wasLeader && rd.SoftState.RaftState != raft.StateLeader {
wasLeader = false
if atomic.LoadUint32(&n.signalledLeadership) == 1 {
atomic.StoreUint32(&n.signalledLeadership, 0)
n.leadershipBroadcast.Publish(IsFollower)
}
// It is important that we set n.signalledLeadership to 0
// before calling n.wait.cancelAll. When a new raft
// request is registered, it checks n.signalledLeadership
// afterwards, and cancels the registration if it is 0.
// If cancelAll was called first, this call might run
// before the new request registers, but
// signalledLeadership would be set after the check.
// Setting signalledLeadership before calling cancelAll
// ensures that if a new request is registered during
// this transition, it will either be cancelled by
// cancelAll, or by its own check of signalledLeadership.
n.wait.cancelAll()
} else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader {
wasLeader = true
}
}
// ... (part of the code omitted here) ...
Developer: Mic92, Project: docker, Lines: 101, Source: raft.go
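The long comment about signalledLeadership and cancelAll describes a protocol between two sides of a race: the loop losing leadership clears the flag before cancelling waits, and a proposal that registers concurrently re-checks the flag after registering and cancels itself if it is zero. A compact, sequential sketch of that protocol follows, with simplified stand-ins for the node's fields.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var signalledLeadership uint32 = 1
    var mu sync.Mutex
    pending := map[int]bool{} // stand-in for n.wait's registered proposals

    register := func(id int) { mu.Lock(); pending[id] = true; mu.Unlock() }
    cancel := func(id int) { mu.Lock(); delete(pending, id); mu.Unlock() }
    cancelAll := func() { mu.Lock(); pending = map[int]bool{}; mu.Unlock() }

    // Side 1 (losing leadership): clear the flag first, then cancel waits.
    atomic.StoreUint32(&signalledLeadership, 0)
    cancelAll()

    // Side 2 (a new proposal): register, then re-check the flag. Because the
    // flag is cleared before cancelAll runs, a proposal that slips in after
    // cancelAll still sees the cleared flag here and cancels itself.
    register(42)
    if atomic.LoadUint32(&signalledLeadership) == 0 {
        cancel(42)
    }

    mu.Lock()
    fmt.Println("pending proposals:", len(pending)) // 0
    mu.Unlock()
}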
Example 15: Run
// Run runs dispatcher tasks which should be run on the leader dispatcher.
// The dispatcher can be stopped by cancelling ctx or by calling Stop().
func (d *Dispatcher) Run(ctx context.Context) error {
d.mu.Lock()
if d.isRunning() {
d.mu.Unlock()
return fmt.Errorf("dispatcher is already running")
}
logger := log.G(ctx).WithField("module", "dispatcher")
ctx = log.WithLogger(ctx, logger)
if err := d.markNodesUnknown(ctx); err != nil {
logger.Errorf(`failed to move all nodes to "unknown" state: %v`, err)
}
configWatcher, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
if err == nil && len(clusters) == 1 {
heartbeatPeriod, err := ptypes.Duration(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
if err == nil && heartbeatPeriod > 0 {
d.config.HeartbeatPeriod = heartbeatPeriod
}
if clusters[0].NetworkBootstrapKeys != nil {
d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
}
}
return nil
},
state.EventUpdateCluster{},
)
if err != nil {
d.mu.Unlock()
return err
}
defer cancel()
d.ctx, d.cancel = context.WithCancel(ctx)
d.mu.Unlock()
publishManagers := func() {
mgrs := getWeightedPeers(d.cluster)
sort.Sort(weightedPeerByNodeID(mgrs))
d.mu.Lock()
if reflect.DeepEqual(mgrs, d.lastSeenManagers) {
d.mu.Unlock()
return
}
d.lastSeenManagers = mgrs
d.mu.Unlock()
d.mgrQueue.Publish(mgrs)
}
publishManagers()
publishTicker := time.NewTicker(1 * time.Second)
defer publishTicker.Stop()
batchTimer := time.NewTimer(maxBatchInterval)
defer batchTimer.Stop()
for {
select {
case <-publishTicker.C:
publishManagers()
case <-d.processTaskUpdatesTrigger:
d.processTaskUpdates()
batchTimer.Reset(maxBatchInterval)
case <-batchTimer.C:
d.processTaskUpdates()
batchTimer.Reset(maxBatchInterval)
case v := <-configWatcher:
cluster := v.(state.EventUpdateCluster)
d.mu.Lock()
if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
// ignore error, since Spec has passed validation before
heartbeatPeriod, _ := ptypes.Duration(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
if heartbeatPeriod != d.config.HeartbeatPeriod {
// only call d.nodes.updatePeriod when heartbeatPeriod changes
d.config.HeartbeatPeriod = heartbeatPeriod
d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
}
}
d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
d.mu.Unlock()
d.keyMgrQueue.Publish(struct{}{})
case <-d.ctx.Done():
return nil
}
}
}
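Both dispatcher Run loops in this listing share the same batching shape: task updates are flushed either when an explicit trigger fires or when the batch timer expires, and the timer is reset after every flush. Below is a self-contained sketch of that loop; the channel names and intervals are illustrative, not the dispatcher's actual fields.

package main

import (
    "fmt"
    "time"
)

const maxBatchInterval = 100 * time.Millisecond

func main() {
    trigger := make(chan struct{}, 1)
    done := make(chan struct{})
    flush := func(reason string) { fmt.Println("flush:", reason) }

    go func() {
        batchTimer := time.NewTimer(maxBatchInterval)
        defer batchTimer.Stop()
        for {
            select {
            case <-trigger:
                flush("trigger")
                batchTimer.Reset(maxBatchInterval) // reset after every flush, as the dispatcher does
            case <-batchTimer.C:
                flush("timer")
                batchTimer.Reset(maxBatchInterval)
            case <-done:
                return
            }
        }
    }()

    trigger <- struct{}{}
    time.Sleep(250 * time.Millisecond) // let the timer fire a couple of times
    close(done)
}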
// Run is the main loop for a Raft node, it goes along the state machine,
// acting on the messages received from other Raft nodes in the cluster.
//
// Before running the main loop, it first starts the raft node based on saved
// cluster state. If no saved state exists, it starts a single-node cluster.
func (n *Node) Run(ctx context.Context) error {
defer func() {
close(n.doneCh)
}()
for {
select {
case <-n.ticker.C():
n.Tick()
case rd := <-n.Ready():
raftConfig := DefaultRaftConfig()
n.memoryStore.View(func(readTx store.ReadTx) {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err == nil && len(clusters) == 1 {
raftConfig = clusters[0].Spec.Raft
}
})
// Save entries to storage
if err := n.saveToStorage(&raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil {
n.Config.Logger.Error(err)
}
// Send raft messages to peers
if err := n.send(rd.Messages); err != nil {
n.Config.Logger.Error(err)
}
// Apply snapshot to memory store. The snapshot
// was applied to the raft store in
// saveToStorage.
if !raft.IsEmptySnap(rd.Snapshot) {
// Load the snapshot data into the store
if err := n.restoreFromSnapshot(rd.Snapshot.Data, n.forceNewCluster); err != nil {
n.Config.Logger.Error(err)
}
n.appliedIndex = rd.Snapshot.Metadata.Index
n.snapshotIndex = rd.Snapshot.Metadata.Index
n.confState = rd.Snapshot.Metadata.ConfState
}
// Process committed entries
for _, entry := range rd.CommittedEntries {
if err := n.processCommitted(entry); err != nil {
n.Config.Logger.Error(err)
}
}
// Trigger a snapshot every once in a while
if n.snapshotInProgress == nil &&
raftConfig.SnapshotInterval > 0 &&
n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
n.doSnapshot(&raftConfig)
}
// If we cease to be the leader, we must cancel
// any proposals that are currently waiting for
// a quorum to acknowledge them. It is still
// possible for these to become committed, but
// if that happens we will apply them as any
// follower would.
if rd.SoftState != nil {
if n.wasLeader && rd.SoftState.RaftState != raft.StateLeader {
n.wasLeader = false
n.wait.cancelAll()
n.leadershipBroadcast.Write(IsFollower)
} else if !n.wasLeader && rd.SoftState.RaftState == raft.StateLeader {
n.wasLeader = true
n.leadershipBroadcast.Write(IsLeader)
}
}
// If we are the only registered member after
// restoring from the state, campaign to be the
// leader.
if !n.restored {
if len(n.cluster.Members()) <= 1 {
if err := n.Campaign(n.Ctx); err != nil {
panic("raft: cannot campaign to be the leader on node restore")
}
}
n.restored = true
}
// Advance the state machine
n.Advance()
case snapshotIndex := <-n.snapshotInProgress:
if snapshotIndex > n.snapshotIndex {
n.snapshotIndex = snapshotIndex
}
n.snapshotInProgress = nil
case <-n.removeRaftCh:
// If the node was removed from other members,
// ... (part of the code omitted here) ...
Developer: ygf11, Project: docker, Lines: 101, Source: raft.go
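The snapshot trigger in this loop is a simple threshold check: no snapshot may already be in flight, SnapshotInterval must be non-zero, and the applied index must have advanced at least SnapshotInterval entries past the last snapshot. A tiny sketch of that predicate with made-up numbers (the real code tracks the in-flight snapshot with a channel rather than a bool):

package main

import "fmt"

// shouldSnapshot mirrors the condition guarding doSnapshot above.
func shouldSnapshot(snapshotInProgress bool, snapshotInterval, appliedIndex, snapshotIndex uint64) bool {
    return !snapshotInProgress &&
        snapshotInterval > 0 &&
        appliedIndex-snapshotIndex >= snapshotInterval
}

func main() {
    fmt.Println(shouldSnapshot(false, 10000, 25000, 12000)) // true: 13000 entries since last snapshot
    fmt.Println(shouldSnapshot(false, 10000, 15000, 12000)) // false: only 3000 entries
    fmt.Println(shouldSnapshot(true, 10000, 25000, 12000))  // false: a snapshot is already in flight
}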
Example 17: Run
// Run runs dispatcher tasks which should be run on the leader dispatcher.
// The dispatcher can be stopped by cancelling ctx or by calling Stop().
func (d *Dispatcher) Run(ctx context.Context) error {
d.mu.Lock()
if d.isRunning() {
d.mu.Unlock()
return errors.New("dispatcher is already running")
}
ctx = log.WithModule(ctx, "dispatcher")
if err := d.markNodesUnknown(ctx); err != nil {
log.G(ctx).Errorf(`failed to move all nodes to "unknown" state: %v`, err)
}
configWatcher, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
if err == nil && len(clusters) == 1 {
heartbeatPeriod, err := gogotypes.DurationFromProto(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
if err == nil && heartbeatPeriod > 0 {
d.config.HeartbeatPeriod = heartbeatPeriod
}
if clusters[0].NetworkBootstrapKeys != nil {
d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
}
}
return nil
},
state.EventUpdateCluster{},
)
if err != nil {
d.mu.Unlock()
return err
}
// set queues here to guarantee that Close will close them
d.mgrQueue = watch.NewQueue()
d.keyMgrQueue = watch.NewQueue()
peerWatcher, peerCancel := d.cluster.SubscribePeers()
defer peerCancel()
d.lastSeenManagers = getWeightedPeers(d.cluster)
defer cancel()
d.ctx, d.cancel = context.WithCancel(ctx)
ctx = d.ctx
d.wg.Add(1)
defer d.wg.Done()
d.mu.Unlock()
publishManagers := func(peers []*api.Peer) {
var mgrs []*api.WeightedPeer
for _, p := range peers {
mgrs = append(mgrs, &api.WeightedPeer{
Peer: p,
Weight: remotes.DefaultObservationWeight,
})
}
d.mu.Lock()
d.lastSeenManagers = mgrs
d.mu.Unlock()
d.mgrQueue.Publish(mgrs)
}
batchTimer := time.NewTimer(maxBatchInterval)
defer batchTimer.Stop()
for {
select {
case ev := <-peerWatcher:
publishManagers(ev.([]*api.Peer))
case <-d.processUpdatesTrigger:
d.processUpdates(ctx)
batchTimer.Reset(maxBatchInterval)
case <-batchTimer.C:
d.processUpdates(ctx)
batchTimer.Reset(maxBatchInterval)
case v := <-configWatcher:
cluster := v.(state.EventUpdateCluster)
d.mu.Lock()
if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
// ignore error, since Spec has passed validation before
heartbeatPeriod, _ := gogotypes.DurationFromProto(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
if heartbeatPeriod != d.config.HeartbeatPeriod {
// only call d.nodes.updatePeriod when heartbeatPeriod changes
d.config.HeartbeatPeriod = heartbeatPeriod
d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
}
}
d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
d.mu.Unlock()
d.keyMgrQueue.Publish(cluster.Cluster.NetworkBootstrapKeys)
case <-ctx.Done():
return nil
}
}
}
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
func (m *Manager) Run(parent context.Context) error {
ctx, ctxCancel := context.WithCancel(parent)
defer ctxCancel()
// Harakiri.
go func() {
select {
case <-ctx.Done():
case <-m.stopped:
ctxCancel()
}
}()
leadershipCh, cancel := m.raftNode.SubscribeLeadership()
defer cancel()
go m.handleLeadershipEvents(ctx, leadershipCh)
authorize := func(ctx context.Context, roles []string) error {
var (
blacklistedCerts map[string]*api.BlacklistedCertificate
clusters []*api.Cluster
err error
)
m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName("default"))
})
// Not having a cluster object yet means we can't check
// the blacklist.
if err == nil && len(clusters) == 1 {
blacklistedCerts = clusters[0].BlacklistedCertificates
}
// Authorize the remote roles, ensure they can only be forwarded by managers
_, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
return err
}
baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig.RootCA())
baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore())
healthServer := health.NewHealthServer()
localHealthServer := health.NewHealthServer()
authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
authenticatedResourceAPI := api.NewAuthenticatedWrapperResourceAllocatorServer(baseResourceAPI, authorize)
authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.dispatcher, authorize)
authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.raftNode, authorize)
authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.raftNode, authorize)
proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
// localProxyControlAPI is a special kind of proxy. It is only wired up
// to receive requests from a trusted local socket, and these requests
// don't use TLS, therefore the requests it handles locally should
// bypass authorization. When it proxies, it sends them as requests from
// this manager rather than forwarded requests (it has no TLS
// information to put in the metadata map).
forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.raftNode, forwardAsOwnRequest)
// Everything registered on m.server should be an authenticated
// wrapper, or a proxy wrapping an authenticated wrapper!
api.RegisterCAServer(m.server, proxyCAAPI)
api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
api.RegisterRaftServer(m.server, authenticatedRaftAPI)
api.RegisterHealthServer(m.server, authenticatedHealthAPI)
api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI)
api.RegisterControlServer(m.server, authenticatedControlAPI)
api.RegisterResourceAllocatorServer(m.server, proxyResourceAPI)
api.RegisterDispatcherServer(m.server, proxyDispatcherAPI)
api.RegisterControlServer(m.localserver, localProxyControlAPI)
api.RegisterHealthServer(m.localserver, localHealthServer)
healthServer.SetServingStatus("Raft", api.HealthCheckResponse_NOT_SERVING)
localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_NOT_SERVING)
errServe := make(chan error, len(m.listeners))
for proto, l := range m.listeners {
go m.serveListener(ctx, errServe, proto, l)
}
defer func() {
m.server.Stop()
m.localserver.Stop()
}()
// ... (part of the code omitted here) ...
Developer: Mic92, Project: docker, Lines: 101, Source: manager.go
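Every API registered on the public gRPC server above is either an authenticated wrapper or a proxy around one; the wrapper's only job is to run the authorize callback before delegating to the real handler. A minimal sketch of that shape follows; the interface, role string, and wrapper type are illustrative, not swarmkit's generated code.

package main

import (
    "context"
    "errors"
    "fmt"
)

type pinger interface {
    Ping(ctx context.Context) (string, error)
}

type realPinger struct{}

func (realPinger) Ping(ctx context.Context) (string, error) { return "pong", nil }

type authorizeFunc func(ctx context.Context, roles []string) error

// authenticatedWrapper delegates to the wrapped implementation only if the
// authorize callback accepts the caller for the required roles.
type authenticatedWrapper struct {
    wrapped   pinger
    roles     []string
    authorize authorizeFunc
}

func (a authenticatedWrapper) Ping(ctx context.Context) (string, error) {
    if err := a.authorize(ctx, a.roles); err != nil {
        return "", err
    }
    return a.wrapped.Ping(ctx)
}

func main() {
    deny := func(ctx context.Context, roles []string) error { return errors.New("unauthorized") }
    allow := func(ctx context.Context, roles []string) error { return nil }

    var p pinger = authenticatedWrapper{wrapped: realPinger{}, roles: []string{"swarm-manager"}, authorize: deny}
    fmt.Println(p.Ping(context.Background())) // "" unauthorized

    p = authenticatedWrapper{wrapped: realPinger{}, roles: []string{"swarm-manager"}, authorize: allow}
    fmt.Println(p.Ping(context.Background())) // pong <nil>
}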
Example 19: doNetworkInit
func (a *Allocator) doNetworkInit(ctx context.Context) error {
na, err := networkallocator.New()
if err != nil {
return err
}
nc := &networkContext{
nwkAllocator: na,
unallocatedTasks: make(map[string]*api.Task),
unallocatedServices: make(map[string]*api.Service),
unallocatedNetworks: make(map[string]*api.Network),
}
// Check if we have the ingress network. If not found create
// it before reading all network objects for allocation.
var networks []*api.Network
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
if len(networks) > 0 {
ingressNetwork = networks[0]
}
})
if err != nil {
return fmt.Errorf("failed to find ingress network during init: %v", err)
}
// If ingress network is not found, create one right away
// using the predefined template.
if len(networks) == 0 {
if err := a.store.Update(func(tx store.Tx) error {
ingressNetwork.ID = identity.NewID()
if err := store.CreateNetwork(tx, ingressNetwork); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("failed to create ingress network: %v", err)
}
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
if len(networks) > 0 {
ingressNetwork = networks[0]
}
})
if err != nil {
return fmt.Errorf("failed to find ingress network after creating it: %v", err)
}
}
// Try to complete ingress network allocation before anything else so
// that we can get the preferred subnet for the ingress
// network.
if !na.IsAllocated(ingressNetwork) {
if err := a.allocateNetwork(ctx, nc, ingressNetwork); err != nil {
log.G(ctx).Errorf("failed allocating ingress network during init: %v", err)
}
// Update store after allocation
if err := a.store.Update(func(tx store.Tx) error {
if err := store.UpdateNetwork(tx, ingressNetwork); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("failed to create ingress network: %v", err)
}
}
// Allocate networks that were already in the store before we started
// watching.
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all networks in store while trying to allocate during init: %v", err)
}
for _, n := range networks {
if na.IsAllocated(n) {
continue
}
if err := a.allocateNetwork(ctx, nc, n); err != nil {
log.G(ctx).Errorf("failed allocating network %s during init: %v", n.ID, err)
}
}
// Allocate nodes in the store so far before we process watched events.
var nodes []*api.Node
a.store.View(func(tx store.ReadTx) {
nodes, err = store.FindNodes(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
}
// ... (part of the code omitted here) ...