本文整理汇总了Golang中github.com/docker/swarmkit/manager/state/store.FindClusters函数的典型用法代码示例。如果您正苦于以下问题:Golang FindClusters函数的具体用法?Golang FindClusters怎么用?Golang FindClusters使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了FindClusters函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: TestClusterStorePasshphraseRotationForRootCA
// TestClusterStorePasshphraseRotationForRootCA exercises rotation of the
// passphrase protecting the root CA key: it boots a cluster with one
// passphrase, rotates to a new one via the environment, kills the current
// leader, and verifies that the newly elected leader re-encrypts the CA key
// stored in the cluster object.
func TestClusterStorePasshphraseRotationForRootCA(t *testing.T) {
	if !*integrationTests {
		t.Skip("integration test")
	}
	// Encrypt the root CA key with "password1" from the very beginning.
	os.Setenv(ca.PassphraseENVVar, "password1")
	defer os.Setenv(ca.PassphraseENVVar, "")
	defer os.Setenv(ca.PassphraseENVVarPrev, "")
	mCount, aCount := 5, 15
	c := createManagersCluster(t, mCount, aCount)
	require.NoError(t, testutils.PollFunc(nil, c.pollRegister))
	// Get the leader
	leader, err := c.leader()
	assert.NoError(t, err)
	// check key material in store
	var clusters []*api.Cluster
	leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
		clusters, err = store.FindClusters(tx, store.All)
	})
	assert.NoError(t, err)
	assert.Len(t, clusters, 1, "there should be one cluster")
	assert.NotNil(t, clusters[0].RootCA.CACert)
	assert.NotNil(t, clusters[0].RootCA.CAKey)
	// The stored CA key must be PEM-encrypted, not plaintext.
	assert.Contains(t, string(clusters[0].RootCA.CAKey), "Proc-Type: 4,ENCRYPTED")
	firstEncryptedKey := clusters[0].RootCA.CAKey
	// Set an ENV passphrase and kill the current leader
	os.Setenv(ca.PassphraseENVVarPrev, "password1")
	os.Setenv(ca.PassphraseENVVar, "password2")
	require.NoError(t, c.destroyLeader())
	// ensure that cluster will converge to expected number of agents, we need big timeout because of heartbeat times
	require.NoError(t, testutils.PollFuncWithTimeout(nil, c.pollRegister, 30*time.Second))
	// Get the new leader
	leader, err = c.leader()
	assert.NoError(t, err)
	// check key material in store
	leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
		clusters, err = store.FindClusters(tx, store.All)
	})
	assert.NoError(t, err)
	assert.Len(t, clusters, 1, "there should be one cluster")
	assert.NotNil(t, clusters[0].RootCA.CACert)
	assert.NotNil(t, clusters[0].RootCA.CAKey)
	assert.Contains(t, string(clusters[0].RootCA.CAKey), "Proc-Type: 4,ENCRYPTED")
	// The ciphertext must differ after rotation, proving the key was
	// re-encrypted under the new passphrase.
	assert.NotEqual(t, firstEncryptedKey, clusters[0].RootCA.CAKey)
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:52,代码来源:manager_cluster_test.go
示例2: TestKeyManagerDefaultSubsystem
// Verify the key generation and rotation for default subsystems.
// After startup, one full keyring per configured subsystem must exist in the
// store; after a rotation, the previously oldest key must be gone.
func TestKeyManagerDefaultSubsystem(t *testing.T) {
	st := store.NewMemoryStore(nil)
	defer st.Close()
	createCluster(t, st, "default", "default")

	km := New(st, DefaultConfig())
	ctx := context.Background()
	go km.Run(ctx)
	time.Sleep(250 * time.Millisecond)

	// The number of keys allocated must match the keyring size across
	// every configured subsystem.
	var (
		found []*api.Cluster
		err   error
	)
	km.store.View(func(readTx store.ReadTx) {
		found, err = store.FindClusters(readTx, store.ByName(km.config.ClusterName))
	})
	assert.NoError(t, err)
	assert.Equal(t, len(found[0].NetworkBootstrapKeys), len(km.config.Subsystems)*keyringSize)

	oldKey := found[0].NetworkBootstrapKeys[0].Key
	km.rotateKey(ctx)

	// After a rotation the oldest key must have been evicted from the ring.
	assert.Equal(t, len(km.keyRing.keys), len(km.config.Subsystems)*keyringSize)
	for _, key := range km.keyRing.keys {
		assert.False(t, bytes.Equal(key.Key, oldKey))
	}
}
开发者ID:docker,项目名称:swarmkit,代码行数:35,代码来源:keymanager_test.go
示例3: WaitForCluster
// WaitForCluster waits until node observes that the cluster wide config is
// committed to raft. This ensures that we can see and serve informations
// related to the cluster.
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
	// Register the watch before reading the store so a cluster created
	// between the read and the watch cannot be missed.
	watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
	defer cancel()

	var clusters []*api.Cluster
	n.MemoryStore().View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
	})
	if err != nil {
		return nil, err
	}

	// Fast path: the cluster object is already committed.
	if len(clusters) == 1 {
		return clusters[0], nil
	}

	// Otherwise wait for the creation event, or give up on cancellation.
	select {
	case e := <-watch:
		return e.(state.EventCreateCluster).Cluster, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
开发者ID:amitshukla,项目名称:docker,代码行数:29,代码来源:util.go
示例4: TestClusterStoreWithPasshphraseForRootCA
// TestClusterStoreWithPasshphraseForRootCA verifies that when a CA key
// passphrase is configured from the start, the root CA key stored in the
// cluster object is PEM-encrypted rather than kept in the clear.
func TestClusterStoreWithPasshphraseForRootCA(t *testing.T) {
	if !*integrationTests {
		t.Skip("integration test")
	}
	// Start with a passphrase from moment 0
	os.Setenv(ca.PassphraseENVVar, "password1")
	defer os.Setenv(ca.PassphraseENVVar, "")
	defer os.Setenv(ca.PassphraseENVVarPrev, "")
	mCount, aCount := 5, 15
	c := createManagersCluster(t, mCount, aCount)
	require.NoError(t, testutils.PollFunc(nil, c.pollRegister))
	// Get the leader
	leader, err := c.leader()
	assert.NoError(t, err)
	// check key material in store
	var clusters []*api.Cluster
	leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
		clusters, err = store.FindClusters(tx, store.All)
	})
	assert.NoError(t, err)
	assert.Len(t, clusters, 1, "there should be one cluster")
	assert.NotNil(t, clusters[0].RootCA.CACert)
	assert.NotNil(t, clusters[0].RootCA.CAKey)
	// The stored CA key must carry the PEM encryption header.
	assert.Contains(t, string(clusters[0].RootCA.CAKey), "Proc-Type: 4,ENCRYPTED")
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:28,代码来源:manager_cluster_test.go
示例5: getCurrentRaftConfig
// getCurrentRaftConfig returns the raft configuration stored on the default
// cluster object, falling back to the built-in defaults when the cluster
// object cannot be read or is not unique.
func (n *Node) getCurrentRaftConfig() api.RaftConfig {
	cfg := DefaultRaftConfig()
	n.memoryStore.View(func(readTx store.ReadTx) {
		if clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName)); err == nil && len(clusters) == 1 {
			cfg = clusters[0].Spec.Raft
		}
	})
	return cfg
}
开发者ID:fabianofranz,项目名称:docker,代码行数:10,代码来源:raft.go
示例6: ListClusters
// ListClusters returns a list of all clusters.
// The primary lookup uses at most one store filter (names, then name
// prefixes, then ID prefixes); the remaining filters from the request are
// applied afterwards in memory. Cluster objects are redacted before being
// returned to the caller.
func (s *Server) ListClusters(ctx context.Context, request *api.ListClustersRequest) (*api.ListClustersResponse, error) {
	var (
		clusters []*api.Cluster
		err      error
	)
	filters := request.Filters
	s.store.View(func(tx store.ReadTx) {
		switch {
		case filters != nil && len(filters.Names) > 0:
			clusters, err = store.FindClusters(tx, buildFilters(store.ByName, filters.Names))
		case filters != nil && len(filters.NamePrefixes) > 0:
			clusters, err = store.FindClusters(tx, buildFilters(store.ByNamePrefix, filters.NamePrefixes))
		case filters != nil && len(filters.IDPrefixes) > 0:
			clusters, err = store.FindClusters(tx, buildFilters(store.ByIDPrefix, filters.IDPrefixes))
		default:
			clusters, err = store.FindClusters(tx, store.All)
		}
	})
	if err != nil {
		return nil, err
	}

	// Narrow the result set with the remaining request filters.
	if filters != nil {
		clusters = filterClusters(clusters,
			func(c *api.Cluster) bool {
				return filterContains(c.Spec.Annotations.Name, filters.Names)
			},
			func(c *api.Cluster) bool {
				return filterContainsPrefix(c.Spec.Annotations.Name, filters.NamePrefixes)
			},
			func(c *api.Cluster) bool {
				return filterContainsPrefix(c.ID, filters.IDPrefixes)
			},
			func(c *api.Cluster) bool {
				return filterMatchLabels(c.Spec.Annotations.Labels, filters.Labels)
			},
		)
	}

	// WARN: we should never return cluster here. We need to redact the private fields first.
	return &api.ListClustersResponse{
		Clusters: redactClusters(clusters),
	}, nil
}
开发者ID:BrickXu,项目名称:docker,代码行数:44,代码来源:cluster.go
示例7: initCluster
// initCluster caches the "default" cluster object on the orchestrator.
// A missing cluster object is not an error: the orchestrator picks it up
// from the corresponding creation event later.
func (r *ReplicatedOrchestrator) initCluster(readTx store.ReadTx) error {
	clusters, err := store.FindClusters(readTx, store.ByName("default"))
	if err != nil {
		return err
	}
	if len(clusters) == 1 {
		r.cluster = clusters[0]
	}
	return nil
}
开发者ID:JMesser81,项目名称:docker,代码行数:14,代码来源:services.go
示例8: Run
// Run starts the keymanager, it doesn't return.
// On startup it seeds the keyring: when the cluster object carries no
// network bootstrap keys yet, a full keyring is allocated and written back;
// otherwise the stored keys are adopted and rotated once. It then rotates
// keys on every tick of the configured rotation interval until the context
// is cancelled.
func (k *KeyManager) Run(ctx context.Context) error {
	k.mu.Lock()
	log := log.G(ctx).WithField("module", "keymanager")
	var (
		clusters []*api.Cluster
		err      error
	)
	k.store.View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
	})
	if err != nil {
		log.Errorf("reading cluster config failed, %v", err)
		k.mu.Unlock()
		return err
	}
	// FindClusters returns a nil error with an empty slice when the cluster
	// object does not exist; indexing clusters[0] unconditionally would
	// panic with an index-out-of-range error.
	if len(clusters) != 1 {
		k.mu.Unlock()
		return fmt.Errorf("could not find cluster config")
	}
	cluster := clusters[0]
	if len(cluster.NetworkBootstrapKeys) == 0 {
		// First run: allocate a full keyring for every subsystem and
		// persist it on the cluster object.
		for _, subsys := range k.config.Subsystems {
			for i := 0; i < keyringSize; i++ {
				k.keyRing.keys = append(k.keyRing.keys, k.allocateKey(ctx, subsys))
			}
		}
		if err := k.updateKey(cluster); err != nil {
			log.Errorf("store update failed %v", err)
		}
	} else {
		// Adopt the persisted keys and rotate once so a fresh key is in
		// place after (re)election.
		k.keyRing.lClock = cluster.EncryptionKeyLamportClock
		k.keyRing.keys = cluster.NetworkBootstrapKeys
		k.rotateKey(ctx)
	}

	ticker := time.NewTicker(k.config.RotationInterval)
	defer ticker.Stop()

	k.ctx, k.cancel = context.WithCancel(ctx)
	k.mu.Unlock()

	for {
		select {
		case <-ticker.C:
			k.rotateKey(ctx)
		case <-k.ctx.Done():
			return nil
		}
	}
}
开发者ID:yugongpeng,项目名称:swarmkit,代码行数:50,代码来源:keymanager.go
示例9: rotateKey
// rotateKey allocates a fresh key for every subsystem present in the
// keyring, evicting the oldest key of each full ring, and writes the updated
// key material back to the cluster object.
func (k *KeyManager) rotateKey(ctx context.Context) error {
	log := log.G(ctx).WithField("module", "keymanager")
	var (
		clusters []*api.Cluster
		err      error
	)
	k.store.View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
	})
	if err != nil {
		log.Errorf("reading cluster config failed, %v", err)
		return err
	}
	// Guard against a missing cluster object instead of panicking on
	// clusters[0] with an index-out-of-range error.
	if len(clusters) != 1 {
		return fmt.Errorf("could not find cluster config")
	}
	cluster := clusters[0]
	if len(cluster.NetworkBootstrapKeys) == 0 {
		panic(fmt.Errorf("no key in the cluster config"))
	}

	// Bucket the current keys by subsystem.
	subsysKeys := map[string][]*api.EncryptionKey{}
	for _, key := range k.keyRing.keys {
		subsysKeys[key.Subsystem] = append(subsysKeys[key.Subsystem], key)
	}
	k.keyRing.keys = []*api.EncryptionKey{}

	// We maintain the latest key and the one before in the key ring to allow
	// agents to communicate without disruption on key change.
	for subsys, keys := range subsysKeys {
		if len(keys) == keyringSize {
			// Find and evict the key with the smallest (oldest)
			// Lamport time.
			min := 0
			for i, key := range keys[1:] {
				if key.LamportTime < keys[min].LamportTime {
					// i indexes keys[1:], so its position in keys
					// is i+1; using i here would evict the wrong key.
					min = i + 1
				}
			}
			keys = append(keys[0:min], keys[min+1:]...)
		}
		keys = append(keys, k.allocateKey(ctx, subsys))
		subsysKeys[subsys] = keys
	}

	for _, keys := range subsysKeys {
		k.keyRing.keys = append(k.keyRing.keys, keys...)
	}

	return k.updateKey(cluster)
}
开发者ID:yugongpeng,项目名称:swarmkit,代码行数:48,代码来源:keymanager.go
示例10: TestGetUnlockKey
// TestGetUnlockKey checks the GetUnlockKey RPC: with manager auto-lock
// disabled the returned key is nil, and after enabling auto-lock and storing
// a manager unlock key, the RPC eventually reports the new key with a
// version at least as recent as the cluster object's.
func TestGetUnlockKey(t *testing.T) {
	t.Parallel()
	tc := testutils.NewTestCA(t)
	defer tc.Stop()
	var cluster *api.Cluster
	tc.MemoryStore.View(func(tx store.ReadTx) {
		clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName))
		require.NoError(t, err)
		cluster = clusters[0]
	})
	// Auto-lock is off by default, so no unlock key is returned.
	resp, err := tc.CAClients[0].GetUnlockKey(context.Background(), &api.GetUnlockKeyRequest{})
	require.NoError(t, err)
	require.Nil(t, resp.UnlockKey)
	require.Equal(t, cluster.Meta.Version, resp.Version)
	// Update the unlock key
	require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error {
		cluster = store.GetCluster(tx, cluster.ID)
		cluster.Spec.EncryptionConfig.AutoLockManagers = true
		cluster.UnlockKeys = []*api.EncryptionKey{{
			Subsystem: ca.ManagerRole,
			Key:       []byte("secret"),
		}}
		return store.UpdateCluster(tx, cluster)
	}))
	tc.MemoryStore.View(func(tx store.ReadTx) {
		cluster = store.GetCluster(tx, cluster.ID)
	})
	// The CA server observes the update asynchronously, so poll until the
	// new key and a sufficiently recent version become visible via the RPC.
	require.NoError(t, raftutils.PollFuncWithTimeout(nil, func() error {
		resp, err = tc.CAClients[0].GetUnlockKey(context.Background(), &api.GetUnlockKeyRequest{})
		if err != nil {
			return fmt.Errorf("get unlock key: %v", err)
		}
		if !bytes.Equal(resp.UnlockKey, []byte("secret")) {
			return fmt.Errorf("secret hasn't rotated yet")
		}
		if cluster.Meta.Version.Index > resp.Version.Index {
			return fmt.Errorf("hasn't updated to the right version yet")
		}
		return nil
	}, 250*time.Millisecond))
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:47,代码来源:server_test.go
示例11: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem.
// The keyring must reach exactly keyringSize entries for the single
// configured subsystem; after one rotation, the oldest key is gone and every
// remaining key belongs to the IPsec subsystem.
func TestKeyManagerCustomSubsystem(t *testing.T) {
	st := store.NewMemoryStore(nil)
	defer st.Close()
	createCluster(t, st, "default", "default")

	config := &Config{
		ClusterName:      store.DefaultClusterName,
		Keylen:           DefaultKeyLen,
		RotationInterval: DefaultKeyRotationInterval,
		Subsystems:       []string{SubsystemIPSec},
	}
	km := New(st, config)

	ctx := context.Background()
	go km.Run(ctx)
	time.Sleep(250 * time.Millisecond)

	// The number of keys allocated must match the keyring size.
	var (
		found []*api.Cluster
		err   error
	)
	km.store.View(func(readTx store.ReadTx) {
		found, err = store.FindClusters(readTx, store.ByName(km.config.ClusterName))
	})
	assert.NoError(t, err)
	assert.Equal(t, len(found[0].NetworkBootstrapKeys), keyringSize)

	oldKey := found[0].NetworkBootstrapKeys[0].Key
	km.rotateKey(ctx)

	// After a rotation the oldest key must be gone from the keyring, and
	// all keys must be scoped to the IPsec subsystem.
	assert.Equal(t, len(km.keyRing.keys), keyringSize)
	for _, key := range km.keyRing.keys {
		assert.False(t, bytes.Equal(key.Key, oldKey))
		assert.True(t, key.Subsystem == SubsystemIPSec)
	}
}
开发者ID:docker,项目名称:swarmkit,代码行数:44,代码来源:keymanager_test.go
示例12: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
// The first allocated key must be persisted in the store; after two
// rotations the keyring holds two keys, neither of which is the original.
func TestKeyManagerCustomSubsystem(t *testing.T) {
	st := store.NewMemoryStore(nil)
	// Close the store on exit. The sibling keymanager tests do this too;
	// without it the store's internal resources leak between tests.
	defer st.Close()
	createCluster(t, st, "default", "default")
	config := &Config{
		ClusterName:      store.DefaultClusterName,
		Keylen:           DefaultKeyLen,
		RotationInterval: DefaultKeyRotationInterval,
		Subsystems:       []string{SubsystemIPSec},
	}
	k := New(st, config)
	ctx := context.Background()
	go k.Run(ctx)
	time.Sleep(250 * time.Millisecond)
	// verify the first key has been allocated and updated in the
	// store
	var (
		clusters []*api.Cluster
		err      error
	)
	k.store.View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
	})
	assert.NoError(t, err)
	assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), 1)
	key1 := clusters[0].NetworkBootstrapKeys[0].Key
	k.rotateKey(ctx)
	k.rotateKey(ctx)
	// verify that after two rotations keyring has two keys and the very
	// first key allocated has been removed
	assert.Equal(t, len(k.keyRing.keys), 2)
	for _, key := range k.keyRing.keys {
		match := bytes.Equal(key.Key, key1)
		assert.False(t, match)
	}
}
开发者ID:RobbieJVMW,项目名称:swarmkit,代码行数:43,代码来源:keymanager_test.go
示例13: Run
// Run is the TaskReaper's main loop.
// It seeds the task-history retention limit from the cluster spec, then
// batches task-creation events: dirty slots are reaped either when the
// batching interval fires or immediately once too many accumulate. Cluster
// updates refresh the retention limit; closing stopChan terminates the loop.
func (tr *TaskReaper) Run() {
	defer close(tr.doneChan)
	// Seed the retention limit from the current cluster spec, if present.
	tr.store.View(func(readTx store.ReadTx) {
		clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
		if err == nil && len(clusters) == 1 {
			tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit
		}
	})
	timer := time.NewTimer(reaperBatchingInterval)
	for {
		select {
		case event := <-tr.watcher:
			switch v := event.(type) {
			case state.EventCreateTask:
				t := v.Task
				// Mark this slot dirty so the next tick considers it.
				tr.dirty[instanceTuple{
					instance:  t.Slot,
					serviceID: t.ServiceID,
					nodeID:    t.NodeID,
				}] = struct{}{}
				// Flush immediately when too many slots are dirty;
				// otherwise wait out the batching interval.
				if len(tr.dirty) > maxDirty {
					timer.Stop()
					tr.tick()
				} else {
					timer.Reset(reaperBatchingInterval)
				}
			case state.EventUpdateCluster:
				tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
			}
		case <-timer.C:
			timer.Stop()
			tr.tick()
		case <-tr.stopChan:
			timer.Stop()
			return
		}
	}
}
开发者ID:BrickXu,项目名称:docker,代码行数:42,代码来源:task_reaper.go
示例14: Run
// Run runs the CA signer main loop.
// The CA signer can be stopped with cancelling ctx or calling Stop().
//
// Startup order matters here: the started channel is closed exactly once,
// the cluster object is loaded (seeding signing state via updateCluster)
// before s.ctx is set, and previously registered nodes are reconciled before
// the event loop starts consuming node/cluster updates.
func (s *Server) Run(ctx context.Context) error {
	s.mu.Lock()
	if s.isRunning() {
		s.mu.Unlock()
		return fmt.Errorf("CA signer is already running")
	}
	s.wg.Add(1)
	s.mu.Unlock()
	defer s.wg.Done()
	logger := log.G(ctx).WithField("module", "ca")
	ctx = log.WithLogger(ctx, logger)
	// Run() should never be called twice, but just in case, we're
	// attempting to close the started channel in a safe way
	select {
	case <-s.started:
		return fmt.Errorf("CA server cannot be started more than once")
	default:
		close(s.started)
	}
	// Retrieve the channels to keep track of changes in the cluster
	// Retrieve all the currently registered nodes
	var nodes []*api.Node
	updates, cancel, err := store.ViewAndWatch(
		s.store,
		func(readTx store.ReadTx) error {
			clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
			if err != nil {
				return err
			}
			if len(clusters) != 1 {
				return fmt.Errorf("could not find cluster object")
			}
			s.updateCluster(ctx, clusters[0])
			nodes, err = store.FindNodes(readTx, store.All)
			return err
		},
		state.EventCreateNode{},
		state.EventUpdateNode{},
		state.EventUpdateCluster{},
	)
	// Do this after updateCluster has been called, so isRunning never
	// returns true without joinTokens being set correctly.
	s.mu.Lock()
	s.ctx, s.cancel = context.WithCancel(ctx)
	s.mu.Unlock()
	if err != nil {
		log.G(ctx).WithFields(logrus.Fields{
			"method": "(*Server).Run",
		}).WithError(err).Errorf("snapshot store view failed")
		return err
	}
	defer cancel()
	// We might have missed some updates if there was a leader election,
	// so let's pick up the slack.
	if err := s.reconcileNodeCertificates(ctx, nodes); err != nil {
		// We don't return here because that means the Run loop would
		// never run. Log an error instead.
		log.G(ctx).WithFields(logrus.Fields{
			"method": "(*Server).Run",
		}).WithError(err).Errorf("error attempting to reconcile certificates")
	}
	// Watch for new nodes being created, new nodes being updated, and changes
	// to the cluster
	for {
		select {
		case event := <-updates:
			switch v := event.(type) {
			case state.EventCreateNode:
				s.evaluateAndSignNodeCert(ctx, v.Node)
			case state.EventUpdateNode:
				// If this certificate is already at a final state
				// no need to evaluate and sign it.
				if !isFinalState(v.Node.Certificate.Status) {
					s.evaluateAndSignNodeCert(ctx, v.Node)
				}
			case state.EventUpdateCluster:
				s.updateCluster(ctx, v.Cluster)
			}
		case <-ctx.Done():
			return ctx.Err()
		case <-s.ctx.Done():
			// s.ctx is cancelled by Stop(); exit without error.
			return nil
		}
	}
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:96,代码来源:server.go
示例15: Run
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
func (m *Manager) Run(parent context.Context) error {
ctx, ctxCancel := context.WithCancel(parent)
defer ctxCancel()
m.cancelFunc = ctxCancel
leadershipCh, cancel := m.raftNode.SubscribeLeadership()
defer cancel()
go m.handleLeadershipEvents(ctx, leadershipCh)
authorize := func(ctx context.Context, roles []string) error {
var (
blacklistedCerts map[string]*api.BlacklistedCertificate
clusters []*api.Cluster
err error
)
m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName("default"))
})
// Not having a cluster object yet means we can't check
// the blacklist.
if err == nil && len(clusters) == 1 {
blacklistedCerts = clusters[0].BlacklistedCertificates
}
// Authorize the remote roles, ensure they can only be forwarded by managers
_, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
return err
}
baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig.RootCA(), m.config.PluginGetter)
baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore())
healthServer := health.NewHealthServer()
localHealthServer := health.NewHealthServer()
authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
authenticatedResourceAPI := api.NewAuthenticatedWrapperResourceAllocatorServer(baseResourceAPI, authorize)
authenticatedLogsServerAPI := api.NewAuthenticatedWrapperLogsServer(m.logbroker, authorize)
authenticatedLogBrokerAPI := api.NewAuthenticatedWrapperLogBrokerServer(m.logbroker, authorize)
authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.dispatcher, authorize)
authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.raftNode, authorize)
authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.raftNode, authorize)
proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
proxyLogBrokerAPI := api.NewRaftProxyLogBrokerServer(authenticatedLogBrokerAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
// The following local proxies are only wired up to receive requests
// from a trusted local socket, and these requests don't use TLS,
// therefore the requests they handle locally should bypass
// authorization. When requests are proxied from these servers, they
// are sent as requests from this manager rather than forwarded
// requests (it has no TLS information to put in the metadata map).
forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
handleRequestLocally := func(ctx context.Context) (context.Context, error) {
remoteAddr := "127.0.0.1:0"
m.addrMu.Lock()
if m.config.RemoteAPI != nil {
if m.config.RemoteAPI.AdvertiseAddr != "" {
remoteAddr = m.config.RemoteAPI.AdvertiseAddr
} else {
remoteAddr = m.config.RemoteAPI.ListenAddr
}
}
m.addrMu.Unlock()
creds := m.config.SecurityConfig.ClientTLSCreds
nodeInfo := ca.RemoteNodeInfo{
Roles: []string{creds.Role()},
Organization: creds.Organization(),
NodeID: creds.NodeID(),
RemoteAddr: remoteAddr,
}
return context.WithValue(ctx, ca.LocalRequestKey, nodeInfo), nil
}
localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
localProxyLogsAPI := api.NewRaftProxyLogsServer(m.logbroker, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
localProxyDispatcherAPI := api.NewRaftProxyDispatcherServer(m.dispatcher, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
localProxyCAAPI := api.NewRaftProxyCAServer(m.caserver, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
localProxyNodeCAAPI := api.NewRaftProxyNodeCAServer(m.caserver, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
localProxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(baseResourceAPI, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
localProxyLogBrokerAPI := api.NewRaftProxyLogBrokerServer(m.logbroker, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
// Everything registered on m.server should be an authenticated
//.........这里部分代码省略.........
开发者ID:yongtang,项目名称:swarmkit,代码行数:101,代码来源:manager.go
示例16: RemoveNode
// RemoveNode removes a Node referenced by NodeID with the given NodeSpec.
// - Returns NotFound if the Node is not found.
// - Returns FailedPrecondition if the Node has manager role (and is part of the memberlist) or is not shut down.
// - Returns InvalidArgument if NodeID or NodeVersion is not valid.
// - Returns an error if the delete fails.
//
// Before deleting the node, its ID (and, when its certificate can be
// parsed, the certificate's expiry) is appended to the cluster object's
// RemovedNodes list inside the same store transaction.
func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) (*api.RemoveNodeResponse, error) {
	if request.NodeID == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
	}
	err := s.store.Update(func(tx store.Tx) error {
		node := store.GetNode(tx, request.NodeID)
		if node == nil {
			return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
		}
		// Managers still present in the raft memberlist must be demoted
		// before they can be removed.
		if node.Spec.Role == api.NodeRoleManager {
			if s.raft == nil {
				return grpc.Errorf(codes.FailedPrecondition, "node %s is a manager but cannot access node information from the raft memberlist", request.NodeID)
			}
			if member := s.raft.GetMemberByNodeID(request.NodeID); member != nil {
				return grpc.Errorf(codes.FailedPrecondition, "node %s is a cluster manager and is a member of the raft cluster. It must be demoted to worker before removal", request.NodeID)
			}
		}
		// A READY node can only be removed with Force.
		if !request.Force && node.Status.State == api.NodeStatus_READY {
			return grpc.Errorf(codes.FailedPrecondition, "node %s is not down and can't be removed", request.NodeID)
		}
		// lookup the cluster
		clusters, err := store.FindClusters(tx, store.ByName("default"))
		if err != nil {
			return err
		}
		if len(clusters) != 1 {
			return grpc.Errorf(codes.Internal, "could not fetch cluster object")
		}
		cluster := clusters[0]
		removedNode := &api.RemovedNode{ID: node.ID}
		// Set an expiry time for this RemovedNode if a certificate
		// exists and can be parsed. Parse failures are deliberately
		// tolerated: the node is still recorded, just without an expiry.
		if len(node.Certificate.Certificate) != 0 {
			certBlock, _ := pem.Decode(node.Certificate.Certificate)
			if certBlock != nil {
				X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
				if err == nil && !X509Cert.NotAfter.IsZero() {
					expiry, err := ptypes.TimestampProto(X509Cert.NotAfter)
					if err == nil {
						removedNode.Expiry = expiry
					}
				}
			}
		}
		cluster.RemovedNodes = append(cluster.RemovedNodes, removedNode)
		if err := store.UpdateCluster(tx, cluster); err != nil {
			return err
		}
		return store.DeleteNode(tx, request.NodeID)
	})
	if err != nil {
		return nil, err
	}
	return &api.RemoveNodeResponse{}, nil
}
开发者ID:JMesser81,项目名称:docker,代码行数:67,代码来源:node.go
示例17: Run
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
func (m *Manager) Run(parent context.Context) error {
ctx, ctxCancel := context.WithCancel(parent)
defer ctxCancel()
// Harakiri.
go func() {
select {
case <-ctx.Done():
case <-m.stopped:
ctxCancel()
}
}()
leadershipCh, cancel := m.raftNode.SubscribeLeadership()
defer cancel()
go m.handleLeadershipEvents(ctx, leadershipCh)
authorize := func(ctx context.Context, roles []string) error {
var (
blacklistedCerts map[string]*api.BlacklistedCertificate
clusters []*api.Cluster
err error
)
m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName("default"))
})
// Not having a cluster object yet means we can't check
// the blacklist.
if err == nil && len(clusters) == 1 {
blacklistedCerts = clusters[0].BlacklistedCertificates
}
// Authorize the remote roles, ensure they can only be forwarded by managers
_, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
return err
}
baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig.RootCA())
baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore())
healthServer := health.NewHealthServer()
localHealthServer := health.NewHealthServer()
authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
authenticatedResourceAPI := api.NewAuthenticatedWrapperResourceAllocatorServer(baseResourceAPI, authorize)
authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.dispatcher, authorize)
authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.raftNode, authorize)
authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.raftNode, authorize)
proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.raftNode, ca.WithMetadataForwardTLSInfo)
// localProxyControlAPI is a special kind of proxy. It is only wired up
// to receive requests from a trusted local socket, and these requests
// don't use TLS, therefore the requests it handles locally should
// bypass authorization. When it proxies, it sends them as requests from
// this manager rather than forwarded requests (it has no TLS
// information to put in the metadata map).
forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.raftNode, forwardAsOwnRequest)
// Everything registered on m.server should be an authenticated
// wrapper, or a proxy wrapping an authenticated wrapper!
api.RegisterCAServer(m.server, proxyCAAPI)
api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
api.RegisterRaftServer(m.server, authenticatedRaftAPI)
api.RegisterHealthServer(m.server, authenticatedHealthAPI)
api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI)
api.RegisterControlServer(m.server, authenticatedControlAPI)
api.RegisterResourceAllocatorServer(m.server, proxyResourceAPI)
api.RegisterDispatcherServer(m.server, proxyDispatcherAPI)
api.RegisterControlServer(m.localserver, localProxyControlAPI)
api.RegisterHealthServer(m.localserver, localHealthServer)
healthServer.SetServingStatus("Raft", api.HealthCheckResponse_NOT_SERVING)
localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_NOT_SERVING)
errServe := make(chan error, len(m.listeners))
for proto, l := range m.listeners {
go m.serveListener(ctx, errServe, proto, l)
}
defer func() {
m.server.Stop()
m.localserver.Stop()
}()
//.........这里部分代码省略.........
开发者ID:Mic92,项目名称:docker,代码行数:101,代码来源:manager.go
示例18: Run
// Run is the main loop for a Raft node, it goes along the state machine,
// acting on the messages received from other Raft nodes in the cluster.
//
// Before running the main loop, it first starts the raft node based on saved
// cluster state. If no saved state exists, it starts a single-node cluster.
func (n *Node) Run(ctx context.Context) error {
ctx = log.WithLogger(ctx, logrus.WithField("raft_id", fmt.Sprintf("%x", n.Config.ID)))
ctx, cancel := context.WithCancel(ctx)
// nodeRemoved indicates that node was stopped due its removal.
nodeRemoved := false
defer func() {
cancel()
n.stop(ctx)
if nodeRemoved {
// Move WAL and snapshot out of the way, since
// they are no longer usable.
if err := n.moveWALAndSnap(); err != nil {
log.G(ctx).WithError(err).Error("failed to move wal after node removal")
}
}
n.done()
}()
wasLeader := false
for {
select {
case <-n.ticker.C():
n.raftNode.Tick()
n.cluster.Tick()
case rd := <-n.raftNode.Ready():
raftConfig := DefaultRaftConfig()
n.memoryStore.View(func(readTx store.ReadTx) {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err == nil && len(clusters) == 1 {
raftConfig = clusters[0].Spec.Raft
}
})
// Save entries to storage
if err := n.saveToStorage(&raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil {
log.G(ctx).WithError(err).Error("failed to save entries to storage")
}
if len(rd.Messages) != 0 {
// Send raft messages to peers
if err := n.send(ctx, rd.Messages); err != nil {
log.G(ctx).WithError(err).Error("failed to send message to members")
}
}
// Apply snapshot to memory store. The snapshot
// was applied to the raft store in
// saveToStorage.
if !raft.IsEmptySnap(rd.Snapshot) {
// Load the snapshot data into the store
if err := n.restoreFromSnapshot(rd.Snapshot.Data, false); err != nil {
log.G(ctx).WithError(err).Error("failed to restore from snapshot")
}
n.appliedIndex = rd.Snapshot.Metadata.Index
n.snapshotIndex = rd.Snapshot.Metadata.Index
n.confState = rd.Snapshot.Metadata.ConfState
}
// If we cease to be the leader, we must cancel any
// proposals that are currently waiting for a quorum to
// acknowledge them. It is still possible for these to
// become committed, but if that happens we will apply
// them as any follower would.
// It is important that we cancel these proposals before
// calling processCommitted, so processCommitted does
// not deadlock.
if rd.SoftState != nil {
if wasLeader && rd.SoftState.RaftState != raft.StateLeader {
wasLeader = false
if atomic.LoadUint32(&n.signalledLeadership) == 1 {
atomic.StoreUint32(&n.signalledLeadership, 0)
n.leadershipBroadcast.Publish(IsFollower)
}
// It is important that we set n.signalledLeadership to 0
// before calling n.wait.cancelAll. When a new raft
// request is registered, it checks n.signalledLeadership
// afterwards, and cancels the registration if it is 0.
// If cancelAll was called first, this call might run
// before the new request registers, but
// signalledLeadership would be set after the check.
// Setting signalledLeadership before calling cancelAll
// ensures that if a new request is registered during
// this transition, it will either be cancelled by
// cancelAll, or by its own check of signalledLeadership.
n.wait.cancelAll()
} else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader {
wasLeader = true
}
}
//.........这里部分代码省略.........
开发者ID:Mic92,项目名称:docker,代码行数:101,代码来源:raft.go
示例19: Run
// Run is the main loop for a Raft node, it goes along the state machine,
// acting on the messages received from other Raft nodes in the cluster.
//
// Before running the main loop, it first starts the raft node based on saved
// cluster state. If no saved state exists, it starts a single-node cluster.
func (n *Node) Run(ctx context.Context) error {
defer func() {
close(n.doneCh)
}()
for {
select {
case <-n.ticker.C():
n.Tick()
case rd := <-n.Ready():
raftConfig := DefaultRaftConfig()
n.memoryStore.View(func(readTx store.ReadTx) {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err == nil && len(clusters) == 1 {
raftConfig = clusters[0].Spec.Raft
}
})
// Save entries to storage
if err := n.saveToStorage(&raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil {
n.Config.Logger.Error(err)
}
// Send raft messages to peers
if err := n.send(rd.Messages); err != nil {
n.Config.Logger.Error(err)
}
// Apply snapshot to memory store. The snapshot
// was applied to the raft store in
// saveToStorage.
if !raft.IsEmptySnap(rd.Snapshot) {
// Load the snapshot data into the store
if err := n.restoreFromSnapshot(rd.Snapshot.Data, n.forceNewCluster); err != nil {
n.Config.Logger.Error(err)
}
n.appliedIndex = rd.Snapshot.Metadata.Index
n.snapshotIndex = rd.Snapshot.Metadata.Index
n.confState = rd.Snapshot.Metadata.ConfState
}
// Process committed entries
for _, entry := range rd.CommittedEntries {
if err := n.processCommitted(entry); err != nil {
n.Config.Logger.Error(err)
}
}
// Trigger a snapshot every once in awhile
if n.snapshotInProgress == nil &&
raftConfig.SnapshotInterval > 0 &&
n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
n.doSnapshot(&raftConfig)
}
// If we cease to be the leader, we must cancel
// any proposals that are currently waiting for
// a quorum to acknowledge them. It is still
// possible for these to become committed, but
// if that happens we will apply them as any
// follower would.
if rd.SoftState != nil {
if n.wasLeader && rd.SoftState.RaftState != raft.StateLeader {
n.wasLeader = false
n.wait.cancelAll()
n.leadershipBroadcast.Write(IsFollower)
} else if !n.wasLeader && rd.SoftState.RaftState == raft.StateLeader {
n.wasLeader = true
n.leadershipBroadcast.Write(IsLeader)
}
}
// If we are the only registered member after
// restoring from the state, campaign to be the
// leader.
if !n.restored {
if len(n.cluster.Members()) <= 1 {
if err := n.Campaign(n.Ctx); err != nil {
panic("raft: cannot campaign to be the leader on node restore")
}
}
n.restored = true
}
// Advance the state machine
n.Advance()
case snapshotIndex := <-n.snapshotInProgress:
if snapshotIndex > n.snapshotIndex {
n.snapshotIndex = snapshotIndex
}
n.snapshotInProgress = nil
case <-n.removeRaftCh:
// If the node was removed from other members,
//.........这里部分代码省略.........
开发者ID:ygf11,项目名称:docker,代码行数:101,代码来源:raft.go
示例20: TestNewNodeCertificateRequiresToken
|
请发表评论