本文整理汇总了Golang中github.com/docker/swarmkit/manager/state/store.NewMemoryStore函数的典型用法代码示例。如果您正苦于以下问题:Golang NewMemoryStore函数的具体用法?Golang NewMemoryStore怎么用?Golang NewMemoryStore使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewMemoryStore函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: TestKeyManagerDefaultSubsystem
// Verify the key generation and rotation for default subsystems
func TestKeyManagerDefaultSubsystem(t *testing.T) {
	memStore := store.NewMemoryStore(nil)
	defer memStore.Close()
	createCluster(t, memStore, "default", "default")

	km := New(memStore, DefaultConfig())
	ctx := context.Background()
	go km.Run(ctx)
	time.Sleep(250 * time.Millisecond)

	// The number of keys allocated must match subsystems * keyring size.
	var (
		foundClusters []*api.Cluster
		viewErr       error
	)
	km.store.View(func(readTx store.ReadTx) {
		foundClusters, viewErr = store.FindClusters(readTx, store.ByName(km.config.ClusterName))
	})
	assert.NoError(t, viewErr)
	assert.Equal(t, len(foundClusters[0].NetworkBootstrapKeys), len(km.config.Subsystems)*keyringSize)

	oldestKey := foundClusters[0].NetworkBootstrapKeys[0].Key
	km.rotateKey(ctx)

	// After one rotation the oldest key must be gone from the keyring.
	assert.Equal(t, len(km.keyRing.keys), len(km.config.Subsystems)*keyringSize)
	for _, candidate := range km.keyRing.keys {
		assert.False(t, bytes.Equal(candidate.Key, oldestKey))
	}
}
开发者ID:docker,项目名称:swarmkit,代码行数:35,代码来源:keymanager_test.go
示例2: TestSchedulerNoReadyNodes
// TestSchedulerNoReadyNodes checks that a pending task stays unassigned
// while no READY node exists, and is placed as soon as one appears.
func TestSchedulerNoReadyNodes(t *testing.T) {
	ctx := context.Background()

	pendingTask := &api.Task{
		ID:           "id1",
		DesiredState: api.TaskStateRunning,
		ServiceAnnotations: api.Annotations{
			Name: "name1",
		},
		Status: api.TaskStatus{
			State: api.TaskStatePending,
		},
	}

	memStore := store.NewMemoryStore(nil)
	assert.NotNil(t, memStore)
	defer memStore.Close()

	updateErr := memStore.Update(func(tx store.Tx) error {
		// Seed the store with the pending task.
		assert.NoError(t, store.CreateTask(tx, pendingTask))
		return nil
	})
	assert.NoError(t, updateErr)

	sched := New(memStore)

	watch, cancel := state.Watch(memStore.WatchQueue(), state.EventUpdateTask{})
	defer cancel()

	go func() {
		assert.NoError(t, sched.Run(ctx))
	}()
	defer sched.Stop()

	// With no ready nodes the scheduler must report a placement failure.
	failure := watchAssignmentFailure(t, watch)
	assert.Equal(t, "no suitable node", failure.Status.Message)

	updateErr = memStore.Update(func(tx store.Tx) error {
		// Create a ready node. The task should get assigned to this
		// node.
		readyNode := &api.Node{
			ID: "newnode",
			Spec: api.NodeSpec{
				Annotations: api.Annotations{
					Name: "newnode",
				},
			},
			Status: api.NodeStatus{
				State: api.NodeStatus_READY,
			},
		}
		assert.NoError(t, store.CreateNode(tx, readyNode))
		return nil
	})
	assert.NoError(t, updateErr)

	assignment := watchAssignment(t, watch)
	assert.Equal(t, "newnode", assignment.NodeID)
}
开发者ID:docker,项目名称:swarmkit,代码行数:59,代码来源:scheduler_test.go
示例3: NewNode
// NewNode generates a new Raft node
func NewNode(opts NodeOptions) *Node {
	nodeConfig := opts.Config
	if nodeConfig == nil {
		nodeConfig = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}
	if opts.SendTimeout == 0 {
		opts.SendTimeout = 2 * time.Second
	}

	storage := raft.NewMemoryStorage()
	n := &Node{
		cluster:   membership.NewCluster(2 * nodeConfig.ElectionTick),
		raftStore: storage,
		opts:      opts,
		Config: &raft.Config{
			ElectionTick:    nodeConfig.ElectionTick,
			HeartbeatTick:   nodeConfig.HeartbeatTick,
			Storage:         storage,
			MaxSizePerMsg:   nodeConfig.MaxSizePerMsg,
			MaxInflightMsgs: nodeConfig.MaxInflightMsgs,
			Logger:          nodeConfig.Logger,
		},
		doneCh:              make(chan struct{}),
		removeRaftCh:        make(chan struct{}),
		stopped:             make(chan struct{}),
		leadershipBroadcast: watch.NewQueue(),
		lastSendToMember:    make(map[uint64]chan struct{}),
		keyRotator:          opts.KeyRotator,
	}
	n.memoryStore = store.NewMemoryStore(n)

	// Fall back to the wall clock unless the caller injected a clock source.
	clockSource := opts.ClockSource
	if clockSource == nil {
		clockSource = clock.NewClock()
	}
	n.ticker = clockSource.NewTicker(opts.TickInterval)

	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()

	// Guarantee removeRaftCh is closed at most once, however many times the
	// remove callback fires.
	var removeOnce sync.Once
	n.removeRaftFunc = func() {
		removeOnce.Do(func() {
			close(n.removeRaftCh)
		})
	}
	return n
}
开发者ID:fabianofranz,项目名称:docker,代码行数:56,代码来源:raft.go
示例4: NewNode
// NewNode generates a new Raft node
func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
	nodeConfig := opts.Config
	if nodeConfig == nil {
		nodeConfig = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}

	storage := raft.NewMemoryStorage()
	ctx, cancel := context.WithCancel(ctx)

	n := &Node{
		Ctx:            ctx,
		cancel:         cancel,
		cluster:        membership.NewCluster(),
		tlsCredentials: opts.TLSCredentials,
		raftStore:      storage,
		Address:        opts.Addr,
		opts:           opts,
		Config: &raft.Config{
			ElectionTick:    nodeConfig.ElectionTick,
			HeartbeatTick:   nodeConfig.HeartbeatTick,
			Storage:         storage,
			MaxSizePerMsg:   nodeConfig.MaxSizePerMsg,
			MaxInflightMsgs: nodeConfig.MaxInflightMsgs,
			Logger:          nodeConfig.Logger,
		},
		forceNewCluster:     opts.ForceNewCluster,
		stopCh:              make(chan struct{}),
		doneCh:              make(chan struct{}),
		removeRaftCh:        make(chan struct{}),
		StateDir:            opts.StateDir,
		joinAddr:            opts.JoinAddr,
		sendTimeout:         2 * time.Second,
		leadershipBroadcast: events.NewBroadcaster(),
	}
	n.memoryStore = store.NewMemoryStore(n)

	// Fall back to the wall clock unless the caller injected a clock source.
	clockSource := opts.ClockSource
	if clockSource == nil {
		clockSource = clock.NewClock()
	}
	n.ticker = clockSource.NewTicker(opts.TickInterval)

	// A caller-supplied send timeout overrides the 2s default set above.
	if opts.SendTimeout != 0 {
		n.sendTimeout = opts.SendTimeout
	}

	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()
	return n
}
开发者ID:ygf11,项目名称:docker,代码行数:55,代码来源:raft.go
示例5: TestKeyManagerInvalidSubsystem
// Verify that instantiating keymanager fails if an invalid subsystem is
// passed
func TestKeyManagerInvalidSubsystem(t *testing.T) {
	st := store.NewMemoryStore(nil)
	// Fix: the store was never closed, leaking its watch queue. The sibling
	// keymanager tests defer Close on the same kind of store.
	defer st.Close()
	createCluster(t, st, "default", "default")

	config := &Config{
		ClusterName:      store.DefaultClusterName,
		Keylen:           DefaultKeyLen,
		RotationInterval: DefaultKeyRotationInterval,
		// "serf" is not a supported subsystem, so New must reject it.
		Subsystems: []string{"serf"},
	}
	k := New(st, config)
	assert.Nil(t, k)
}
开发者ID:RobbieJVMW,项目名称:swarmkit,代码行数:16,代码来源:keymanager_test.go
示例6: TestSetup
// TestSetup brings up a cluster fixture and checks the first task created
// by the global orchestrator.
func TestSetup(t *testing.T) {
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	firstTask := SetupCluster(t, s, watch)
	assert.Equal(t, firstTask.Status.State, api.TaskStateNew)
	assert.Equal(t, firstTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, firstTask.NodeID, "id1")
}
开发者ID:docker,项目名称:swarmkit,代码行数:14,代码来源:global_test.go
示例7: TestAddNode
// TestAddNode verifies that the global service spawns a task on a newly
// added node.
func TestAddNode(t *testing.T) {
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	watch, cancel := state.Watch(s.WatchQueue())
	defer cancel()

	SetupCluster(t, s, watch)

	addNode(t, s, node2)
	createdTask := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, createdTask.Status.State, api.TaskStateNew)
	assert.Equal(t, createdTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, createdTask.NodeID, "id2")
}
开发者ID:docker,项目名称:swarmkit,代码行数:16,代码来源:global_test.go
示例8: TestDeleteService
// TestDeleteService verifies that removing a service also deletes its task.
func TestDeleteService(t *testing.T) {
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	watch, cancel := state.Watch(s.WatchQueue())
	defer cancel()

	SetupCluster(t, s, watch)

	deleteService(t, s, service1)
	// task should be deleted
	removedTask := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, removedTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, removedTask.NodeID, "id1")
}
开发者ID:docker,项目名称:swarmkit,代码行数:16,代码来源:global_test.go
示例9: TestAddService
// TestAddService verifies that adding a second service creates a task for
// it on the existing node.
func TestAddService(t *testing.T) {
	// Renamed from `store` to `s`: the original shadowed the store package.
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	// Fix: the store was never closed, leaking its watch queue
	// (TestAddNode/TestDeleteService in this file defer Close).
	defer s.Close()

	SetupCluster(t, s)

	watch, cancel := state.Watch(s.WatchQueue())
	defer cancel()
	skipEvents(t, watch)

	addService(t, s, service2)
	observedTask := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask.ServiceAnnotations.Name, "name2")
	assert.True(t, observedTask.NodeID == "id1")
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:17,代码来源:global_test.go
示例10: TestDeleteNode
// TestDeleteNode verifies that deleting a node shuts down the task that was
// running on it.
func TestDeleteNode(t *testing.T) {
	// Renamed from `store` to `s`: the original shadowed the store package.
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	// Fix: the store was never closed, leaking its watch queue
	// (TestAddNode/TestDeleteService in this file defer Close).
	defer s.Close()

	SetupCluster(t, s)

	watch, cancel := state.Watch(s.WatchQueue())
	defer cancel()
	skipEvents(t, watch)

	deleteNode(t, s, node1)
	// task should be set to dead
	observedTask := watchShutdownTask(t, watch)
	assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, observedTask.NodeID, "id1")
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:17,代码来源:global_test.go
示例11: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
	memStore := store.NewMemoryStore(nil)
	defer memStore.Close()
	createCluster(t, memStore, "default", "default")

	cfg := &Config{
		ClusterName:      store.DefaultClusterName,
		Keylen:           DefaultKeyLen,
		RotationInterval: DefaultKeyRotationInterval,
		Subsystems:       []string{SubsystemIPSec},
	}
	km := New(memStore, cfg)
	ctx := context.Background()
	go km.Run(ctx)
	time.Sleep(250 * time.Millisecond)

	// The number of keys allocated must match the keyring size.
	var (
		foundClusters []*api.Cluster
		viewErr       error
	)
	km.store.View(func(readTx store.ReadTx) {
		foundClusters, viewErr = store.FindClusters(readTx, store.ByName(km.config.ClusterName))
	})
	assert.NoError(t, viewErr)
	assert.Equal(t, len(foundClusters[0].NetworkBootstrapKeys), keyringSize)

	oldestKey := foundClusters[0].NetworkBootstrapKeys[0].Key
	km.rotateKey(ctx)

	// After a rotation the oldest key must be gone from the keyring, and
	// every remaining key must belong to the IPsec subsystem.
	assert.Equal(t, len(km.keyRing.keys), keyringSize)
	for _, candidate := range km.keyRing.keys {
		assert.False(t, bytes.Equal(candidate.Key, oldestKey))
		assert.True(t, candidate.Subsystem == SubsystemIPSec)
	}
}
开发者ID:docker,项目名称:swarmkit,代码行数:44,代码来源:keymanager_test.go
示例12: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
	st := store.NewMemoryStore(nil)
	// Fix: the store was never closed, leaking its watch queue (the other
	// variant of this test defers Close on the same kind of store).
	defer st.Close()
	createCluster(t, st, "default", "default")

	config := &Config{
		ClusterName:      store.DefaultClusterName,
		Keylen:           DefaultKeyLen,
		RotationInterval: DefaultKeyRotationInterval,
		Subsystems:       []string{SubsystemIPSec},
	}
	k := New(st, config)
	ctx := context.Background()
	go k.Run(ctx)
	time.Sleep(250 * time.Millisecond)

	// verify the first key has been allocated and updated in the
	// store
	var (
		clusters []*api.Cluster
		err      error
	)
	k.store.View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
	})
	assert.NoError(t, err)
	assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), 1)

	key1 := clusters[0].NetworkBootstrapKeys[0].Key
	k.rotateKey(ctx)
	k.rotateKey(ctx)

	// verify that after two rotations keyring has two keys and the very
	// first key allocated has been removed
	assert.Equal(t, len(k.keyRing.keys), 2)
	for _, key := range k.keyRing.keys {
		match := bytes.Equal(key.Key, key1)
		assert.False(t, match)
	}
}
开发者ID:RobbieJVMW,项目名称:swarmkit,代码行数:43,代码来源:keymanager_test.go
示例13: newTestServer
// newTestServer builds a control-API server backed by an in-memory store,
// serves it over a unix socket, and returns a connected client fixture.
func newTestServer(t *testing.T) *testServer {
	ts := &testServer{}

	// Create a testCA just to get a usable RootCA object
	testCA := cautils.NewTestCA(nil)
	testCA.Stop()

	ts.Store = store.NewMemoryStore(&mockProposer{})
	assert.NotNil(t, ts.Store)
	ts.Server = NewServer(ts.Store, nil, &testCA.RootCA)
	assert.NotNil(t, ts.Server)

	// Reserve a unique socket path by creating and removing a temp file.
	sockFile, err := ioutil.TempFile("", "test-socket")
	assert.NoError(t, err)
	assert.NoError(t, sockFile.Close())
	assert.NoError(t, os.Remove(sockFile.Name()))
	ts.tempUnixSocket = sockFile.Name()

	listener, err := net.Listen("unix", sockFile.Name())
	assert.NoError(t, err)

	ts.grpcServer = grpc.NewServer()
	api.RegisterControlServer(ts.grpcServer, ts.Server)
	go func() {
		// Serve will always return an error (even when properly stopped).
		// Explicitly ignore it.
		_ = ts.grpcServer.Serve(listener)
	}()

	clientConn, err := grpc.Dial(sockFile.Name(), grpc.WithInsecure(), grpc.WithTimeout(10*time.Second),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	assert.NoError(t, err)
	ts.clientConn = clientConn
	ts.Client = api.NewControlClient(clientConn)
	return ts
}
开发者ID:docker,项目名称:swarmkit,代码行数:41,代码来源:server_test.go
示例14: TestUpdaterStopGracePeriod
// TestUpdaterStopGracePeriod verifies that updating a service whose old
// tasks never report a terminal state waits at least StopGracePeriod
// before the update completes.
func TestUpdaterStopGracePeriod(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	// Fix: the store was never closed, leaking its watch queue
	// (TestUpdaterRollback in this file defers Close on the same store type).
	defer s.Close()

	// Move tasks to their desired state.
	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()
	go func() {
		for {
			select {
			case e := <-watch:
				task := e.(state.EventUpdateTask).Task
				err := s.Update(func(tx store.Tx) error {
					task = store.GetTask(tx, task.ID)
					// Explicitly do not set task state to
					// DEAD to trigger StopGracePeriod
					if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateRunning {
						task.Status.State = api.TaskStateRunning
						return store.UpdateTask(tx, task)
					}
					return nil
				})
				assert.NoError(t, err)
			}
		}
	}()

	var instances uint64 = 3

	service := &api.Service{
		ID: "id1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{
						Image:           "v:1",
						StopGracePeriod: ptypes.DurationProto(100 * time.Millisecond),
					},
				},
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: instances,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, service))
		for i := uint64(0); i < instances; i++ {
			task := newTask(nil, service, uint64(i))
			task.Status.State = api.TaskStateRunning
			assert.NoError(t, store.CreateTask(tx, task))
		}
		return nil
	})
	assert.NoError(t, err)

	originalTasks := getRunnableServiceTasks(t, s, service)
	for _, task := range originalTasks {
		assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
	}

	before := time.Now()

	service.Spec.Task.GetContainer().Image = "v:2"
	updater := NewUpdater(s, NewRestartSupervisor(s))
	// Override the default (1 minute) to speed up the test.
	updater.restarts.taskTimeout = 100 * time.Millisecond
	updater.Run(ctx, nil, service, getRunnableServiceTasks(t, s, service))
	updatedTasks := getRunnableServiceTasks(t, s, service)
	for _, task := range updatedTasks {
		assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
	}

	after := time.Now()

	// At least 100 ms should have elapsed. Only check the lower bound,
	// because the system may be slow and it could have taken longer.
	if after.Sub(before) < 100*time.Millisecond {
		t.Fatal("stop timeout should have elapsed")
	}
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:87,代码来源:updater_test.go
示例15: TestUpdater
// TestUpdater drives rolling updates of a replicated service through
// successive image changes and checks that image and log-driver settings
// propagate to tasks. NOTE(review): listing truncated by the source site —
// the tail of this function is omitted below.
func TestUpdater(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
// Move tasks to their desired state.
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
// Background worker: acknowledge every task update by moving the observed
// state to the desired state, so the updater can make progress.
go func() {
for {
select {
case e := <-watch:
task := e.(state.EventUpdateTask).Task
if task.Status.State == task.DesiredState {
continue
}
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
task.Status.State = task.DesiredState
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
}
}
}()
instances := 3
cluster := &api.Cluster{
// test cluster configuration propagation to task creation.
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: "default",
},
},
}
service := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: uint64(instances),
},
},
Task: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "v:1",
// This won't apply in this test because we set the old tasks to DEAD.
StopGracePeriod: ptypes.DurationProto(time.Hour),
},
},
},
},
}
// Seed the store with the cluster, service, and initial tasks.
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateCluster(tx, cluster))
assert.NoError(t, store.CreateService(tx, service))
for i := 0; i < instances; i++ {
assert.NoError(t, store.CreateTask(tx, newTask(cluster, service, uint64(i))))
}
return nil
})
assert.NoError(t, err)
originalTasks := getRunnableServiceTasks(t, s, service)
for _, task := range originalTasks {
assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
assert.Nil(t, task.LogDriver) // should be left alone
}
// First update: new image plus a task-level log driver.
service.Spec.Task.GetContainer().Image = "v:2"
service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
updater := NewUpdater(s, NewRestartSupervisor(s))
updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
updatedTasks := getRunnableServiceTasks(t, s, service)
for _, task := range updatedTasks {
assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task
}
// Second update: new image; the task-level log driver still wins over the
// cluster default.
service.Spec.Task.GetContainer().Image = "v:3"
cluster.Spec.DefaultLogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
service.Spec.Update = &api.UpdateConfig{
Parallelism: 1,
}
updater = NewUpdater(s, NewRestartSupervisor(s))
updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
updatedTasks = getRunnableServiceTasks(t, s, service)
for _, task := range updatedTasks {
assert.Equal(t, "v:3", task.Spec.GetContainer().Image)
assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // still pick up from task
}
service.Spec.Task.GetContainer().Image = "v:4"
service.Spec.Task.LogDriver = nil // use cluster default now.
//.........这里部分代码省略.........
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:101,代码来源:updater_test.go
示例16: TestSchedulerResourceConstraint
// TestSchedulerResourceConstraint checks that a task with a memory
// reservation is not placed on a ready node lacking the resources, nor on
// non-ready nodes that do have them. NOTE(review): listing truncated by
// the source site — the tail of this function is omitted below.
func TestSchedulerResourceConstraint(t *testing.T) {
ctx := context.Background()
// Create a ready node without enough memory to run the task.
underprovisionedNode := &api.Node{
ID: "underprovisioned",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "underprovisioned",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 1e9,
MemoryBytes: 1e9,
},
},
}
// Non-ready nodes that satisfy the constraints but shouldn't be used
nonready1 := &api.Node{
ID: "nonready1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "nonready1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_UNKNOWN,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 2e9,
MemoryBytes: 2e9,
},
},
}
nonready2 := &api.Node{
ID: "nonready2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "nonready2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_UNKNOWN,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 2e9,
MemoryBytes: 2e9,
},
},
}
// The task reserves 2e9 bytes — more than the ready node's 1e9.
initialTask := &api.Task{
ID: "id1",
DesiredState: api.TaskStateRunning,
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{},
},
Resources: &api.ResourceRequirements{
Reservations: &api.Resources{
MemoryBytes: 2e9,
},
},
},
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial node and task
assert.NoError(t, store.CreateTask(tx, initialTask))
assert.NoError(t, store.CreateNode(tx, underprovisionedNode))
assert.NoError(t, store.CreateNode(tx, nonready1))
assert.NoError(t, store.CreateNode(tx, nonready2))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
//.........这里部分代码省略.........
开发者ID:docker,项目名称:swarmkit,代码行数:101,代码来源:scheduler_test.go
示例17: TestSchedulerResourceConstraintHA
// TestSchedulerResourceConstraintHA verifies placement under memory
// reservations when nodes have unequal free capacity. NOTE(review):
// listing truncated by the source site — the tail of this function is
// omitted below.
func TestSchedulerResourceConstraintHA(t *testing.T) {
// node 1 starts with 1 task, node 2 starts with 3 tasks.
// however, node 1 only has enough memory to schedule one more task.
ctx := context.Background()
node1 := &api.Node{
ID: "id1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "id1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
MemoryBytes: 1e9,
},
},
}
node2 := &api.Node{
ID: "id2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "id2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
MemoryBytes: 1e11,
},
},
}
// Each task reserves 5e8 bytes, so node1 (1e9) fits two tasks total.
taskTemplate := &api.Task{
DesiredState: api.TaskStateRunning,
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{},
},
Resources: &api.ResourceRequirements{
Reservations: &api.Resources{
MemoryBytes: 5e8,
},
},
},
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial node and task
assert.NoError(t, store.CreateNode(tx, node1))
assert.NoError(t, store.CreateNode(tx, node2))
// preassigned tasks
task1 := taskTemplate.Copy()
task1.ID = "id1"
task1.NodeID = "id1"
task1.Status.State = api.TaskStateRunning
assert.NoError(t, store.CreateTask(tx, task1))
task2 := taskTemplate.Copy()
task2.ID = "id2"
task2.NodeID = "id2"
task2.Status.State = api.TaskStateRunning
assert.NoError(t, store.CreateTask(tx, task2))
task3 := taskTemplate.Copy()
task3.ID = "id3"
task3.NodeID = "id2"
task3.Status.State = api.TaskStateRunning
assert.NoError(t, store.CreateTask(tx, task3))
task4 := taskTemplate.Copy()
task4.ID = "id4"
task4.NodeID = "id2"
task4.Status.State = api.TaskStateRunning
assert.NoError(t, store.CreateTask(tx, task4))
// tasks to assign
task5 := taskTemplate.Copy()
task5.ID = "id5"
assert.NoError(t, store.CreateTask(tx, task5))
task6 := taskTemplate.Copy()
task6.ID = "id6"
assert.NoError(t, store.CreateTask(tx, task6))
//.........这里部分代码省略.........
开发者ID:docker,项目名称:swarmkit,代码行数:101,代码来源:scheduler_test.go
示例18: TestPreassignedTasks
// TestPreassignedTasks checks that tasks already bound to a node are
// dispatched before unassigned ones. NOTE(review): listing truncated by
// the source site — the tail of this function is omitted below.
func TestPreassignedTasks(t *testing.T) {
ctx := context.Background()
initialNodeSet := []*api.Node{
{
ID: "node1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "node2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
}
// task1 is unassigned; task2 and task3 are preassigned to node1.
initialTaskSet := []*api.Task{
{
ID: "task1",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
},
{
ID: "task2",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
NodeID: initialNodeSet[0].ID,
},
{
ID: "task3",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
NodeID: initialNodeSet[0].ID,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Prepopulate nodes
for _, n := range initialNodeSet {
assert.NoError(t, store.CreateNode(tx, n))
}
// Prepopulate tasks
for _, task := range initialTaskSet {
assert.NoError(t, store.CreateTask(tx, task))
}
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
//preassigned tasks would be processed first
assignment1 := watchAssignment(t, watch)
// task2 and task3 are preassigned to node1
assert.Equal(t, assignment1.NodeID, "node1")
assert.Regexp(t, assignment1.ID, "(task2|task3)")
assignment2 := watchAssignment(t, watch)
if assignment1.ID == "task2" {
assert.Equal(t, "task3", assignment2.ID)
} else {
//.........这里部分代码省略.........
开发者ID:docker,项目名称:swarmkit,代码行数:101,代码来源:scheduler_test.go
示例19: TestSchedulerPluginConstraint
//.........这里部分代码省略.........
// NOTE(review): the head of this listing (the function signature and the
// start of the composite literals defining n1, t1, and this t2) was
// omitted by the aggregation site; the fragment below is not compilable
// on its own. Presumably t2 requires volume plugin "plugin1" via its
// mount — confirm against the full source.
Spec: api.NetworkSpec{
Annotations: api.Annotations{
Name: "testVol1",
},
},
DriverState: &api.Driver{
Name: "plugin1",
},
},
},
},
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Mounts: []api.Mount{
{
Source: "testVol1",
Target: "/foo",
Type: api.MountTypeVolume,
VolumeOptions: volumeOptionsDriver("plugin1"),
},
},
},
},
},
ServiceAnnotations: api.Annotations{
Name: "task2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
// Add initial node and task
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, t1))
assert.NoError(t, store.CreateNode(tx, n1))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
// t1 should get assigned
assignment := watchAssignment(t, watch)
assert.Equal(t, assignment.NodeID, "node1_ID")
// Create t2; it should stay in the pending state because there is
// no node that with volume plugin `plugin2`
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, t2))
return nil
})
assert.NoError(t, err)
开发者ID:docker,项目名称:swarmkit,代码行数:67,代码来源:scheduler_test.go
示例20: TestUpdaterRollback
// TestUpdaterRollback exercises the ROLLBACK failure action: newly updated
// tasks are deliberately failed so the update exceeds MaxFailureRatio and
// the service rolls back. NOTE(review): listing truncated by the source
// site — the tail of this function is omitted below.
func TestUpdaterRollback(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
orchestrator := NewReplicatedOrchestrator(s)
defer orchestrator.Stop()
// Flags (toggled atomically elsewhere) selecting which image's tasks the
// background worker below should fail.
var (
failImage1 uint32
failImage2 uint32
)
watchCreate, cancelCreate := state.Watch(s.WatchQueue(), state.EventCreateTask{})
defer cancelCreate()
watchServiceUpdate, cancelServiceUpdate := state.Watch(s.WatchQueue(), state.EventUpdateService{})
defer cancelServiceUpdate()
// Fail new tasks the updater tries to run
watchUpdate, cancelUpdate := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancelUpdate()
go func() {
failedLast := false
for {
select {
case e := <-watchUpdate:
task := e.(state.EventUpdateTask).Task
if task.DesiredState == task.Status.State {
continue
}
if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
// Never fail two image2 tasks in a row, so there's a mix of
// failed and successful tasks for the rollback.
if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
task.Status.State = api.TaskStateFailed
failedLast = true
} else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
task.Status.State = api.TaskStateFailed
failedLast = true
} else {
task.Status.State = task.DesiredState
failedLast = false
}
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
} else if task.DesiredState > api.TaskStateRunning {
// Acknowledge shutdown-type desired states immediately.
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
task.Status.State = task.DesiredState
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
}
}
}
}()
// Create a service with four replicas specified before the orchestrator
// is started. This should result in two tasks when the orchestrator
// starts up.
err := s.Update(func(tx store.Tx) error {
s1 := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Task: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "image1",
},
},
Restart: &api.RestartPolicy{
Condition: api.RestartOnNone,
},
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: 4,
},
},
Update: &api.UpdateConfig{
FailureAction: api.UpdateConfig_ROLLBACK,
Parallelism: 1,
Delay: *ptypes.DurationProto(10 * time.Millisecond),
Monitor: ptypes.DurationProto(500 * time.Millisecond),
MaxFailureRatio: 0.4,
},
},
}
assert.NoError(t, store.CreateService(tx, s1))
return nil
})
//.........这里部分代码省略.........
开发者ID:docker,项目名称:swarmkit,代码行数:101,代码来源:update_test.go
注:本文中的github.com/docker/swarmkit/manager/state/store.NewMemoryStore函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论