This article compiles typical usage examples of the Golang Offer class from github.com/mesos/mesos-go/mesosproto. If you have been wondering what the Offer class is for and how to use it, the curated examples below should help.
The following 20 code examples of the Offer class are shown, sorted by popularity by default.
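Before the individual examples, here is a minimal sketch of the pattern they all share: offers arrive through the scheduler's ResourceOffers callback, and because Offer is a generated protobuf message, its fields are read through Get* accessors. The MyScheduler type below is hypothetical and stands in for any of the schedulers shown later; the import aliases (mesos for mesosproto, scheduler for the mesos-go scheduler package) match the ones used throughout the examples.

// A minimal, hypothetical scheduler callback showing how offers arrive
// and how their fields are read via the generated Get* accessors.
func (s *MyScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		log.Printf("offer %s from host %s (slave %s)",
			offer.GetId().GetValue(),
			offer.GetHostname(),
			offer.GetSlaveId().GetValue())
		// Decline anything we do not use so Mesos can re-offer it elsewhere.
		driver.DeclineOffer(offer.GetId(), &mesos.Filters{})
	}
}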
Example 1: NewTaskInfo
func (ct *ConsumerTask) NewTaskInfo(offer *mesos.Offer) *mesos.TaskInfo {
	taskName := fmt.Sprintf("consumer-%s", ct.ID)
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}
	data, err := json.Marshal(ct.Config)
	if err != nil {
		panic(err)
	}
	taskInfo := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: ct.createExecutor(),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", ct.Cpu),
			util.NewScalarResource("mem", ct.Mem),
		},
		Data: data,
	}
	return taskInfo
}
Developer ID: elodina, Project: go-kafka-client-mesos, Lines: 25, Source: consumer_task.go
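A hedged sketch of a possible call site for NewTaskInfo, modeled on the LaunchTasks pattern in Examples 5 and 7 rather than taken from this project; the RefuseSeconds value is an assumption.

// Hypothetical call site: build the TaskInfo from the offer, then launch it.
taskInfo := ct.NewTaskInfo(offer)
driver.LaunchTasks(
	[]*mesos.OfferID{offer.GetId()},
	[]*mesos.TaskInfo{taskInfo},
	&mesos.Filters{RefuseSeconds: proto.Float64(10)}, // assumed value
)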
Example 2: createExecutor
func (s *Scheduler) createExecutor(offer *mesos.Offer, tcpPort uint64, udpPort uint64) *mesos.ExecutorInfo {
	name := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	id := fmt.Sprintf("%s-%s", name, uuid())
	uris := []*mesos.CommandInfo_URI{
		&mesos.CommandInfo_URI{
			Value:      proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.Executor)),
			Executable: proto.Bool(true),
		},
	}
	if Config.ProducerProperties != "" {
		uris = append(uris, &mesos.CommandInfo_URI{
			Value: proto.String(fmt.Sprintf("%s/resource/%s", Config.Api, Config.ProducerProperties)),
		})
	}
	command := fmt.Sprintf("./%s --log.level %s --tcp %d --udp %d --host %s", Config.Executor, Config.LogLevel, tcpPort, udpPort, offer.GetHostname())
	return &mesos.ExecutorInfo{
		ExecutorId: util.NewExecutorID(id),
		Name:       proto.String(name),
		Command: &mesos.CommandInfo{
			Value: proto.String(command),
			Uris:  uris,
		},
	}
}
Developer ID: elodina, Project: syslog-service, Lines: 28, Source: scheduler.go
Example 3: createTaskInfo
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()
	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)
	taskInfo := &mesosproto.TaskInfo{
		TaskId:  &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId: offer.SlaveId,
		Name:    proto.String(task.Name),
		Command: buildCommandInfo(task, env),
		Container: &mesosproto.ContainerInfo{
			Type: mesosproto.ContainerInfo_DOCKER.Enum(),
			Docker: &mesosproto.ContainerInfo_DockerInfo{
				Image:          proto.String(task.Image),
				ForcePullImage: proto.Bool(task.ForcePullImage),
				PortMappings:   portMapping,
				Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			},
			Volumes: buildVolumes(task),
		},
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, taskInfo
}
Developer ID: klarna, Project: eremetic, Lines: 33, Source: task.go
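For orientation, a hedged sketch of how a scheduler might consume this function; the surrounding variables and the filter value are assumptions, and only createTaskInfo itself comes from the excerpt above.

// Hypothetical pairing of a queued eremetic task with a matching offer.
task, taskInfo := createTaskInfo(task, offer)
_, err := driver.LaunchTasks(
	[]*mesosproto.OfferID{offer.Id},
	[]*mesosproto.TaskInfo{taskInfo},
	&mesosproto.Filters{RefuseSeconds: proto.Float64(1)}, // assumed value
)
if err != nil {
	log.Printf("launch failed: %v", err)
}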
Example 4: Push
func (oc *OfferCache) Push(newOffer *mesos.Offer) bool {
	oc.mut.Lock()
	defer oc.mut.Unlock()
	if len(oc.offerSet) < oc.maxOffers {
		// Reject offers from existing slaves.
		for _, offer := range oc.offerSet {
			if offer.SlaveId.GetValue() == newOffer.SlaveId.GetValue() &&
				oc.singleInstancePerSlave {
				log.Info("Offer already exists for slave ", newOffer.SlaveId.GetValue())
				return false
			}
		}
		oc.offerSet[newOffer.GetId().GetValue()] = newOffer
		// Try to add offer to the queue, clearing out invalid
		// offers in order to make room if necessary.
		for i := 0; i < 2; i++ {
			select {
			case oc.offerQueue <- newOffer:
				return true
			default:
				oc.gc()
			}
		}
	}
	log.Info("We already have enough offers cached.")
	return false
}
Developer ID: puppetizeme, Project: etcd-mesos, Lines: 28, Source: offercache.go
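A hedged sketch of how Push might be fed from the scheduler callback; the EtcdScheduler type and its offerCache field are assumptions for illustration, and only Push with its false-on-duplicate semantics comes from the excerpt above.

// Hypothetical wiring of incoming offers into the cache.
func (s *EtcdScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		if !s.offerCache.Push(offer) {
			// Duplicate slave or cache full: hand the offer back immediately.
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{})
		}
	}
}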
Example 5: launchTask
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syscol-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}
	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) // shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))
	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetSlaveId().GetValue()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}
	s.cluster.Add(offer.GetSlaveId().GetValue(), task)
	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Developer ID: elodina, Project: syscol, Lines: 28, Source: scheduler.go
Example 6: AcceptOffer
func (t *T) AcceptOffer(offer *mesos.Offer) bool {
	if offer == nil {
		return false
	}
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}
	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}
	// check ports
	if _, err := t.mapper.Generate(t, offer); err != nil {
		log.V(3).Info(err)
		return false
	}
	// find offered cpu and mem
	var (
		offeredCpus mresource.CPUShares
		offeredMem  mresource.MegaBytes
	)
	for _, resource := range offer.Resources {
		if resource.GetName() == "cpus" {
			offeredCpus = mresource.CPUShares(*resource.GetScalar().Value)
		}
		if resource.GetName() == "mem" {
			offeredMem = mresource.MegaBytes(*resource.GetScalar().Value)
		}
	}
	// calculate cpu and mem sum over all containers of the pod
	// TODO (@sttts): also support pod.spec.resources.limit.request
	// TODO (@sttts): take into account the executor resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
	if (cpu > offeredCpus) || (mem > offeredMem) {
		log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem)
		return false
	}
	return true
}
Developer ID: ngbinh, Project: kubernetes, Lines: 58, Source: pod_task.go
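A hedged sketch of where a predicate like AcceptOffer sits in an offer loop; the surrounding variables are assumptions, not code from the kubernetes-mesos project.

// Hypothetical filtering loop over incoming offers for one pending task.
var matched []*mesos.Offer
for _, offer := range offers {
	if task.AcceptOffer(offer) {
		// host, node selector, ports, cpu and mem all fit
		matched = append(matched, offer)
	}
}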
Example 7: launchTask
func (s *Scheduler) launchTask(task Task, offer *mesos.Offer) {
	taskInfo := task.NewTaskInfo(offer)
	task.Data().State = TaskStateStaging
	task.Data().Attributes = utils.OfferAttributes(offer)
	task.Data().ExecutorID = taskInfo.GetExecutor().GetExecutorId().GetValue()
	task.Data().SlaveID = taskInfo.GetSlaveId().GetValue()
	task.Data().TaskID = taskInfo.GetTaskId().GetValue()
	s.driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{taskInfo}, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
}
Developer ID: elodina, Project: go-kafka-client-mesos, Lines: 10, Source: scheduler.go
Example 8: LaunchTask
func (ctx *RunOnceApplicationContext) LaunchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) error {
	ctx.lock.Lock()
	defer ctx.lock.Unlock()
	ctx.InstancesLeftToRun--
	taskInfo := ctx.newTaskInfo(offer)
	ctx.tasks = append(ctx.tasks, newRunOnceTask(offer, taskInfo.GetTaskId().GetValue()))
	_, err := driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{taskInfo}, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
	return err
}
Developer ID: elodina, Project: stack-deploy, Lines: 11, Source: run_once_application_context.go
Example 9: acceptOffer
func (s *Scheduler) acceptOffer(driver scheduler.SchedulerDriver, offer *mesos.Offer) string {
	if s.cluster.Exists(offer.GetSlaveId().GetValue()) {
		return fmt.Sprintf("Server on slave %s is already running.", offer.GetSlaveId().GetValue())
	}
	declineReason := s.match(offer)
	if declineReason == "" {
		s.launchTask(driver, offer)
	}
	return declineReason
}
Developer ID: elodina, Project: syscol, Lines: 11, Source: scheduler.go
Example 10: WildcardMapper
// WildcardMapper maps k8s wildcard ports (hostPort == 0) to any available offer port
func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
	mapping, err := FixedMapper(t, offer)
	if err != nil {
		return nil, err
	}
	taken := make(map[uint64]struct{})
	for _, entry := range mapping {
		taken[entry.OfferPort] = struct{}{}
	}
	wildports := []HostPortMapping{}
	for i, container := range t.Pod.Spec.Containers {
		for pi, port := range container.Ports {
			if port.HostPort == 0 {
				wildports = append(wildports, HostPortMapping{
					ContainerIdx: i,
					PortIdx:      pi,
				})
			}
		}
	}
	remaining := len(wildports)
	foreachPortsRange(offer.GetResources(), t.Roles(), func(bp, ep uint64, role string) {
		log.V(3).Infof("Searching for wildcard port in range {%d:%d}", bp, ep)
		for i := range wildports {
			if wildports[i].OfferPort != 0 {
				continue
			}
			for port := bp; port <= ep && remaining > 0; port++ {
				if _, inuse := taken[port]; inuse {
					continue
				}
				wildports[i].OfferPort = port
				wildports[i].Role = starredRole(role)
				mapping = append(mapping, wildports[i])
				remaining--
				taken[port] = struct{}{}
				break
			}
		}
	})
	if remaining > 0 {
		err := &PortAllocationError{
			PodId: t.Pod.Name,
		}
		// it doesn't make sense to include a port list here because they were all zero (wildcards)
		return nil, err
	}
	return mapping, nil
}
Developer ID: johndmulhausen, Project: kubernetes, Lines: 55, Source: port_mapping.go
Example 11: Match
func (c *EqualsConstraint) Match(offer *mesos.Offer) bool {
	for _, a := range offer.GetAttributes() {
		if c.Attribute == a.GetName() {
			if a.GetType() == mesos.Value_TEXT {
				return c.Value == a.GetText().GetValue()
			} else if a.GetType() == mesos.Value_SCALAR {
				return c.Value == fmt.Sprintf("%.f", a.GetScalar().GetValue())
			}
			return false
		}
	}
	return false
}
Developer ID: felixb, Project: none, Lines: 14, Source: constraint.go
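A hedged usage sketch; the struct literal assumes Attribute and Value are exported string fields (which the method body implies), and the "rack" attribute name is illustrative only.

// Hypothetical constraint check against an incoming offer.
c := &EqualsConstraint{Attribute: "rack", Value: "us-east-1a"}
if c.Match(offer) {
	log.Printf("offer %s satisfies the rack constraint", offer.GetId().GetValue())
}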
Example 12: FillFromDetails
// Fill the Spec in the T; should be called during k8s scheduling, before binding.
func (t *T) FillFromDetails(details *mesos.Offer) error {
	if details == nil {
		// programming error
		panic("offer details are nil")
	}
	// compute used resources
	cpu := mresource.PodCPULimit(&t.Pod)
	mem := mresource.PodMemLimit(&t.Pod)
	log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", details.Id, t.Pod.Namespace, t.Pod.Name, cpu, mem)
	t.Spec = Spec{
		SlaveID: details.GetSlaveId().GetValue(),
		CPU:     cpu,
		Memory:  mem,
	}
	// fill in port mapping
	if mapping, err := t.mapper.Generate(t, details); err != nil {
		t.Reset()
		return err
	} else {
		ports := []uint64{}
		for _, entry := range mapping {
			ports = append(ports, entry.OfferPort)
		}
		t.Spec.PortMap = mapping
		t.Spec.Ports = ports
	}
	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	const HOSTNAME_OVERRIDE_FLAG = "--hostname-override="
	hostname := details.GetHostname() // required field, non-empty
	hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname
	argv := t.executor.Command.Arguments
	overwrite := false
	for i, arg := range argv {
		if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) {
			overwrite = true
			argv[i] = hostnameOverride
			break
		}
	}
	if !overwrite {
		t.executor.Command.Arguments = append(argv, hostnameOverride)
	}
	return nil
}
Developer ID: Ima8, Project: kubernetes, Lines: 51, Source: pod_task.go
Example 13: OfferAttributes
func OfferAttributes(offer *mesos.Offer) map[string]string {
	offerAttributes := map[string]string{
		"hostname": offer.GetHostname(),
	}
	for _, attribute := range offer.GetAttributes() {
		text := attribute.GetText().GetValue()
		if text != "" {
			offerAttributes[attribute.GetName()] = text
		}
	}
	return offerAttributes
}
Developer ID: elodina, Project: stack-deploy, Lines: 14, Source: constraints.go
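A hedged sketch of consuming the resulting map for a placement decision; the "rack" attribute name is illustrative, and only OfferAttributes itself comes from the excerpt above.

// Hypothetical placement check driven by offer attributes.
attrs := OfferAttributes(offer)
if attrs["rack"] == "us-east-1a" { // illustrative attribute name
	log.Printf("offer from %s is in the desired rack", attrs["hostname"])
}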
Example 14: launchTask
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}
	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) // shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))
	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))
	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}
	s.cluster.Add(offer.GetSlaveId().GetValue(), task)
	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Developer ID: elodina, Project: syslog-service, Lines: 34, Source: scheduler.go
Example 15: FixedMapper
// FixedMapper maps k8s host ports to offered ports, ignoring hostPort == 0 (those remain pod-private)
func FixedMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
	requiredPorts := make(map[uint64]HostPortMapping)
	mapping := []HostPortMapping{}
	for i, container := range t.Pod.Spec.Containers {
		// strip all port==0 from this array; k8s already knows what to do with zero-
		// ports (it does not create 'port bindings' on the minion-host); we need to
		// remove the wildcards from this array since they don't consume host resources
		for pi, port := range container.Ports {
			if port.HostPort == 0 {
				continue // ignore
			}
			m := HostPortMapping{
				ContainerIdx: i,
				PortIdx:      pi,
				OfferPort:    uint64(port.HostPort),
			}
			if entry, inuse := requiredPorts[uint64(port.HostPort)]; inuse {
				return nil, &DuplicateHostPortError{entry, m}
			}
			requiredPorts[uint64(port.HostPort)] = m
		}
	}
	foreachPortsRange(offer.GetResources(), t.Roles(), func(bp, ep uint64, role string) {
		for port := range requiredPorts {
			log.V(3).Infof("evaluating port range {%d:%d} %d", bp, ep, port)
			if (bp <= port) && (port <= ep) {
				m := requiredPorts[port]
				m.Role = starredRole(role)
				mapping = append(mapping, m)
				delete(requiredPorts, port)
			}
		}
	})
	unsatisfiedPorts := len(requiredPorts)
	if unsatisfiedPorts > 0 {
		err := &PortAllocationError{
			PodId: t.Pod.Name,
		}
		for p := range requiredPorts {
			err.Ports = append(err.Ports, p)
		}
		return nil, err
	}
	return mapping, nil
}
Developer ID: johndmulhausen, Project: kubernetes, Lines: 49, Source: port_mapping.go
Example 16: NodeSelectorPredicate
func NodeSelectorPredicate(t *T, offer *mesos.Offer, n *api.Node) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}
	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		if n.Labels == nil {
			return false
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(n.Labels)) {
			return false
		}
	}
	return true
}
Developer ID: pologood, Project: kubernetes, Lines: 18, Source: predicate.go
Example 17: NodeSelectorPredicate
func NodeSelectorPredicate(t *T, offer *mesos.Offer) bool {
	// if the user has specified a target host, make sure this offer is for that host
	if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName {
		return false
	}
	// check the NodeSelector
	if len(t.Pod.Spec.NodeSelector) > 0 {
		slaveLabels := map[string]string{}
		for _, a := range offer.Attributes {
			if a.GetType() == mesos.Value_TEXT {
				slaveLabels[a.GetName()] = a.GetText().GetValue()
			}
		}
		selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(slaveLabels)) {
			return false
		}
	}
	return true
}
Developer ID: qinguoan, Project: vulcan, Lines: 21, Source: predicate.go
Example 18: NodeProcurement
// NodeProcurement updates t.Spec in preparation for the task to be launched on the
// slave associated with the offer.
func NodeProcurement(t *T, offer *mesos.Offer) error {
	t.Spec.SlaveID = offer.GetSlaveId().GetValue()
	t.Spec.AssignedSlave = offer.GetHostname()
	// the hostname of the executor needs to match that of the offer, otherwise
	// the kubelet node status checker/updater is very unhappy
	setCommandArgument(t.executor, "--hostname-override", offer.GetHostname(), true)
	return nil
}
Developer ID: previousnext, Project: kube-ingress, Lines: 12, Source: procurement.go
Example 19: Offer
func Offer(offer *mesos.Offer) string {
	var buffer bytes.Buffer
	buffer.WriteString(offer.GetHostname())
	buffer.WriteString(ID(offer.GetId().GetValue()))
	resources := Resources(offer.GetResources())
	if resources != "" {
		buffer.WriteString(" ")
		buffer.WriteString(resources)
	}
	attributes := Attributes(offer.GetAttributes())
	if attributes != "" {
		buffer.WriteString(" ")
		buffer.WriteString(attributes)
	}
	return buffer.String()
}
Developer ID: elodina, Project: stack-deploy, Lines: 18, Source: pretty.go
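A hedged usage sketch, assuming this function lives in a package imported as pretty (the source file is pretty.go) alongside the ID, Resources, and Attributes helpers it calls; the logging loop itself is illustrative.

// Hypothetical logging of incoming offers in human-readable form.
for _, offer := range offers {
	log.Printf("Received offer: %s", pretty.Offer(offer))
}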
Example 20: newTaskInfo
func (ctx *RunOnceApplicationContext) newTaskInfo(offer *mesos.Offer) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s.%s", ctx.Application.ID, offer.GetHostname())
	taskID := util.NewTaskID(fmt.Sprintf("%s|%s|%s", ctx.Application.ID, offer.GetHostname(), framework.UUID()))
	var URIs []*mesos.CommandInfo_URI
	if len(ctx.Application.ArtifactURLs) > 0 || len(ctx.Application.AdditionalArtifacts) > 0 {
		URIs = make([]*mesos.CommandInfo_URI, 0)
		for _, uri := range ctx.Application.ArtifactURLs {
			URIs = append(URIs, &mesos.CommandInfo_URI{
				Value:   proto.String(uri),
				Extract: proto.Bool(true),
			})
		}
		for _, uri := range ctx.Application.AdditionalArtifacts {
			URIs = append(URIs, &mesos.CommandInfo_URI{
				Value:   proto.String(uri),
				Extract: proto.Bool(true),
			})
		}
	}
	return &mesos.TaskInfo{
		Name:    proto.String(taskName),
		TaskId:  taskID,
		SlaveId: offer.GetSlaveId(),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", ctx.Application.Cpu),
			util.NewScalarResource("mem", ctx.Application.Mem),
		},
		Command: &mesos.CommandInfo{
			Shell: proto.Bool(true),
			Value: proto.String(ctx.Application.LaunchCommand),
			Uris:  URIs,
		},
	}
}
Developer ID: elodina, Project: stack-deploy, Lines: 36, Source: run_once_application_context.go
Note: the github.com/mesos/mesos-go/mesosproto.Offer class examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub/MSDocs; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code belongs to the original authors; consult each project's License before using or redistributing it. Do not reproduce without permission.