This page collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/apis/batch.Kind. If you are unsure what the Kind function does or how to call it, the curated examples below should help.
A total of 18 code examples of the Kind function are shown, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Golang samples.
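Before diving into the examples, it helps to know what batch.Kind returns: it qualifies a bare kind name (such as "Job") with the batch API group, yielding an unversioned.GroupKind value that can be compared directly in the switch/case statements and map lookups below. The following is only a minimal sketch of that idea, not a quote of the Kubernetes register.go source; the exact SchemeGroupVersion value is an assumption made for illustration.

package batch

import "k8s.io/kubernetes/pkg/api/unversioned"

// SchemeGroupVersion names the internal version of the batch API group
// (the concrete value is assumed here for illustration).
var SchemeGroupVersion = unversioned.GroupVersion{Group: "batch", Version: ""}

// Kind qualifies an unqualified kind name with the batch group, so that
// Kind("Job") compares equal to unversioned.GroupKind{Group: "batch", Kind: "Job"}.
func Kind(kind string) unversioned.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

In the reapers, scalers, and admission plugins shown below, these GroupKind values act as switch labels and map keys that select per-resource behavior.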
Example 1: ReaperFor
func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c, Interval, Timeout}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c, Interval, Timeout}, nil
	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c, Interval, Timeout}, nil
	case api.Kind("Pod"):
		return &PodReaper{c}, nil
	case api.Kind("Service"):
		return &ServiceReaper{c}, nil
	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c, Interval, Timeout}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c, Interval, Timeout}, nil
	}
	return nil, &NoSuchReaperError{kind}
}
Developer: Clarifai | Project: kubernetes | Lines: 26 | Source: stop.go
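As a quick illustration of how ReaperFor is consumed, the sketch below resolves a reaper for the batch Job kind and asks it to stop a job. The helper name stopJob, the namespace/name arguments, and the five-minute timeout are placeholders rather than code from the example above; imports mirror those of Example 1.

// stopJob is a hypothetical helper: look up the reaper registered for the
// batch Job kind, then delegate the teardown (scale down, delete pods, delete job).
func stopJob(c client.Interface, namespace, name string) error {
	reaper, err := ReaperFor(batch.Kind("Job"), c)
	if err != nil {
		return err
	}
	// A nil *api.DeleteOptions means the default grace period is used for the pods.
	return reaper.Stop(namespace, name, 5*time.Minute, nil)
}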
Example 2: ReaperFor
func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil
	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil
	case api.Kind("Pod"):
		return &PodReaper{c.Core()}, nil
	case api.Kind("Service"):
		return &ServiceReaper{c.Core()}, nil
	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil
	case apps.Kind("StatefulSet"):
		return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil
	}
	return nil, &NoSuchReaperError{kind}
}
Developer: alex-mohr | Project: kubernetes | Lines: 29 | Source: stop.go
Example 3: generateConfigsForGroup
func generateConfigsForGroup(
	nss []*v1.Namespace,
	groupName string,
	size, count int,
	image string,
	command []string,
	kind schema.GroupKind,
	secretsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
	configs := make([]testutils.RunObjectConfig, 0, count)
	secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
	for i := 1; i <= count; i++ {
		namespace := nss[i%len(nss)].Name
		secretNames := make([]string, 0, secretsPerPod)
		for j := 0; j < secretsPerPod; j++ {
			secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
			secretConfigs = append(secretConfigs, &testutils.SecretConfig{
				Content: map[string]string{"foo": "bar"},
				Client: nil, // this will be overwritten later
				Name: secretName,
				Namespace: namespace,
				LogFunc: framework.Logf,
			})
			secretNames = append(secretNames, secretName)
		}
		baseConfig := &testutils.RCConfig{
			Client: nil, // this will be overwritten later
			InternalClient: nil, // this will be overwritten later
			Name: groupName + "-" + strconv.Itoa(i),
			Namespace: namespace,
			Timeout: 10 * time.Minute,
			Image: image,
			Command: command,
			Replicas: size,
			CpuRequest: 10, // 0.01 core
			MemRequest: 26214400, // 25MB
			SecretNames: secretNames,
		}
		var config testutils.RunObjectConfig
		switch kind {
		case api.Kind("ReplicationController"):
			config = baseConfig
		case extensions.Kind("ReplicaSet"):
			config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
		case extensions.Kind("Deployment"):
			config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
		case batch.Kind("Job"):
			config = &testutils.JobConfig{RCConfig: *baseConfig}
		default:
			framework.Failf("Unsupported kind for config creation: %v", kind)
		}
		configs = append(configs, config)
	}
	return configs, secretConfigs
}
Developer: kubernetes | Project: kubernetes | Lines: 58 | Source: load.go
Example 4: ScalerFor
func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
Developer: CodeJuan | Project: kubernetes | Lines: 13 | Source: scale.go
Example 5: ScalerFor
func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c.Core()}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case batch.Kind("Job"):
		return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
	case apps.Kind("StatefulSet"):
		return &StatefulSetScaler{c.Apps()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
Developer: johscheuer | Project: kubernetes | Lines: 15 | Source: scale.go
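Examples 7-9 further down use ScalerFor inside e2e tests; condensed into a helper, the typical call sequence looks like the sketch below. The helper name scaleJob and the retry intervals are placeholders, and the clientset is assumed to be the same internalclientset.Interface used in Example 5.

// scaleJob is a hypothetical helper: resolve the scaler for the batch Job kind
// and drive the job to the requested parallelism, retrying until it converges.
func scaleJob(c internalclientset.Interface, namespace, name string, replicas uint) error {
	scaler, err := ScalerFor(batch.Kind("Job"), c)
	if err != nil {
		return err
	}
	retry := NewRetryParams(5*time.Second, 1*time.Minute)           // retry the scale update itself
	waitForReplicas := NewRetryParams(5*time.Second, 5*time.Minute) // wait for pods to reach the new size
	// nil preconditions: scale unconditionally, regardless of the current size.
	return scaler.Scale(namespace, name, replicas, nil, retry, waitForReplicas)
}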
Example 6: Stop
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	jobs := reaper.Batch().Jobs(namespace)
	pods := reaper.Pods(namespace)
	scaler, err := ScalerFor(batch.Kind("Job"), *reaper)
	if err != nil {
		return err
	}
	job, err := jobs.Get(name)
	if err != nil {
		return err
	}
	if timeout == 0 {
		// we will never have more active pods than job.Spec.Parallelism
		parallelism := *job.Spec.Parallelism
		timeout = Timeout + time.Duration(10*parallelism)*time.Second
	}
	// TODO: handle overlapping jobs
	retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
	waitForJobs := NewRetryParams(reaper.pollInterval, timeout)
	if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil {
		return err
	}
	// at this point only dead pods are left, that should be removed
	selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector)
	options := api.ListOptions{LabelSelector: selector}
	podList, err := pods.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, pod := range podList.Items {
		if err := pods.Delete(pod.Name, gracePeriod); err != nil {
			// ignores the error when the pod isn't found
			if !errors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}
	// once we have all the pods removed we can safely remove the job itself
	return jobs.Delete(name, nil)
}
Developer: Clarifai | Project: kubernetes | Lines: 45 | Source: stop.go
Example 7:
})
It("should scale a job up", func() {
	startParallelism := int32(1)
	endParallelism := int32(2)
	By("Creating a job")
	job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
	job, err := createV1Job(f.Client, f.Namespace.Name, job)
	Expect(err).NotTo(HaveOccurred())
	By("Ensuring active pods == startParallelism")
	err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
	Expect(err).NotTo(HaveOccurred())
	By("scale job up")
	scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
	Expect(err).NotTo(HaveOccurred())
	waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
	waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
	scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
	Expect(err).NotTo(HaveOccurred())
	By("Ensuring active pods == endParallelism")
	err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
	Expect(err).NotTo(HaveOccurred())
})
It("should scale a job down", func() {
	startParallelism := int32(2)
	endParallelism := int32(1)
	By("Creating a job")
Developer: CodeJuan | Project: kubernetes | Lines: 31 | Source: batch_v1_jobs.go
Example 8:
})
It("should scale a job up", func() {
	startParallelism := int32(1)
	endParallelism := int32(2)
	By("Creating a job")
	job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
	job, err := createJob(f.Client, f.Namespace.Name, job)
	Expect(err).NotTo(HaveOccurred())
	By("Ensuring active pods == startParallelism")
	err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
	Expect(err).NotTo(HaveOccurred())
	By("scale job up")
	scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
	Expect(err).NotTo(HaveOccurred())
	waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
	waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
	scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
	Expect(err).NotTo(HaveOccurred())
	By("Ensuring active pods == endParallelism")
	err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
	Expect(err).NotTo(HaveOccurred())
})
It("should scale a job down", func() {
	startParallelism := int32(2)
	endParallelism := int32(1)
	By("Creating a job")
Developer: huang195 | Project: kubernetes | Lines: 31 | Source: job.go
Example 9:
})
It("should scale a job up", func() {
	startParallelism := int32(1)
	endParallelism := int32(2)
	By("Creating a job")
	job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
	job, err := createJob(f.ClientSet, f.Namespace.Name, job)
	Expect(err).NotTo(HaveOccurred())
	By("Ensuring active pods == startParallelism")
	err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
	Expect(err).NotTo(HaveOccurred())
	By("scale job up")
	scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
	Expect(err).NotTo(HaveOccurred())
	waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
	waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
	scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
	Expect(err).NotTo(HaveOccurred())
	By("Ensuring active pods == endParallelism")
	err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
	Expect(err).NotTo(HaveOccurred())
})
It("should scale a job down", func() {
	startParallelism := int32(2)
	endParallelism := int32(1)
	By("Creating a job")
Developer: nak3 | Project: kubernetes | Lines: 31 | Source: job.go
Example 10: TestPodNodeConstraintsResources
func TestPodNodeConstraintsResources(t *testing.T) {
	ns := kapi.NamespaceDefault
	testconfigs := []struct {
		config *api.PodNodeConstraintsConfig
		userinfo user.Info
		reviewResponse *authorizationapi.SubjectAccessReviewResponse
	}{
		{
			config: testConfig(),
			userinfo: serviceaccount.UserInfo("", "", ""),
			reviewResponse: reviewResponse(false, ""),
		},
	}
	testresources := []struct {
		resource func(bool) runtime.Object
		kind unversioned.GroupKind
		groupresource unversioned.GroupResource
		prefix string
	}{
		{
			resource: replicationController,
			kind: kapi.Kind("ReplicationController"),
			groupresource: kapi.Resource("replicationcontrollers"),
			prefix: "ReplicationController",
		},
		{
			resource: deployment,
			kind: extensions.Kind("Deployment"),
			groupresource: extensions.Resource("deployments"),
			prefix: "Deployment",
		},
		{
			resource: replicaSet,
			kind: extensions.Kind("ReplicaSet"),
			groupresource: extensions.Resource("replicasets"),
			prefix: "ReplicaSet",
		},
		{
			resource: job,
			kind: extensions.Kind("Job"),
			groupresource: extensions.Resource("jobs"),
			prefix: "Job",
		},
		{
			resource: job,
			kind: batch.Kind("Job"),
			groupresource: batch.Resource("jobs"),
			prefix: "Job",
		},
		{
			resource: deploymentConfig,
			kind: deployapi.Kind("DeploymentConfig"),
			groupresource: deployapi.Resource("deploymentconfigs"),
			prefix: "DeploymentConfig",
		},
		{
			resource: podTemplate,
			kind: deployapi.Kind("PodTemplate"),
			groupresource: deployapi.Resource("podtemplates"),
			prefix: "PodTemplate",
		},
		{
			resource: podSecurityPolicySubjectReview,
			kind: securityapi.Kind("PodSecurityPolicySubjectReview"),
			groupresource: securityapi.Resource("podsecuritypolicysubjectreviews"),
			prefix: "PodSecurityPolicy",
		},
		{
			resource: podSecurityPolicySelfSubjectReview,
			kind: securityapi.Kind("PodSecurityPolicySelfSubjectReview"),
			groupresource: securityapi.Resource("podsecuritypolicyselfsubjectreviews"),
			prefix: "PodSecurityPolicy",
		},
		{
			resource: podSecurityPolicyReview,
			kind: securityapi.Kind("PodSecurityPolicyReview"),
			groupresource: securityapi.Resource("podsecuritypolicyreviews"),
			prefix: "PodSecurityPolicy",
		},
	}
	testparams := []struct {
		nodeselector bool
		expectedErrorMsg string
		prefix string
	}{
		{
			nodeselector: true,
			expectedErrorMsg: "node selection by label(s) [bogus] is prohibited by policy for your role",
			prefix: "with nodeSelector",
		},
		{
			nodeselector: false,
			expectedErrorMsg: "",
			prefix: "without nodeSelector",
		},
	}
	testops := []struct {
		operation admission.Operation
	}{
		{
			//......... part of the code is omitted here .........
Developer: legionus | Project: origin | Lines: 101 | Source: admission_test.go
Example 11: shouldCheckResource
type podNodeConstraints struct {
	*admission.Handler
	selectorLabelBlacklist sets.String
	config *api.PodNodeConstraintsConfig
	authorizer authorizer.Authorizer
}
// resourcesToCheck is a map of resources and corresponding kinds of things that
// we want handled in this plugin
// TODO: Include a function that will extract the PodSpec from the resource for
// each type added here.
var resourcesToCheck = map[unversioned.GroupResource]unversioned.GroupKind{
	kapi.Resource("pods"): kapi.Kind("Pod"),
	kapi.Resource("podtemplates"): kapi.Kind("PodTemplate"),
	kapi.Resource("replicationcontrollers"): kapi.Kind("ReplicationController"),
	batch.Resource("jobs"): batch.Kind("Job"),
	extensions.Resource("deployments"): extensions.Kind("Deployment"),
	extensions.Resource("replicasets"): extensions.Kind("ReplicaSet"),
	extensions.Resource("jobs"): extensions.Kind("Job"),
	deployapi.Resource("deploymentconfigs"): deployapi.Kind("DeploymentConfig"),
}
// resourcesToIgnore is a list of resource kinds that contain a PodSpec that
// we choose not to handle in this plugin
var resourcesToIgnore = []unversioned.GroupKind{
	extensions.Kind("DaemonSet"),
}
func shouldCheckResource(resource unversioned.GroupResource, kind unversioned.GroupKind) (bool, error) {
	expectedKind, shouldCheck := resourcesToCheck[resource]
	if !shouldCheck {
Developer: RomainVabre | Project: origin | Lines: 31 | Source: admission.go
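The snippet above is cut off inside shouldCheckResource. Based on the surrounding comments, the intent is that resources missing from resourcesToCheck are ignored, and a resource whose registered kind does not match the incoming kind is rejected. The sketch below is an assumption written for illustration, not a quote of the origin source.

// shouldCheckResourceSketch is a hypothetical stand-in showing how the lookup
// above typically completes; it is not the function from the origin repository.
func shouldCheckResourceSketch(resource unversioned.GroupResource, kind unversioned.GroupKind) (bool, error) {
	expectedKind, shouldCheck := resourcesToCheck[resource]
	if !shouldCheck {
		// The resource carries no PodSpec this plugin cares about; admit it untouched.
		return false, nil
	}
	if expectedKind != kind {
		// Resource and kind disagree; refuse rather than guess which PodSpec to check.
		return false, fmt.Errorf("unexpected kind %v for resource %v", kind, resource)
	}
	return true, nil
}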
Example 12: kindSupportsGarbageCollector
func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
	return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}
Developer: nak3 | Project: kubernetes | Lines: 3 | Source: density.go
Example 13:
		Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
		CpuRequest: nodeCpuCapacity / 100,
		MemRequest: nodeMemCapacity / 100,
		MaxContainerFailures: &MaxContainerFailures,
		Silent: true,
		LogFunc: framework.Logf,
		SecretNames: secretNames,
	}
	switch itArg.kind {
	case api.Kind("ReplicationController"):
		configs[i] = baseConfig
	case extensions.Kind("ReplicaSet"):
		configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
	case extensions.Kind("Deployment"):
		configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
	case batch.Kind("Job"):
		configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
	default:
		framework.Failf("Unsupported kind: %v", itArg.kind)
	}
}
dConfig := DensityTestConfig{
	ClientSet: f.ClientSet,
	InternalClientset: f.InternalClientset,
	Configs: configs,
	PodCount: totalPods,
	PollInterval: DensityPollInterval,
	kind: itArg.kind,
	SecretConfigs: secretConfigs,
}
Developer: nak3 | Project: kubernetes | Lines: 31 | Source: density.go
Example 14:
	// What kind of resource we should be creating. Default: ReplicationController
	kind schema.GroupKind
	secretsPerPod int
	daemonsPerNode int
}
densityTests := []Density{
	// TODO: Expose runLatencyTest as ginkgo flag.
	{podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
	{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
	{podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
	{podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
	{podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
	// Tests for other resource types:
	{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment")},
	{podsPerNode: 30, runLatencyTest: true, kind: batch.Kind("Job")},
	// Test scheduling when daemons are preset
	{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
	// Test with secrets
	{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
}
for _, testArg := range densityTests {
	feature := "ManualPerformance"
	switch testArg.podsPerNode {
	case 30:
		if testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
			feature = "Performance"
		}
	case 95:
		feature = "HighDensityPerformance"
Developer: kubernetes | Project: kubernetes | Lines: 31 | Source: density.go
Example 15:
type podNodeConstraints struct {
	*admission.Handler
	selectorLabelBlacklist sets.String
	config *api.PodNodeConstraintsConfig
	authorizer authorizer.Authorizer
}
// resourcesToCheck is a map of resources and corresponding kinds of things that
// we want handled in this plugin
// TODO: Include a function that will extract the PodSpec from the resource for
// each type added here.
var resourcesToCheck = map[unversioned.GroupResource]unversioned.GroupKind{
	kapi.Resource("pods"): kapi.Kind("Pod"),
	kapi.Resource("podtemplates"): kapi.Kind("PodTemplate"),
	kapi.Resource("replicationcontrollers"): kapi.Kind("ReplicationController"),
	batch.Resource("jobs"): batch.Kind("Job"),
	batch.Resource("jobtemplates"): batch.Kind("JobTemplate"),
	batch.Resource("scheduledjobs"): batch.Kind("ScheduledJob"),
	extensions.Resource("deployments"): extensions.Kind("Deployment"),
	extensions.Resource("replicasets"): extensions.Kind("ReplicaSet"),
	extensions.Resource("jobs"): extensions.Kind("Job"),
	extensions.Resource("jobtemplates"): extensions.Kind("JobTemplate"),
	apps.Resource("petsets"): apps.Kind("PetSet"),
	deployapi.Resource("deploymentconfigs"): deployapi.Kind("DeploymentConfig"),
	securityapi.Resource("podsecuritypolicysubjectreviews"): securityapi.Kind("PodSecurityPolicySubjectReview"),
	securityapi.Resource("podsecuritypolicyselfsubjectreviews"): securityapi.Kind("PodSecurityPolicySelfSubjectReview"),
	securityapi.Resource("podsecuritypolicyreviews"): securityapi.Kind("PodSecurityPolicyReview"),
}
// resourcesToIgnore is a list of resource kinds that contain a PodSpec that
// we choose not to handle in this plugin
Developer: legionus | Project: origin | Lines: 31 | Source: admission.go
Example 16: GetKind
func (config *JobConfig) GetKind() schema.GroupKind {
	return batchinternal.Kind("Job")
}
Developer: kubernetes | Project: kubernetes | Lines: 3 | Source: runners.go
Example 17: Update
func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) {
	if c.invalid {
		return nil, kerrors.NewInvalid(batch.Kind(job.Kind), job.Name, nil)
	}
	return nil, errors.New("Job update failure")
}
Developer: Clarifai | Project: kubernetes | Lines: 6 | Source: scale_test.go
Example 18:
	image string
	command []string
	// What kind of resource we want to create
	kind schema.GroupKind
	services bool
	secretsPerPod int
	daemonsPerNode int
}
loadTests := []Load{
	// The container will consume 1 cpu and 512mb of memory.
	{podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
	{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController")},
	// Tests for other resource types
	{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: extensions.Kind("Deployment")},
	{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: batch.Kind("Job")},
	// Test scheduling when daemons are preset
	{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
	// Test with secrets
	{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: extensions.Kind("Deployment"), secretsPerPod: 2},
}
for _, testArg := range loadTests {
	feature := "ManualPerformance"
	if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
		feature = "Performance"
	}
	name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets and %v daemons",
		feature,
		testArg.podsPerNode,
		testArg.kind,
Developer: kubernetes | Project: kubernetes | Lines: 31 | Source: load.go
Note: the k8s.io/kubernetes/pkg/apis/batch.Kind examples on this page were collected by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs, and the snippets were selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; consult each project's license before redistributing or using it. Do not reproduce without permission.