This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.FromUnversionedClient. If you have been wondering what exactly FromUnversionedClient does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
A total of 20 FromUnversionedClient code examples are shown below, sorted by popularity by default.
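Before the individual examples, here is a minimal sketch of the common pattern, assuming an already-constructed unversioned *client.Client (for example the one carried by an e2e test Framework): FromUnversionedClient wraps the legacy client into the generated clientset, whose typed group clients (Core(), Extensions(), ...) are then used for API calls. The listPods helper and its arguments are illustrative placeholders, not taken from the examples below.

// A minimal usage sketch (illustrative only, not from the examples below).
package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// listPods is a hypothetical helper: it adapts a legacy unversioned client to
// the generated clientset interface and lists the pods in the given namespace.
func listPods(uc *client.Client, ns string) ([]api.Pod, error) {
	// Wrap the unversioned client so the typed group clients can be used.
	cs := clientset.FromUnversionedClient(uc)
	pods, err := cs.Core().Pods(ns).List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	return pods.Items, nil
}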
Example 1: testDeploymentCleanUpPolicy
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  "nginx",
	}
	rsName := "nginx-controller"
	replicas := 1
	revisionHistoryLimit := util.IntPtr(0)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
	Expect(err).NotTo(HaveOccurred())
}
Developer: leecalcote, Project: kubernetes, Lines: 34, Source: deployment.go
Example 2: testRollingUpdateDeploymentEvents
func testRollingUpdateDeploymentEvents(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
	rsPodLabels := map[string]string{
		"name": "sample-pod-2",
		"pod":  "nginx",
	}
	rsName := "nginx-controller"
	replicas := 1
	rsRevision := "3546343826724305832"
	annotations := make(map[string]string)
	annotations[deploymentutil.RevisionAnnotation] = rsRevision
	rs := newRS(rsName, replicas, rsPodLabels, "nginx", "nginx")
	rs.Annotations = annotations
	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment-2"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the pods were scaled up and down as expected. We use events to verify that.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	waitForEvents(unversionedClient, ns, deployment, 2)
	events, err := c.Core().Events(ns).Search(deployment)
	if err != nil {
		Logf("error in listing events: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// There should be 2 events, one to scale up the new ReplicaSet and then to scale down
	// the old ReplicaSet.
	Expect(len(events.Items)).Should(Equal(2))
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS).NotTo(Equal(nil))
	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 1", newRS.Name)))
	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
	// Check if it's updated to revision 3546343826724305833 correctly
	checkDeploymentRevision(c, ns, deploymentName, "3546343826724305833", "redis", "redis")
}
Developer: leecalcote, Project: kubernetes, Lines: 60, Source: deployment.go
Example 3: testNewDeployment
func testNewDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	c := clientset.FromUnversionedClient(f.Client)
	deploymentName := "test-new-deployment"
	podLabels := map[string]string{"name": nginxImageName}
	replicas := 1
	Logf("Creating simple deployment %s", deploymentName)
	d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Wait for it to be updated to revision 1
	err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	// Check new RS annotations
	Expect(newRS.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
	Expect(newRS.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
	Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
	Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set"))
}
Developer: sittercity, Project: kubernetes, Lines: 33, Source: deployment.go
Example 4: RunServiceAccountTokensController
// RunServiceAccountTokensController starts the service account token controller
func (c *MasterConfig) RunServiceAccountTokensController() {
	if len(c.Options.ServiceAccountConfig.PrivateKeyFile) == 0 {
		glog.Infof("Skipped starting Service Account Token Manager, no private key specified")
		return
	}
	privateKey, err := serviceaccount.ReadPrivateKey(c.Options.ServiceAccountConfig.PrivateKeyFile)
	if err != nil {
		glog.Fatalf("Error reading signing key for Service Account Token Manager: %v", err)
	}
	rootCA := []byte{}
	if len(c.Options.ServiceAccountConfig.MasterCA) > 0 {
		rootCA, err = ioutil.ReadFile(c.Options.ServiceAccountConfig.MasterCA)
		if err != nil {
			glog.Fatalf("Error reading master ca file for Service Account Token Manager: %s: %v", c.Options.ServiceAccountConfig.MasterCA, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			glog.Fatalf("Error parsing master ca file for Service Account Token Manager: %s: %v", c.Options.ServiceAccountConfig.MasterCA, err)
		}
	}
	options := sacontroller.TokensControllerOptions{
		TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
		RootCA:         rootCA,
	}
	sacontroller.NewTokensController(internalclientset.FromUnversionedClient(c.KubeClient()), options).Run()
}
Developer: richm, Project: origin, Lines: 29, Source: run_components.go
Example 5: RunPersistentVolumeProvisioner
func (c *MasterConfig) RunPersistentVolumeProvisioner(client *client.Client) {
	provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, c.ControllerManager.VolumeConfiguration)
	if err != nil {
		// a provisioner was expected but encountered an error
		glog.Fatal(err)
	}
	// not all cloud providers have a provisioner.
	if provisioner != nil {
		allPlugins := []volume.VolumePlugin{}
		allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
		controllerClient := volumeclaimbinder.NewControllerClient(internalclientset.FromUnversionedClient(client))
		provisionerController, err := volumeclaimbinder.NewPersistentVolumeProvisionerController(
			controllerClient,
			c.ControllerManager.PVClaimBinderSyncPeriod.Duration,
			c.ControllerManager.ClusterName,
			allPlugins,
			provisioner,
			c.CloudProvider,
		)
		if err != nil {
			glog.Fatalf("Could not start Persistent Volume Provisioner: %+v", err)
		}
		provisionerController.Run()
	}
}
Developer: jwforres, Project: origin, Lines: 28, Source: master.go
Example 6: testDeploymentLabelAdopted
func testDeploymentLabelAdopted(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	podName := "nginx"
	podLabels := map[string]string{"name": podName}
	rsName := "test-adopted-controller"
	replicas := 3
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, podName))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, podName, false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a nginx deployment to adopt the old rs.
	deploymentName := "test-adopted-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, podName, podName, extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Wait for it to be updated to revision 1
	err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", "nginx")
	Expect(err).NotTo(HaveOccurred())
	// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// There should be no old RSs (overlapping RS)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	oldRSs, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(oldRSs)).Should(Equal(0))
	Expect(len(allOldRSs)).Should(Equal(0))
	// New RS should contain pod-template-hash in its selector, label, and template label
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(newRS.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(newRS.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := api.ListOptions{LabelSelector: selector}
	pods, err := c.Core().Pods(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		Expect(len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	}
	Expect(len(pods.Items)).Should(Equal(replicas))
}
Developer: dpratt, Project: kubernetes, Lines: 60, Source: deployment.go
Example 7: RunDaemonSetsController
func (c *MasterConfig) RunDaemonSetsController(client *client.Client) {
	controller := daemon.NewDaemonSetsController(
		internalclientset.FromUnversionedClient(client),
		kctrlmgr.ResyncPeriod(c.ControllerManager),
		c.ControllerManager.LookupCacheSizeForDaemonSet,
	)
	go controller.Run(c.ControllerManager.ConcurrentDaemonSetSyncs, utilwait.NeverStop)
}
Developer: jwforres, Project: origin, Lines: 8, Source: master.go
Example 8: RunReplicationController
// RunReplicationController starts the Kubernetes replication controller sync loop
func (c *MasterConfig) RunReplicationController(client *client.Client) {
	controllerManager := replicationcontroller.NewReplicationManager(
		internalclientset.FromUnversionedClient(client),
		kctrlmgr.ResyncPeriod(c.ControllerManager),
		replicationcontroller.BurstReplicas,
		c.ControllerManager.LookupCacheSizeForRC,
	)
	go controllerManager.Run(c.ControllerManager.ConcurrentRCSyncs, utilwait.NeverStop)
}
Developer: jwforres, Project: origin, Lines: 10, Source: master.go
Example 9: RunPersistentVolumeClaimRecycler
func (c *MasterConfig) RunPersistentVolumeClaimRecycler(recyclerImageName string, client *client.Client, namespace string) {
	uid := int64(0)
	defaultScrubPod := volume.NewPersistentVolumeRecyclerPodTemplate()
	defaultScrubPod.Namespace = namespace
	defaultScrubPod.Spec.Containers[0].Image = recyclerImageName
	defaultScrubPod.Spec.Containers[0].Command = []string{"/usr/bin/recycle"}
	defaultScrubPod.Spec.Containers[0].Args = []string{"/scrub"}
	defaultScrubPod.Spec.Containers[0].SecurityContext = &kapi.SecurityContext{RunAsUser: &uid}
	defaultScrubPod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent
	volumeConfig := c.ControllerManager.VolumeConfiguration
	hostPathConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   volumeConfig.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath,
		RecyclerTimeoutIncrement: volumeConfig.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath,
		RecyclerPodTemplate:      defaultScrubPod,
	}
	if len(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath) != 0 {
		if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
			glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
		}
	}
	nfsConfig := volume.VolumeConfig{
		RecyclerMinimumTimeout:   volumeConfig.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS,
		RecyclerTimeoutIncrement: volumeConfig.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS,
		RecyclerPodTemplate:      defaultScrubPod,
	}
	if len(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS) != 0 {
		if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
			glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
		}
	}
	allPlugins := []volume.VolumePlugin{}
	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
	// dynamic provisioning allows deletion of volumes as a recycling operation after a claim is deleted
	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
	recycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(
		internalclientset.FromUnversionedClient(client),
		c.ControllerManager.PVClaimBinderSyncPeriod.Duration,
		volumeConfig.PersistentVolumeRecyclerConfiguration.MaximumRetry,
		allPlugins,
		c.CloudProvider,
	)
	if err != nil {
		glog.Fatalf("Could not start Persistent Volume Recycler: %+v", err)
	}
	recycler.Run()
}
Developer: jwforres, Project: origin, Lines: 55, Source: master.go
Example 10: main
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	util.InitFlags()
	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}
	// create a client to communicate with API server.
	cl, err := createClientFromFile(config.KubeconfigPath)
	clientset := clientset.FromUnversionedClient(cl)
	if err != nil {
		glog.Fatal("Failed to create a Client. Exiting.")
	}
	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisortest.Fake)
		containerManager := cm.NewStubContainerManager()
		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
		fakeDockerClient.EnableSleep = true
		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
			maxPods,
		)
		hollowKubelet.Run()
	}
	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})
		iptInterface := fakeiptables.NewFake()
		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
Developer: mataihang, Project: kubernetes, Lines: 55, Source: hollow-node.go
Example 11: testRecreateDeployment
func testRecreateDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
	rsPodLabels := map[string]string{
		"name": "sample-pod-3",
		"pod":  nginxImageName,
	}
	rsName := "test-recreate-controller"
	replicas := 3
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-recreate-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Wait for it to be updated to revision 1
	err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the pods were scaled up and down as expected. We use events to verify that.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	waitForEvents(unversionedClient, ns, deployment, 2)
	events, err := c.Core().Events(ns).Search(deployment)
	if err != nil {
		Logf("error in listing events: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet.
	Expect(len(events.Items)).Should(Equal(2))
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS).NotTo(Equal(nil))
	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 3", newRS.Name)))
}
Developer: sittercity, Project: kubernetes, Lines: 55, Source: deployment.go
Example 12: RunHPAController
// RunHPAController starts the Kubernetes hpa controller sync loop
func (c *MasterConfig) RunHPAController(oc *osclient.Client, kc *client.Client, heapsterNamespace string) {
	clientsetClient := internalclientset.FromUnversionedClient(kc)
	delegatingScaleNamespacer := osclient.NewDelegatingScaleNamespacer(oc, kc)
	podautoscaler := podautoscalercontroller.NewHorizontalController(
		coreunversioned.EventsGetter(clientsetClient),
		extensionsunversioned.ScalesGetter(delegatingScaleNamespacer),
		extensionsunversioned.HorizontalPodAutoscalersGetter(clientsetClient),
		metrics.NewHeapsterMetricsClient(clientsetClient, heapsterNamespace, "https", "heapster", ""),
		c.ControllerManager.HorizontalPodAutoscalerSyncPeriod.Duration,
	)
	go podautoscaler.Run(utilwait.NeverStop)
}
Developer: jwforres, Project: origin, Lines: 13, Source: master.go
Example 13: RunNamespaceController
// RunNamespaceController starts the Kubernetes Namespace Manager
func (c *MasterConfig) RunNamespaceController() {
	versions := []string{}
	for _, version := range configapi.GetEnabledAPIVersionsForGroup(c.Options, configapi.APIGroupKube) {
		versions = append(versions, unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: version}.String())
	}
	for _, version := range configapi.GetEnabledAPIVersionsForGroup(c.Options, configapi.APIGroupExtensions) {
		versions = append(versions, unversioned.GroupVersion{Group: configapi.APIGroupExtensions, Version: version}.String())
	}
	apiVersions := &unversioned.APIVersions{Versions: versions}
	namespaceController := namespacecontroller.NewNamespaceController(internalclientset.FromUnversionedClient(c.KubeClient), apiVersions, c.ControllerManager.NamespaceSyncPeriod)
	go namespaceController.Run(c.ControllerManager.ConcurrentNamespaceSyncs, utilwait.NeverStop)
}
Developer: richm, Project: origin, Lines: 13, Source: master.go
Example 14: testRollingUpdateDeployment
func testRollingUpdateDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rcPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  "nginx",
	}
	rcName := "nginx-controller"
	replicas := 3
	_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("deleting replication controller %s", rcName)
		Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
	}()
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// Check if it's updated to revision 1 correctly
	checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
}
Developer: initlove, Project: kubernetes, Lines: 50, Source: deployment.go
Example 15: testNewDeployment
func testNewDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(f.Client)
	deploymentName := "nginx-deployment"
	podLabels := map[string]string{"name": "nginx"}
	replicas := 1
	Logf("Creating simple deployment %s", deploymentName)
	d := newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Annotations = map[string]string{"test": "should-copy-to-RC", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-RC"}
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()
	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "nginx", false, replicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// DeploymentStatus should be appropriately updated.
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	Expect(deployment.Status.Replicas).Should(Equal(replicas))
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))
	// Check if it's updated to revision 1 correctly
	_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")
	// Check other annotations
	Expect(newRC.Annotations["test"]).Should(Equal("should-copy-to-RC"))
	Expect(newRC.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
	Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-RC"))
	Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-RC"))
}
Developer: initlove, Project: kubernetes, Lines: 49, Source: deployment.go
Example 16: testRollingUpdateDeployment
func testRollingUpdateDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rsPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  nginxImageName,
	}
	rsName := "test-rolling-update-controller"
	replicas := 3
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-rolling-update-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Wait for it to be updated to revision 1
	err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// There should be 1 old RS (nginx-controller, which is adopted)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(allOldRSs)).Should(Equal(1))
	// The old RS should contain pod-template-hash in its selector, label, and template label
	Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}
Developer: sittercity, Project: kubernetes, Lines: 49, Source: deployment.go
Example 17: RunServiceAccountsController
// RunServiceAccountsController starts the service account controller
func (c *MasterConfig) RunServiceAccountsController() {
	if len(c.Options.ServiceAccountConfig.ManagedNames) == 0 {
		glog.Infof("Skipped starting Service Account Manager, no managed names specified")
		return
	}
	options := sacontroller.DefaultServiceAccountsControllerOptions()
	options.ServiceAccounts = []kapi.ServiceAccount{}
	for _, saName := range c.Options.ServiceAccountConfig.ManagedNames {
		sa := kapi.ServiceAccount{}
		sa.Name = saName
		options.ServiceAccounts = append(options.ServiceAccounts, sa)
	}
	sacontroller.NewServiceAccountsController(internalclientset.FromUnversionedClient(c.KubeClient()), options).Run()
}
Developer: richm, Project: origin, Lines: 18, Source: run_components.go
Example 18: newServiceAccountTokenGetter
func newServiceAccountTokenGetter(options configapi.MasterConfig, client newetcdclient.Client) (serviceaccount.ServiceAccountTokenGetter, error) {
	var tokenGetter serviceaccount.ServiceAccountTokenGetter
	if options.KubernetesMasterConfig == nil {
		// When we're running against an external Kubernetes, use the external kubernetes client to validate service account tokens
		// This prevents infinite auth loops if the privilegedLoopbackKubeClient authenticates using a service account token
		kubeClient, _, err := configapi.GetKubeClient(options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return nil, err
		}
		tokenGetter = sacontroller.NewGetterFromClient(internalclientset.FromUnversionedClient(kubeClient))
	} else {
		// When we're running in-process, go straight to etcd (using the KubernetesStorageVersion/KubernetesStoragePrefix, since service accounts are kubernetes objects)
		codec := kapi.Codecs.LegacyCodec(unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion})
		ketcdHelper := etcdstorage.NewEtcdStorage(client, codec, options.EtcdStorageConfig.KubernetesStoragePrefix, false)
		tokenGetter = sacontroller.NewGetterFromStorageInterface(ketcdHelper)
	}
	return tokenGetter, nil
}
Developer: poomsujarit, Project: origin, Lines: 18, Source: master_config.go
Example 19: testDeploymentCleanUpPolicy
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rcPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  "nginx",
	}
	rcName := "nginx-controller"
	replicas := 1
	revisionHistoryLimit := new(int)
	*revisionHistoryLimit = 0
	_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()
	err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
	Expect(err).NotTo(HaveOccurred())
}
Developer: initlove, Project: kubernetes, Lines: 44, Source: deployment.go
Example 20: RunNodeController
// RunNodeController starts the node controller
func (c *MasterConfig) RunNodeController() {
	s := c.ControllerManager
	controller := nodecontroller.NewNodeController(
		c.CloudProvider,
		internalclientset.FromUnversionedClient(c.KubeClient),
		s.PodEvictionTimeout,
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), // upstream uses the same ones too
		s.NodeMonitorGracePeriod,
		s.NodeStartupGracePeriod,
		s.NodeMonitorPeriod,
		(*net.IPNet)(&s.ClusterCIDR),
		s.AllocateNodeCIDRs,
	)
	controller.Run(s.NodeSyncPeriod)
}
Developer: richm, Project: origin, Lines: 21, Source: master.go
Note: The k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.FromUnversionedClient examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use are subject to each project's License. Do not reproduce without permission.