How to use By method of ginkgo Package

Best Ginkgo code snippet using ginkgo.By

Run Ginkgo automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

dynamic_certificates.go

Source: dynamic_certificates.go Github

copy
1/*
2Copyright 2018 The Kubernetes Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17package lua
18
19import (
20	"context"
21	"fmt"
22	"net/http"
23	"strings"
24	"time"
25
26	"github.com/onsi/ginkgo"
27	dto "github.com/prometheus/client_model/go"
28	"github.com/prometheus/common/expfmt"
29	"github.com/prometheus/common/model"
30	"github.com/stretchr/testify/assert"
31	networking "k8s.io/api/networking/v1beta1"
32	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
33
34	"k8s.io/ingress-nginx/test/e2e/framework"
35)
36
// [Lua] dynamic certificates: verifies that the ingress controller picks up
// TLS certificate changes dynamically (via certificate_by_lua) without
// triggering a full Nginx configuration reload. Reload counts are read from
// the nginx_ingress_controller_success metric before/after each change.
var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
	f := framework.NewDefaultFramework("dynamic-certificate")
	host := "foo.com"

	ginkgo.BeforeEach(func() {
		// Fresh echo backend for every spec.
		f.NewEchoDeployment()
	})

	ginkgo.It("picks up the certificate when we add TLS spec to existing ingress", func() {
		// Start from a plain (non-TLS) ingress, then add a TLS section plus
		// its secret and verify HTTPS starts serving the new certificate.
		ensureIngress(f, host, framework.EchoService)

		ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{})
		assert.Nil(ginkgo.GinkgoT(), err)
		ing.Spec.TLS = []networking.IngressTLS{
			{
				Hosts:      []string{host},
				SecretName: host,
			},
		}
		_, err = framework.CreateIngressTLSSecret(f.KubeClientSet,
			ing.Spec.TLS[0].Hosts,
			ing.Spec.TLS[0].SecretName,
			ing.Namespace)
		assert.Nil(ginkgo.GinkgoT(), err)

		_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err)

		// Give the Lua-based dynamic sync time to propagate the change.
		time.Sleep(waitForLuaSync)

		ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, host)
	})

	ginkgo.It("picks up the previously missing secret for a given ingress without reloading", func() {
		// Ingress references a secret that does not exist yet; the default
		// certificate (CN ingress.local) is served until it appears.
		ing := framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, framework.EchoService, 80, nil)
		f.EnsureIngress(ing)

		time.Sleep(waitForLuaSync)

		// Snapshot the reload counter before creating the secret.
		ip := f.GetNginxPodIP()
		mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
		assert.Nil(ginkgo.GinkgoT(), err)
		assert.NotNil(ginkgo.GinkgoT(), mf)

		rc0, err := extractReloadCount(mf)
		assert.Nil(ginkgo.GinkgoT(), err)

		// The query parameter acts as a marker in the access log.
		ensureHTTPSRequest(f, fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, "ingress.local")

		_, err = framework.CreateIngressTLSSecret(f.KubeClientSet,
			ing.Spec.TLS[0].Hosts,
			ing.Spec.TLS[0].SecretName,
			ing.Namespace)
		assert.Nil(ginkgo.GinkgoT(), err)

		time.Sleep(waitForLuaSync)

		ginkgo.By("serving the configured certificate on HTTPS endpoint")
		ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, host)

		log, err := f.NginxLogs()
		assert.Nil(ginkgo.GinkgoT(), err)
		assert.NotEmpty(ginkgo.GinkgoT(), log)

		ginkgo.By("skipping Nginx reload")
		// The reload counter must not have moved: the certificate was picked
		// up dynamically, not through a configuration reload.
		mf, err = f.GetMetric("nginx_ingress_controller_success", ip)
		assert.Nil(ginkgo.GinkgoT(), err)
		assert.NotNil(ginkgo.GinkgoT(), mf)

		rc1, err := extractReloadCount(mf)
		assert.Nil(ginkgo.GinkgoT(), err)

		assert.Equal(ginkgo.GinkgoT(), rc0, rc1)
	})

	ginkgo.Context("given an ingress with TLS correctly configured", func() {
		ginkgo.BeforeEach(func() {
			// Shared setup: TLS ingress whose secret is created after the
			// ingress, so each spec starts from a working HTTPS endpoint.
			ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, framework.EchoService, 80, nil))

			time.Sleep(waitForLuaSync)

			// Before the secret exists, the default certificate is served.
			ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, "ingress.local")

			_, err := framework.CreateIngressTLSSecret(f.KubeClientSet,
				ing.Spec.TLS[0].Hosts,
				ing.Spec.TLS[0].SecretName,
				ing.Namespace)
			assert.Nil(ginkgo.GinkgoT(), err)

			time.Sleep(waitForLuaSync)

			ginkgo.By("configuring certificate_by_lua and skipping Nginx configuration of the new certificate")
			f.WaitForNginxServer(ing.Spec.TLS[0].Hosts[0],
				func(server string) bool {
					return strings.Contains(server, "listen 443")
				})

			time.Sleep(waitForLuaSync)

			ginkgo.By("serving the configured certificate on HTTPS endpoint")
			ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, host)
		})

		/*
			TODO(elvinefendi): this test currently does not work as expected
			because Go transport code strips (https://github.com/golang/go/blob/431b5c69ca214ce4291f008c1ce2a50b22bc2d2d/src/crypto/tls/handshake_messages.go#L424)
			trailing dot from SNI as suggest by the standard (https://tools.ietf.org/html/rfc6066#section-3).
		*/
		ginkgo.It("supports requests with domain with trailing dot", func() {
			ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host+".", host)
		})

		ginkgo.It("picks up the updated certificate without reloading", func() {
			ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			// Marker request so we can inspect only the log tail below.
			ensureHTTPSRequest(f, fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host)

			// Recreate the secret, i.e. rotate the certificate.
			_, err = framework.CreateIngressTLSSecret(f.KubeClientSet,
				ing.Spec.TLS[0].Hosts,
				ing.Spec.TLS[0].SecretName,
				ing.Namespace)
			assert.Nil(ginkgo.GinkgoT(), err)

			time.Sleep(waitForLuaSync)

			ginkgo.By("configuring certificate_by_lua and skipping Nginx configuration of the new certificate")
			f.WaitForNginxServer(ing.Spec.TLS[0].Hosts[0],
				func(server string) bool {
					return strings.Contains(server, "listen 443")
				})

			ginkgo.By("serving the configured certificate on HTTPS endpoint")
			ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, host)

			log, err := f.NginxLogs()
			assert.Nil(ginkgo.GinkgoT(), err)
			assert.NotEmpty(ginkgo.GinkgoT(), log)

			// Only the log portion after the marker request is relevant.
			index := strings.Index(log, "id=dummy_log_splitter_foo_bar")
			assert.GreaterOrEqual(ginkgo.GinkgoT(), index, 0, "log does not contains id=dummy_log_splitter_foo_bar")
			restOfLogs := log[index:]

			ginkgo.By("skipping Nginx reload")
			// No backend-reload log lines may appear after the rotation.
			assert.NotContains(ginkgo.GinkgoT(), restOfLogs, logRequireBackendReload)
			assert.NotContains(ginkgo.GinkgoT(), restOfLogs, logBackendReloadSuccess)
		})

		ginkgo.It("falls back to using default certificate when secret gets deleted without reloading", func() {
			ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			ensureHTTPSRequest(f, fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host)

			// Snapshot the reload counter before deleting the secret.
			ip := f.GetNginxPodIP()
			mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
			assert.Nil(ginkgo.GinkgoT(), err)
			assert.NotNil(ginkgo.GinkgoT(), mf)

			rc0, err := extractReloadCount(mf)
			assert.Nil(ginkgo.GinkgoT(), err)

			err = f.KubeClientSet.CoreV1().Secrets(ing.Namespace).Delete(context.TODO(), ing.Spec.TLS[0].SecretName, metav1.DeleteOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			time.Sleep(waitForLuaSync)

			ginkgo.By("serving the default certificate on HTTPS endpoint")
			ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, "ingress.local")

			mf, err = f.GetMetric("nginx_ingress_controller_success", ip)
			assert.Nil(ginkgo.GinkgoT(), err)
			assert.NotNil(ginkgo.GinkgoT(), mf)

			rc1, err := extractReloadCount(mf)
			assert.Nil(ginkgo.GinkgoT(), err)

			ginkgo.By("skipping Nginx reload")
			// Falling back to the default certificate must not reload Nginx.
			assert.Equal(ginkgo.GinkgoT(), rc0, rc1)
		})

		ginkgo.It("picks up a non-certificate only change", func() {
			newHost := "foo2.com"
			ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			// Change the rule host only; TLS still references the old host,
			// so the new host is served with the default certificate.
			ing.Spec.Rules[0].Host = newHost
			_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			time.Sleep(waitForLuaSync)

			ginkgo.By("serving the configured certificate on HTTPS endpoint")
			ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), newHost, "ingress.local")
		})

		ginkgo.It("removes HTTPS configuration when we delete TLS spec", func() {
			ing, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Get(context.TODO(), host, metav1.GetOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			ing.Spec.TLS = []networking.IngressTLS{}
			_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
			assert.Nil(ginkgo.GinkgoT(), err)

			time.Sleep(waitForLuaSync)

			// Plain HTTP must still work after TLS is removed.
			f.HTTPTestClient().
				GET("/").
				WithHeader("Host", host).
				Expect().
				Status(http.StatusOK)

		})
	})
})
252
253func extractReloadCount(mf *dto.MetricFamily) (float64, error) {
254	vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
255		Timestamp: model.Now(),
256	}, mf)
257
258	if err != nil {
259		return 0, err
260	}
261
262	return float64(vec[0].Value), nil
263}
264
Full Screen

actuation.go

Source: actuation.go Github

copy
1/*
2Copyright 2018 The Kubernetes Authors.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8    http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15*/
16
17package autoscaling
18
19import (
20	"fmt"
21	"time"
22
23	appsv1 "k8s.io/api/apps/v1"
24	autoscaling "k8s.io/api/autoscaling/v1"
25	apiv1 "k8s.io/api/core/v1"
26	policyv1beta1 "k8s.io/api/policy/v1beta1"
27	apierrs "k8s.io/apimachinery/pkg/api/errors"
28	"k8s.io/apimachinery/pkg/api/resource"
29	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
30	"k8s.io/apimachinery/pkg/util/intstr"
31	"k8s.io/apimachinery/pkg/util/wait"
32	"k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
33	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
34	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/annotations"
35	clientset "k8s.io/client-go/kubernetes"
36	"k8s.io/kubernetes/test/e2e/framework"
37	framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"
38	framework_job "k8s.io/kubernetes/test/e2e/framework/job"
39	framework_rs "k8s.io/kubernetes/test/e2e/framework/replicaset"
40	framework_ss "k8s.io/kubernetes/test/e2e/framework/statefulset"
41	testutils "k8s.io/kubernetes/test/utils"
42
43	"github.com/onsi/ginkgo"
44	"github.com/onsi/gomega"
45)
46
// Actuation e2e suite: verifies that the VPA updater/admission-controller
// actually act on recommendations — evicting pods, respecting update modes,
// PodDisruptionBudgets, LimitRanges, and webhook-injected sidecars.
var _ = ActuationSuiteE2eDescribe("Actuation", func() {
	f := framework.NewDefaultFramework("vertical-pod-autoscaling")

	ginkgo.It("stops when pods get pending", func() {

		ginkgo.By("Setting up a hamster deployment")
		d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)

		ginkgo.By("Setting up a VPA CRD with ridiculous request")
		SetupVPA(f, "9999", vpa_types.UpdateModeAuto, hamsterTargetRef) // Request 9999 CPUs to make POD pending

		ginkgo.By("Waiting for pods to be restarted and stuck pending")
		// Exactly 1 pod should stay pending; the updater must stop evicting
		// once pods become unschedulable.
		err := assertPodsPendingForDuration(f.ClientSet, d, 1, 2*time.Minute)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

	})

	ginkgo.It("never applies recommendations when update mode is Off", func() {
		ginkgo.By("Setting up a hamster deployment")
		d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
		cpuRequest := getCPURequest(d.Spec.Template.Spec)
		podList, err := GetHamsterPods(f)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		podSet := MakePodSet(podList)

		ginkgo.By("Setting up a VPA CRD in mode Off")
		SetupVPA(f, "200m", vpa_types.UpdateModeOff, hamsterTargetRef)

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, podSet)
		ginkgo.By("Forcefully killing one pod")
		killPod(f, podList)

		ginkgo.By("Checking the requests were not modified")
		// Even a restarted pod must keep its original request in Off mode.
		updatedPodList, err := GetHamsterPods(f)
		for _, pod := range updatedPodList.Items {
			gomega.Expect(getCPURequest(pod.Spec)).To(gomega.Equal(cpuRequest))
		}
	})

	ginkgo.It("applies recommendations only on restart when update mode is Initial", func() {
		ginkgo.By("Setting up a hamster deployment")
		SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
		podList, err := GetHamsterPods(f)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		podSet := MakePodSet(podList)

		ginkgo.By("Setting up a VPA CRD in mode Initial")
		SetupVPA(f, "200m", vpa_types.UpdateModeInitial, hamsterTargetRef)
		updatedCPURequest := ParseQuantityOrDie("200m")

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, podSet)
		ginkgo.By("Forcefully killing one pod")
		killPod(f, podList)

		ginkgo.By("Checking that request was modified after forceful restart")
		// Only the restarted pod should carry the recommended request.
		updatedPodList, err := GetHamsterPods(f)
		foundUpdated := 0
		for _, pod := range updatedPodList.Items {
			podRequest := getCPURequest(pod.Spec)
			framework.Logf("podReq: %v", podRequest)
			if podRequest.Cmp(updatedCPURequest) == 0 {
				foundUpdated += 1
			}
		}
		gomega.Expect(foundUpdated).To(gomega.Equal(1))
	})

	ginkgo.It("evicts pods in a Deployment", func() {
		testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Name:       "hamster-deployment",
		})
	})

	ginkgo.It("evicts pods in a Replication Controller", func() {
		testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
			APIVersion: "v1",
			Kind:       "ReplicationController",
			Name:       "hamster-rc",
		})
	})

	ginkgo.It("evicts pods in a Job", func() {
		testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
			APIVersion: "batch/v1",
			Kind:       "Job",
			Name:       "hamster-job",
		})
	})

	ginkgo.It("evicts pods in a CronJob", func() {
		testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
			APIVersion: "batch/v1",
			Kind:       "CronJob",
			Name:       "hamster-cronjob",
		})
	})

	ginkgo.It("evicts pods in a ReplicaSet", func() {
		testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "ReplicaSet",
			Name:       "hamster-rs",
		})
	})

	ginkgo.It("evicts pods in a StatefulSet", func() {
		testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "StatefulSet",
			Name:       "hamster-stateful",
		})
	})

	ginkgo.It("observes pod disruption budget", func() {

		ginkgo.By("Setting up a hamster deployment")
		c := f.ClientSet
		ns := f.Namespace.Name

		SetupHamsterDeployment(f, "10m", "10Mi", 10)
		podList, err := GetHamsterPods(f)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		podSet := MakePodSet(podList)

		ginkgo.By("Setting up prohibitive PDB for hamster deployment")
		pdb := setupPDB(f, "hamster-pdb", 0 /* maxUnavailable */)

		ginkgo.By("Setting up a VPA CRD")
		SetupVPA(f, "25m", vpa_types.UpdateModeAuto, hamsterTargetRef)

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, podSet)

		ginkgo.By("Updating the PDB to allow for multiple pods to be evicted")
		// We will check that 7 replicas are evicted in 3 minutes, which translates
		// to 3 updater loops. This gives us relatively good confidence that updater
		// evicts more than one pod in a loop if PDB allows it.
		permissiveMaxUnavailable := 7
		// Creating new PDB and removing old one, since PDBs are immutable at the moment
		setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable)
		err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(pdb.Name, &metav1.DeleteOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String()))
		time.Sleep(VpaEvictionTimeout)
		ginkgo.By("Checking enough pods were evicted.")
		currentPodList, err := GetHamsterPods(f)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		evictedCount := GetEvictedPodsCount(MakePodSet(currentPodList), podSet)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
	})

	ginkgo.It("observes container max in LimitRange", func() {
		ginkgo.By("Setting up a hamster deployment")
		d := NewHamsterDeploymentWithResourcesAndLimits(f,
			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
		podList := startDeploymentPods(f, d)

		ginkgo.By("Setting up a VPA CRD")
		SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef)

		// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
		// recommendation is 200m
		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
		InstallLimitRangeWithMax(f, "300m", "1T", apiv1.LimitTypeContainer)

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, MakePodSet(podList))
	})

	ginkgo.It("observes container min in LimitRange", func() {
		ginkgo.By("Setting up a hamster deployment")
		d := NewHamsterDeploymentWithResourcesAndLimits(f,
			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
		podList := startDeploymentPods(f, d)

		ginkgo.By("Setting up a VPA CRD")
		SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef)

		// Min CPU from limit range is 100m and ratio is 3. Min applies both to limit and request so min
		// request is 100m request and 300m limit
		// Min memory limit is 0 and ratio is 2., so min request is 0
		InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer)

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, MakePodSet(podList))
	})

	ginkgo.It("observes pod max in LimitRange", func() {
		ginkgo.By("Setting up a hamster deployment")
		d := NewHamsterDeploymentWithResourcesAndLimits(f,
			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
		// Duplicate the container so the pod-level LimitRange applies to two containers.
		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
		d.Spec.Template.Spec.Containers[1].Name = "hamster2"
		podList := startDeploymentPods(f, d)

		ginkgo.By("Setting up a VPA CRD")
		SetupVPAForNHamsters(f, 2, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef)

		// Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m,
		// while recommendation is 200m
		// Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T
		InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod)

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, MakePodSet(podList))
	})

	ginkgo.It("observes pod min in LimitRange", func() {
		ginkgo.By("Setting up a hamster deployment")
		d := NewHamsterDeploymentWithResourcesAndLimits(f,
			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
		// Duplicate the container so the pod-level LimitRange applies to two containers.
		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
		d.Spec.Template.Spec.Containers[1].Name = "hamster2"
		podList := startDeploymentPods(f, d)

		ginkgo.By("Setting up a VPA CRD")
		SetupVPAForNHamsters(f, 2, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef)

		// Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. Min applies both
		// to limit and request so min request is 100m request and 300m limit
		// Min memory limit is 0 and ratio is 2., so min request is 0
		InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod)

		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, MakePodSet(podList))
	})

	ginkgo.It("does not act on injected sidecars", func() {
		const (
			// TODO(krzysied): Update the image url when the agnhost:2.10 image
			// is promoted to the k8s-e2e-test-images repository.
			agnhostImage  = "gcr.io/k8s-staging-e2e-test-images/agnhost:2.10"
			sidecarParam  = "--sidecar-image=k8s.gcr.io/pause:3.1"
			sidecarName   = "webhook-added-sidecar"
			servicePort   = int32(8443)
			containerPort = int32(8444)
		)

		ginkgo.By("Setting up Webhook for sidecar injection")

		client := f.ClientSet
		namespaceName := f.Namespace.Name
		defer utils.CleanWebhookTest(client, namespaceName)

		// Make sure the namespace created for the test is labeled to be selected by the webhooks.
		utils.LabelNamespace(f, f.Namespace.Name)
		utils.CreateWebhookConfigurationReadyNamespace(f)

		ginkgo.By("Setting up server cert")
		context := utils.SetupWebhookCert(namespaceName)
		utils.CreateAuthReaderRoleBinding(f, namespaceName)

		utils.DeployWebhookAndService(f, agnhostImage, context, servicePort, containerPort, sidecarParam)

		// Webhook must be placed after vpa webhook. Webhooks are registered alphabetically.
		// Use name that starts with "z".
		webhookCleanup := utils.RegisterMutatingWebhookForPod(f, "z-sidecar-injection-webhook", context, servicePort)
		defer webhookCleanup()

		ginkgo.By("Setting up a hamster vpa")

		mode := vpa_types.UpdateModeAuto
		hamsterResourceList := apiv1.ResourceList{apiv1.ResourceCPU: ParseQuantityOrDie("100m")}
		// Deliberately huge sidecar recommendation: it must never be applied.
		sidecarResourceList := apiv1.ResourceList{apiv1.ResourceCPU: ParseQuantityOrDie("5000m")}

		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
		vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode

		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
			ContainerRecommendations: []vpa_types.RecommendedContainerResources{
				{
					ContainerName: GetHamsterContainerNameByIndex(0),
					Target:        hamsterResourceList,
					LowerBound:    hamsterResourceList,
					UpperBound:    hamsterResourceList,
				},
				{
					ContainerName: sidecarName,
					Target:        sidecarResourceList,
					LowerBound:    sidecarResourceList,
					UpperBound:    sidecarResourceList,
				},
			},
		}

		InstallVPA(f, vpaCRD)

		ginkgo.By("Setting up a hamster deployment")

		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi"))
		podList := startDeploymentPods(f, d)
		for _, pod := range podList.Items {
			// VPA must observe only the original container, not the injected
			// sidecar, even though the pod now has two containers.
			observedContainers, ok := pod.GetAnnotations()[annotations.VpaObservedContainersLabel]
			gomega.Expect(ok).To(gomega.Equal(true))
			containers, err := annotations.ParseVpaObservedContainersValue(observedContainers)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			gomega.Expect(containers).To(gomega.HaveLen(1))
			gomega.Expect(pod.Spec.Containers).To(gomega.HaveLen(2))
		}

		podSet := MakePodSet(podList)
		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
		CheckNoPodsEvicted(f, podSet)
	})
})
362
363func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity {
364	return podSpec.Containers[0].Resources.Requests[apiv1.ResourceCPU]
365}
366
367func killPod(f *framework.Framework, podList *apiv1.PodList) {
368	f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podList.Items[0].Name, &metav1.DeleteOptions{})
369	err := WaitForPodsRestarted(f, podList)
370	gomega.Expect(err).NotTo(gomega.HaveOccurred())
371}
372
// assertPodsPendingForDuration checks that at most pendingPodsNum pods are pending for pendingDuration.
// It polls the deployment's pods, recording when each pod was first seen in
// the Pending phase, and succeeds once exactly pendingPodsNum pods have each
// been continuously Pending for at least pendingDuration. Seeing more than
// pendingPodsNum pending pods is an immediate failure.
func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Deployment, pendingPodsNum int, pendingDuration time.Duration) error {

	// Maps pod name -> time the pod was first observed in the Pending phase.
	pendingPods := make(map[string]time.Time)

	err := wait.PollImmediate(pollInterval, pollTimeout+pendingDuration, func() (bool, error) {
		var err error
		currentPodList, err := framework_deployment.GetPodsForDeployment(c, deployment)
		if err != nil {
			return false, err
		}

		// Start from all tracked pods; any that are not re-observed below
		// no longer exist and must be dropped from tracking.
		missingPods := make(map[string]bool)
		for podName := range pendingPods {
			missingPods[podName] = true
		}

		now := time.Now()
		for _, pod := range currentPodList.Items {
			delete(missingPods, pod.Name)
			switch pod.Status.Phase {
			case apiv1.PodPending:
				_, ok := pendingPods[pod.Name]
				if !ok {
					// First observation of this pod as Pending.
					pendingPods[pod.Name] = now
				}
			default:
				// Pod left the Pending phase; its pending streak is broken.
				delete(pendingPods, pod.Name)
			}
		}

		// Forget pods that disappeared entirely.
		for missingPod := range missingPods {
			delete(pendingPods, missingPod)
		}

		// Not enough pending pods yet: keep polling.
		if len(pendingPods) < pendingPodsNum {
			return false, nil
		}

		// Too many pending pods is a hard failure.
		if len(pendingPods) > pendingPodsNum {
			return false, fmt.Errorf("%v pending pods seen - expecting %v", len(pendingPods), pendingPodsNum)
		}

		// Every tracked pod must have been Pending long enough.
		for p, t := range pendingPods {
			// NOTE(review): stdout debug print left in place; consider framework.Logf.
			fmt.Println("task", now, p, t, now.Sub(t), pendingDuration)
			if now.Sub(t) < pendingDuration {
				return false, nil
			}
		}

		return true, nil
	})

	if err != nil {
		return fmt.Errorf("assertion failed for pending pods in %v: %v", deployment.Name, err)
	}
	return nil
}
431
432func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) {
433	ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind))
434	setupHamsterController(f, controller.Kind, "100m", "100Mi", defaultHamsterReplicas)
435	podList, err := GetHamsterPods(f)
436	gomega.Expect(err).NotTo(gomega.HaveOccurred())
437
438	ginkgo.By("Setting up a VPA CRD")
439	SetupVPA(f, "200m", vpa_types.UpdateModeAuto, controller)
440
441	ginkgo.By("Waiting for pods to be evicted")
442	err = WaitForPodsEvicted(f, podList)
443	gomega.Expect(err).NotTo(gomega.HaveOccurred())
444}
445
446func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory string, replicas int32) *apiv1.PodList {
447	switch controllerKind {
448	case "Deployment":
449		SetupHamsterDeployment(f, cpu, memory, replicas)
450	case "ReplicationController":
451		setupHamsterReplicationController(f, cpu, memory, replicas)
452	case "Job":
453		setupHamsterJob(f, cpu, memory, replicas)
454	case "CronJob":
455		SetupHamsterCronJob(f, "*/2 * * * *", cpu, memory, replicas)
456	case "ReplicaSet":
457		setupHamsterRS(f, cpu, memory, replicas)
458	case "StatefulSet":
459		setupHamsterStateful(f, cpu, memory, replicas)
460	default:
461		framework.Failf("Unknown controller kind: %v", controllerKind)
462		return nil
463	}
464	pods, err := GetHamsterPods(f)
465	gomega.Expect(err).NotTo(gomega.HaveOccurred())
466	return pods
467}
468
469func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) {
470	hamsterContainer := SetupHamsterContainer(cpu, memory)
471	rc := framework.RcByNameContainer("hamster-rc", replicas, "k8s.gcr.io/ubuntu-slim:0.1",
472		hamsterLabels, hamsterContainer, nil)
473
474	rc.Namespace = f.Namespace.Name
475	err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc)
476	gomega.Expect(err).NotTo(gomega.HaveOccurred())
477	err = waitForRCPodsRunning(f, rc)
478	gomega.Expect(err).NotTo(gomega.HaveOccurred())
479}
480
481func waitForRCPodsRunning(f *framework.Framework, rc *apiv1.ReplicationController) error {
482	return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
483		podList, err := GetHamsterPods(f)
484		if err != nil {
485			framework.Logf("Error listing pods, retrying: %v", err)
486			return false, nil
487		}
488		podsRunning := int32(0)
489		for _, pod := range podList.Items {
490			if pod.Status.Phase == apiv1.PodRunning {
491				podsRunning += 1
492			}
493		}
494		return podsRunning == *rc.Spec.Replicas, nil
495	})
496}
497
498func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) {
499	job := framework_job.NewTestJob("notTerminate", "hamster-job", apiv1.RestartPolicyOnFailure,
500		replicas, replicas, nil, 10)
501	job.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
502	for label, value := range hamsterLabels {
503		job.Spec.Template.Labels[label] = value
504	}
505	err := testutils.CreateJobWithRetries(f.ClientSet, f.Namespace.Name, job)
506	gomega.Expect(err).NotTo(gomega.HaveOccurred())
507	err = framework_job.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, replicas)
508	gomega.Expect(err).NotTo(gomega.HaveOccurred())
509}
510
511func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) {
512	rs := framework_rs.NewReplicaSet("hamster-rs", f.Namespace.Name, replicas,
513		hamsterLabels, "", "")
514	rs.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
515	err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs)
516	gomega.Expect(err).NotTo(gomega.HaveOccurred())
517	err = framework_rs.WaitForReadyReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name)
518	gomega.Expect(err).NotTo(gomega.HaveOccurred())
519}
520
521func setupHamsterStateful(f *framework.Framework, cpu, memory string, replicas int32) {
522	stateful := framework_ss.NewStatefulSet("hamster-stateful", f.Namespace.Name,
523		"hamster-service", replicas, nil, nil, hamsterLabels)
524
525	stateful.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
526	err := createStatefulSetSetWithRetries(f.ClientSet, f.Namespace.Name, stateful)
527	gomega.Expect(err).NotTo(gomega.HaveOccurred())
528	framework_ss.WaitForRunningAndReady(f.ClientSet, *stateful.Spec.Replicas, stateful)
529}
530
531func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1beta1.PodDisruptionBudget {
532	maxUnavailableIntstr := intstr.FromInt(maxUnavailable)
533	pdb := &policyv1beta1.PodDisruptionBudget{
534		ObjectMeta: metav1.ObjectMeta{
535			Name: name,
536		},
537		Spec: policyv1beta1.PodDisruptionBudgetSpec{
538			MaxUnavailable: &maxUnavailableIntstr,
539			Selector: &metav1.LabelSelector{
540				MatchLabels: hamsterLabels,
541			},
542		},
543	}
544	_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(pdb)
545	gomega.Expect(err).NotTo(gomega.HaveOccurred())
546	return pdb
547}
548
549func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment) PodSet {
550	podList, err := framework_deployment.GetPodsForDeployment(c, d)
551	gomega.Expect(err).NotTo(gomega.HaveOccurred())
552	return MakePodSet(podList)
553}
554
555func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error {
556	if obj == nil {
557		return fmt.Errorf("object provided to create is empty")
558	}
559	createFunc := func() (bool, error) {
560		_, err := c.AppsV1().ReplicaSets(namespace).Create(obj)
561		if err == nil || apierrs.IsAlreadyExists(err) {
562			return true, nil
563		}
564		if testutils.IsRetryableAPIError(err) {
565			return false, nil
566		}
567		return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
568	}
569	return testutils.RetryWithExponentialBackOff(createFunc)
570}
571
572func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error {
573	if obj == nil {
574		return fmt.Errorf("object provided to create is empty")
575	}
576	createFunc := func() (bool, error) {
577		_, err := c.AppsV1().StatefulSets(namespace).Create(obj)
578		if err == nil || apierrs.IsAlreadyExists(err) {
579			return true, nil
580		}
581		if testutils.IsRetryableAPIError(err) {
582			return false, nil
583		}
584		return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
585	}
586	return testutils.RetryWithExponentialBackOff(createFunc)
587}
588
Full Screen

Accelerate Your Automation Test Cycles With LambdaTest

Leverage LambdaTest’s cloud-based platform to execute your automation tests in parallel and trim down your test execution time significantly. Your first 100 automation testing minutes are on us.

Try LambdaTest
LambdaTestX

We use cookies to give you the best experience. Cookies help to provide a more personalized experience and relevant advertising for you, and web analytics for us. Learn more in our Cookies Policy, Privacy Policy & Terms of Service.

Allow Cookie
Sarah

I hope you find the best code examples for your project.

If you want to accelerate automated browser testing, try LambdaTest. Your first 100 automation testing minutes are FREE.

Sarah Elson (Product & Growth Lead)