How to use the Cleanup method of the performance_test package

Best Ginkgo code snippet using performance_test.Cleanup

Run Ginkgo automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

fetching_dependencies_test.go

Source: fetching_dependencies_test.go Github

copy
1package performance_test
2
3import (
4	"fmt"
5
6	. "github.com/onsi/ginkgo/v2"
7	. "github.com/onsi/gomega"
8	"github.com/onsi/gomega/gmeasure"
9)
10
// Benchmarks strategies for fetching Go module dependencies. Every scenario
// clears the module cache (ClearGoModCache: true) and varies compiler
// concurrency and whether `go mod download` is run first (GoModDownloadFirst).
var _ = Describe("Fetching Dependencies", func() {
	var cache gmeasure.ExperimentCache

	BeforeEach(func() {
		var err error
		// The on-disk cache lets the separate "Analysis" spec below read the
		// results recorded by the "Experiments" spec without re-sampling.
		cache, err = gmeasure.NewExperimentCache("./fetching-dependencies-cache")
		Ω(err).ShouldNot(HaveOccurred())

		// we mount everything outside the Ginkgo parent directory to make sure GOMODULES doesn't get confused by the go.mod in Ginkgo's root
		// Suffixing paths with GinkgoParallelProcess() keeps parallel processes
		// from stomping on each other's fixture and module-cache directories.
		pfm = NewPerformanceFixtureManager(fmt.Sprintf("../../../ginkgo_perf_tmp_%d", GinkgoParallelProcess()))
		gmcm = NewGoModCacheManager(fmt.Sprintf("../../../ginkgo_perf_cache_%d", GinkgoParallelProcess()))
		if !DEBUG {
			// In DEBUG mode the fixture tree and module cache are left on disk
			// so they can be inspected after the run.
			DeferCleanup(pfm.Cleanup)
			DeferCleanup(gmcm.Cleanup)
		}
	})

	Describe("Experiments", func() {
		BeforeEach(func() {
			pfm.MountFixture("performance")
		})

		// Samples each scenario (8 samples, 1 at a time, no go-test comparison)
		// and records the timings into the shared experiment cache.
		It("runs a series of experiments with various scenarios", func() {
			SampleScenarios(cache, 8, 1, false,
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 1, ConcurrentRunners: 1, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 1, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 1, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 1, ConcurrentRunners: 1, GoModDownloadFirst: true, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 1, GoModDownloadFirst: true, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 1, GoModDownloadFirst: true, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 1, CompileFirstSuiteSerially: true, Recurse: true, ClearGoModCache: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 1, CompileFirstSuiteSerially: true, Recurse: true, ClearGoModCache: true},
			)
		})
	})

	Describe("Analysis", func() {
		// Reads the cached experiment results and reports which scenario won.
		It("analyzes the various fetching dependencies scenarios to identify winners", func() {
			AnalyzeCache(cache)
		})
	})
})
53
Full Screen

compiling_and_running_test.go

Source: compiling_and_running_test.go Github

copy
1package performance_test
2
3import (
4	"fmt"
5
6	. "github.com/onsi/ginkgo/v2"
7	. "github.com/onsi/gomega"
8	"github.com/onsi/gomega/gmeasure"
9)
10
// Benchmarks compiling and running a single test package, comparing Ginkgo's
// own compile/run pipeline against invoking `go test` directly.
var _ = Describe("Compiling and Running a single test package", func() {
	var cache gmeasure.ExperimentCache

	BeforeEach(func() {
		var err error
		// The on-disk cache lets the "Analysis" spec below consume the results
		// recorded by the "Experiments" spec without re-sampling.
		cache, err = gmeasure.NewExperimentCache("./compiling-and-running-single-cache")
		Ω(err).ShouldNot(HaveOccurred())

		// we mount everything outside the Ginkgo parent directory to make sure GOMODULES doesn't get confused by the go.mod in Ginkgo's root
		// Per-process path suffixes keep parallel Ginkgo processes isolated.
		pfm = NewPerformanceFixtureManager(fmt.Sprintf("../../../ginkgo_perf_tmp_%d", GinkgoParallelProcess()))
		gmcm = NewGoModCacheManager(fmt.Sprintf("../../../ginkgo_perf_cache_%d", GinkgoParallelProcess()))
		if !DEBUG {
			// In DEBUG mode the temp directories are kept for post-run inspection.
			DeferCleanup(pfm.Cleanup)
			DeferCleanup(gmcm.Cleanup)
		}
	})

	Describe("Experiments", func() {
		BeforeEach(func() {
			pfm.MountFixture("performance")
		})

		// Samples each scenario (8 samples, 1 at a time; the `true` flag differs
		// from the fetching-dependencies suite — presumably it enables the
		// go-test comparison path, verify against SampleScenarios).
		It("runs a series of experiments with various scenarios", func() {
			SampleScenarios(cache, 8, 1, true,
				ScenarioSettings{Fixture: "performance", NumSuites: 1, ConcurrentCompilers: 1, ConcurrentRunners: 1},
				ScenarioSettings{Fixture: "performance", NumSuites: 1, UseGoTestDirectly: true, ConcurrentGoTests: 1},
				ScenarioSettings{Fixture: "performance", NumSuites: 1, UseGoTestDirectly: true, GoTestCompileThenRunSerially: true},
			)
		})
	})

	Describe("Analysis", func() {
		// Reads the cached experiment results and reports which scenario won.
		It("analyzes the various scenarios to identify winners", func() {
			AnalyzeCache(cache)
		})
	})
})
48
// Benchmarks compiling and running five test suites at once, sweeping
// compiler/runner concurrency levels and comparing against direct `go test`
// invocations at several concurrency levels.
var _ = Describe("Compiling and Running multiple tests", func() {
	var cache gmeasure.ExperimentCache

	BeforeEach(func() {
		var err error
		// The on-disk cache lets the "Analysis" spec below consume the results
		// recorded by the "Experiments" spec without re-sampling.
		cache, err = gmeasure.NewExperimentCache("./compiling-and-running-multiple-cache")
		Ω(err).ShouldNot(HaveOccurred())

		// we mount everything outside the Ginkgo parent directory to make sure GOMODULES doesn't get confused by the go.mod in Ginkgo's root
		// Per-process path suffixes keep parallel Ginkgo processes isolated.
		pfm = NewPerformanceFixtureManager(fmt.Sprintf("../../../ginkgo_perf_tmp_%d", GinkgoParallelProcess()))
		gmcm = NewGoModCacheManager(fmt.Sprintf("../../../ginkgo_perf_cache_%d", GinkgoParallelProcess()))
		if !DEBUG {
			// In DEBUG mode the temp directories are kept for post-run inspection.
			DeferCleanup(pfm.Cleanup)
			DeferCleanup(gmcm.Cleanup)
		}
	})

	Describe("Experiments", func() {
		BeforeEach(func() {
			pfm.MountFixture("performance")
		})

		// Samples every combination of interest: Ginkgo with 1/2/4 concurrent
		// compilers, serial-first compilation, 1/2/4 concurrent runners, and
		// `go test` directly with 1/2/4/8 concurrent invocations.
		It("runs a series of experiments with various scenarios", func() {
			SampleScenarios(cache, 8, 1, true,
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 1, ConcurrentRunners: 1, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 1, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 1, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 1, CompileFirstSuiteSerially: true, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 1, CompileFirstSuiteSerially: true, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 2, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 2, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 2, ConcurrentRunners: 4, CompileFirstSuiteSerially: true, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, ConcurrentCompilers: 4, ConcurrentRunners: 4, CompileFirstSuiteSerially: true, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, UseGoTestDirectly: true, ConcurrentGoTests: 1, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, UseGoTestDirectly: true, ConcurrentGoTests: 2, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, UseGoTestDirectly: true, ConcurrentGoTests: 4, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, UseGoTestDirectly: true, ConcurrentGoTests: 8, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, UseGoTestDirectly: true, GoTestCompileThenRunSerially: true, Recurse: true},
				ScenarioSettings{Fixture: "performance", NumSuites: 5, UseGoTestDirectly: true, GoTestRecurse: true, Recurse: true},
			)
		})
	})

	Describe("Analysis", func() {
		// Reads the cached experiment results and reports which scenario won.
		It("analyzes the various scenarios to identify winners", func() {
			AnalyzeCache(cache)
		})
	})
})
98
Full Screen

large_suite_test.go

Source: large_suite_test.go Github

copy
1package performance_test
2
3import (
4	"fmt"
5	"os"
6
7	. "github.com/onsi/ginkgo/v2"
8	. "github.com/onsi/gomega"
9	"github.com/onsi/gomega/gexec"
10	"github.com/onsi/gomega/gmeasure"
11)
12
13func LoadOrCreate(cache gmeasure.ExperimentCache, name string, version int) (*gmeasure.Experiment, bool) {
14	experiment := cache.Load(name, version)
15	if experiment != nil {
16		return experiment, true
17	}
18	return gmeasure.NewExperiment(name), false
19}
20
// Benchmarks a large pre-built test suite across serial and parallel
// configurations (RPC vs HTTP parallel protocol, DUP/SWAP/NONE output
// interceptor modes), comparing runtimes against a cached benchmark.
// Ordered ensures the table entries run before the final analysis spec;
// Serial keeps other suites from competing for CPU during timing.
var _ = Describe("Running a large test suite", Ordered, Serial, func() {
	var cache gmeasure.ExperimentCache
	// Set BENCH in the environment to record new benchmark baselines instead
	// of comparing against the cached ones.
	var REGENERATE_BENCHMARK = os.Getenv("BENCH") != ""
	// Bump this to invalidate previously cached benchmarks.
	const BENCHMARK_VERSION = 2
	// Number of timed samples taken per scenario.
	const N = 10

	// Accumulates runtime stats (benchmarks and fresh runs) across all table
	// entries for the final ranking.
	var runtimes = []gmeasure.Stats{}

	BeforeAll(func() {
		// These measurements are expensive; only run when explicitly requested.
		if os.Getenv("PERF") == "" {
			Skip("PERF environment not set, skipping")
		}

		var err error
		cache, err = gmeasure.NewExperimentCache("./large-suite-cache")
		Ω(err).ShouldNot(HaveOccurred())

		pfm = NewPerformanceFixtureManager(fmt.Sprintf("./ginkgo_perf_tmp_%d", GinkgoParallelProcess()))
		if !DEBUG {
			// In DEBUG mode the fixture directory is kept for inspection.
			DeferCleanup(pfm.Cleanup)
		}
		pfm.MountFixture("large_suite")

		// Pre-compile the suite once so the timed runs below measure only
		// execution, not compilation.
		session := startGinkgo(pfm.PathTo("large_suite"), "build")
		Eventually(session).Should(gexec.Exit(0))
		Expect(pfm.PathTo("large_suite", "large_suite.test")).To(BeAnExistingFile())
	})

	// Canonical scenario name; also used as the table-entry description below.
	var nameFor = func(nodes int, protocol string, interceptor string) string {
		if nodes == 1 {
			return "serial"
		}
		return "parallel" + "-" + protocol + "-" + interceptor
	}

	DescribeTable("scenarios",
		func(nodes int, protocol string, interceptor string) {
			var experiment *gmeasure.Experiment
			name := nameFor(nodes, protocol, interceptor)

			if REGENERATE_BENCHMARK {
				experiment = gmeasure.NewExperiment(name + "-benchmark")
			} else {
				// Comparison mode: the cached benchmark must exist (at the
				// current version) and its stats join the ranking pool.
				benchmark := cache.Load(name+"-benchmark", BENCHMARK_VERSION)
				Ω(benchmark).ShouldNot(BeNil())
				runtimes = append(runtimes, benchmark.GetStats("runtime"))
				experiment = gmeasure.NewExperiment(name)
			}
			AddReportEntry(experiment.Name, experiment)

			// The parallel protocol only matters when running with >1 proc.
			env := []string{}
			if nodes > 1 {
				env = append(env, "GINKGO_PARALLEL_PROTOCOL="+protocol)
			}

			// Time N full runs of the pre-built suite binary.
			experiment.SampleDuration("runtime", func(idx int) {
				fmt.Printf("Running %s %d/%d\n", name, idx+1, N)
				session := startGinkgoWithEnv(
					pfm.PathTo("large_suite"),
					env,
					fmt.Sprintf("--procs=%d", nodes),
					fmt.Sprintf("--output-interceptor-mode=%s", interceptor),
					"large_suite.test",
				)
				Eventually(session).Should(gexec.Exit(0))
			}, gmeasure.SamplingConfig{N: N})
			runtimes = append(runtimes, experiment.GetStats("runtime"))

			// One extra untimed run to emit CPU and block profiles for this scenario.
			fmt.Printf("Profiling %s\n", name)
			session := startGinkgoWithEnv(
				pfm.PathTo("large_suite"),
				env,
				fmt.Sprintf("--procs=%d", nodes),
				fmt.Sprintf("--output-interceptor-mode=%s", interceptor),
				"--cpuprofile=CPU.profile",
				"--blockprofile=BLOCK.profile",
				"large_suite.test",
			)
			Eventually(session).Should(gexec.Exit(0))

			if REGENERATE_BENCHMARK {
				cache.Save(experiment.Name, BENCHMARK_VERSION, experiment)
			}
		},
		// nameFor generates each entry's description from its parameters.
		nameFor,
		Entry(nil, 1, "", ""),
		Entry(nil, 2, "RPC", "DUP"),
		Entry(nil, 2, "RPC", "SWAP"),
		Entry(nil, 2, "RPC", "NONE"),
		Entry(nil, 2, "HTTP", "DUP"),
		Entry(nil, 2, "HTTP", "SWAP"),
		Entry(nil, 2, "HTTP", "NONE"),
	)

	// Runs last (Ordered): ranks all collected runtime stats, lowest median wins.
	It("analyzes the experiments", func() {
		if REGENERATE_BENCHMARK {
			Skip("no analysis when generating benchmark")
		}
		AddReportEntry("Ranking", gmeasure.RankStats(gmeasure.LowerMedianIsBetter, runtimes...))
	})
})
122
Full Screen

Accelerate Your Automation Test Cycles With LambdaTest

Leverage LambdaTest’s cloud-based platform to execute your automation tests in parallel and trim down your test execution time significantly. Your first 100 automation testing minutes are on us.

Try LambdaTest

Trigger Cleanup code on LambdaTest Cloud Grid

Execute automation tests with Cleanup on a cloud-based Grid of 3000+ real browsers and operating systems for both web and mobile applications.

Test now for Free
LambdaTestX

We use cookies to give you the best experience. Cookies help us provide a more personalized experience and relevant advertising for you, and web analytics for us. Learn more in our Cookies Policy, Privacy Policy & Terms of Service.

Allow Cookie
Sarah

I hope you find the best code examples for your project.

If you want to accelerate automated browser testing, try LambdaTest. Your first 100 automation testing minutes are FREE.

Sarah Elson (Product & Growth Lead)