How to use the InRunPhase method of the internal package

Best Ginkgo code snippet using internal.InRunPhase
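internal.Suite models its lifecycle as a Phase: a new suite starts in PhaseBuildTopLevel, BuildTree() moves it through PhaseBuildTree, and Run() puts it in PhaseRun. InRunPhase() simply reports whether the suite has reached PhaseRun; the public By helper in core_dsl.go below uses it to reject calls made during tree construction. A minimal sketch of the lifecycle (note that Go only lets code inside the ginkgo module import the internal package, so this compiles there and nowhere else):

	package main

	import (
		"fmt"

		"github.com/onsi/ginkgo/v2/internal"
	)

	func main() {
		suite := internal.NewSuite()    // starts in PhaseBuildTopLevel
		fmt.Println(suite.InRunPhase()) // false: tree construction has not begun

		_ = suite.BuildTree()           // enters top-level containers during PhaseBuildTree
		fmt.Println(suite.InRunPhase()) // still false: only suite.Run(...) sets PhaseRun
	}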


suite_test.go

Source: suite_test.go (GitHub)
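
The suite_test.go unit test below drives an internal.Suite by hand through all three phases - pushing nodes, calling BuildTree(), then calling Run(...) - and asserts on when each closure executes. The InRunPhase spec in the middle of the file is the focus of this page.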

package internal_test

import (
	"fmt"
	"io"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/internal"
	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	. "github.com/onsi/ginkgo/v2/internal/test_helpers"
	"github.com/onsi/ginkgo/v2/types"
	. "github.com/onsi/gomega"
)

var _ = Describe("Suite", func() {
	It("is heavily integration tested over in internal_integration", func() {
	})

	var suite *internal.Suite
	var failer *internal.Failer
	var reporter *FakeReporter
	var writer *internal.Writer
	var outputInterceptor *FakeOutputInterceptor
	var interruptHandler *interrupt_handler.InterruptHandler
	var conf types.SuiteConfig
	var rt *RunTracker
	var client parallel_support.Client

	BeforeEach(func() {
		failer = internal.NewFailer()
		reporter = &FakeReporter{}
		writer = internal.NewWriter(io.Discard)
		outputInterceptor = NewFakeOutputInterceptor()
		client = nil
		interruptHandler = interrupt_handler.NewInterruptHandler(0, client)
		DeferCleanup(interruptHandler.Stop)
		conf = types.SuiteConfig{
			ParallelTotal:   1,
			ParallelProcess: 1,
		}
		rt = NewRunTracker()
		suite = internal.NewSuite()
	})

	Describe("Constructing Trees", func() {
		Describe("PhaseBuildTopLevel vs PhaseBuildTree", func() {
			var err1, err2, err3 error
			BeforeEach(func() {
				err1 = suite.PushNode(N(ntCon, "a top-level container", func() {
					rt.Run("traversing outer")
					err2 = suite.PushNode(N(ntCon, "a nested container", func() {
						rt.Run("traversing nested")
						err3 = suite.PushNode(N(ntIt, "an it", rt.T("running it")))
					}))
				}))
			})

			It("only traverses top-level containers when told to BuildTree", func() {
				fmt.Fprintln(GinkgoWriter, "HELLO!")
				Ω(rt).Should(HaveTrackedNothing())
				Ω(suite.BuildTree()).Should(Succeed())
				Ω(rt).Should(HaveTracked("traversing outer", "traversing nested"))

				rt.Reset()
				suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
				Ω(rt).Should(HaveTracked("running it"))

				Ω(err1).ShouldNot(HaveOccurred())
				Ω(err2).ShouldNot(HaveOccurred())
				Ω(err3).ShouldNot(HaveOccurred())
			})
		})

		Describe("InRunPhase", func() {
			It("returns true when in the run phase and false when not in the run phase", func() {
				falsey := true
				truey := false

				err := suite.PushNode(N(ntCon, "a top-level container", func() {
					falsey = suite.InRunPhase()
					suite.PushNode(N(ntIt, "an it", func() {
						truey = suite.InRunPhase()
					}))
				}))

				Ω(suite.BuildTree()).Should(Succeed())
				suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)

				Ω(err).ShouldNot(HaveOccurred())
				Ω(truey).Should(BeTrue())
				Ω(falsey).Should(BeFalse())
			})
		})

		Context("when pushing nodes during PhaseRun", func() {
			var pushNodeErrDuringRun error

			BeforeEach(func() {
				err := suite.PushNode(N(ntCon, "a top-level container", func() {
					suite.PushNode(N(ntIt, "an it", func() {
						rt.Run("in it")
						pushNodeErrDuringRun = suite.PushNode(N(ntIt, "oops - illegal operation", cl, rt.T("illegal")))
					}))
				}))

				Ω(err).ShouldNot(HaveOccurred())
				Ω(suite.BuildTree()).Should(Succeed())
			})

			It("errors", func() {
				suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
				Ω(pushNodeErrDuringRun).Should(HaveOccurred())
				Ω(rt).Should(HaveTracked("in it"))
			})
		})

		Context("when the user attempts to fail during PhaseBuildTree", func() {
			BeforeEach(func() {
				suite.PushNode(N(ntCon, "a top-level container", func() {
					failer.Fail("boom", cl)
					panic("simulate ginkgo panic")
				}))
			})

			It("errors", func() {
				err := suite.BuildTree()
				Ω(err.Error()).Should(ContainSubstring(cl.String()))
				Ω(err.Error()).Should(ContainSubstring("simulate ginkgo panic"))
			})
		})

		Context("when the user panics during PhaseBuildTree", func() {
			BeforeEach(func() {
				suite.PushNode(N(ntCon, "a top-level container", func() {
					panic("boom")
				}))
			})

			It("errors", func() {
				err := suite.BuildTree()
				Ω(err).Should(HaveOccurred())
				Ω(err.Error()).Should(ContainSubstring("boom"))
			})
		})

		Describe("Suite Nodes", func() {
			Context("when pushing suite nodes at the top level", func() {
				BeforeEach(func() {
					err := suite.PushNode(N(types.NodeTypeBeforeSuite))
					Ω(err).ShouldNot(HaveOccurred())

					err = suite.PushNode(N(types.NodeTypeAfterSuite))
					Ω(err).ShouldNot(HaveOccurred())
				})

				Context("when pushing more than one BeforeSuite node", func() {
					It("errors", func() {
						err := suite.PushNode(N(types.NodeTypeBeforeSuite))
						Ω(err).Should(HaveOccurred())

						err = suite.PushNode(N(types.NodeTypeSynchronizedBeforeSuite))
						Ω(err).Should(HaveOccurred())
					})
				})

				Context("when pushing more than one AfterSuite node", func() {
					It("errors", func() {
						err := suite.PushNode(N(types.NodeTypeAfterSuite))
						Ω(err).Should(HaveOccurred())

						err = suite.PushNode(N(types.NodeTypeSynchronizedAfterSuite))
						Ω(err).Should(HaveOccurred())
					})
				})
			})

			Context("when pushing a serial node in an ordered container", func() {
				Context("when the outer-most ordered container is marked serial", func() {
					It("succeeds", func() {
						var errors = make([]error, 3)
						errors[0] = suite.PushNode(N(ntCon, "top-level-container", Ordered, Serial, func() {
							errors[1] = suite.PushNode(N(ntCon, "inner-container", func() {
								errors[2] = suite.PushNode(N(ntIt, "it", Serial, func() {}))
							}))
						}))
						Ω(errors[0]).ShouldNot(HaveOccurred())
						Ω(suite.BuildTree()).Should(Succeed())
						Ω(errors[1]).ShouldNot(HaveOccurred())
						Ω(errors[2]).ShouldNot(HaveOccurred())
					})
				})

				Context("when the outer-most ordered container is not marked serial", func() {
					It("errors", func() {
						var errors = make([]error, 3)
						errors[0] = suite.PushNode(N(ntCon, "top-level-container", Ordered, func() {
							errors[1] = suite.PushNode(N(ntCon, "inner-container", func() {
								errors[2] = suite.PushNode(N(ntIt, "it", Serial, cl, func() {}))
							}))
						}))
						Ω(errors[0]).ShouldNot(HaveOccurred())
						Ω(suite.BuildTree()).Should(Succeed())
						Ω(errors[1]).ShouldNot(HaveOccurred())
						Ω(errors[2]).Should(MatchError(types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(cl, ntIt)))
					})
				})
			})

			Context("when pushing BeforeAll and AfterAll nodes", func() {
				Context("in an ordered container", func() {
					It("succeeds", func() {
						var errors = make([]error, 3)
						errors[0] = suite.PushNode(N(ntCon, "top-level-container", Ordered, func() {
							errors[1] = suite.PushNode(N(types.NodeTypeBeforeAll, func() {}))
							errors[2] = suite.PushNode(N(types.NodeTypeAfterAll, func() {}))
						}))
						Ω(errors[0]).ShouldNot(HaveOccurred())
						Ω(suite.BuildTree()).Should(Succeed())
						Ω(errors[1]).ShouldNot(HaveOccurred())
						Ω(errors[2]).ShouldNot(HaveOccurred())
					})
				})

				Context("anywhere else", func() {
					It("errors", func() {
						var errors = make([]error, 3)
						errors[0] = suite.PushNode(N(ntCon, "top-level-container", func() {
							errors[1] = suite.PushNode(N(types.NodeTypeBeforeAll, cl, func() {}))
							errors[2] = suite.PushNode(N(types.NodeTypeAfterAll, cl, func() {}))
						}))
						Ω(errors[0]).ShouldNot(HaveOccurred())
						Ω(suite.BuildTree()).Should(Succeed())
						Ω(errors[1]).Should(MatchError(types.GinkgoErrors.SetupNodeNotInOrderedContainer(cl, types.NodeTypeBeforeAll)))
						Ω(errors[2]).Should(MatchError(types.GinkgoErrors.SetupNodeNotInOrderedContainer(cl, types.NodeTypeAfterAll)))
					})
				})
			})

			Context("when pushing a suite node during PhaseBuildTree", func() {
				It("errors", func() {
					var pushSuiteNodeErr error
					err := suite.PushNode(N(ntCon, "top-level-container", func() {
						pushSuiteNodeErr = suite.PushNode(N(types.NodeTypeBeforeSuite, cl))
					}))

					Ω(err).ShouldNot(HaveOccurred())
					Ω(suite.BuildTree()).Should(Succeed())
					Ω(pushSuiteNodeErr).Should(HaveOccurred())
				})
			})

			Context("when pushing a suite node during PhaseRun", func() {
				It("errors", func() {
					var pushSuiteNodeErr error
					err := suite.PushNode(N(ntIt, "top-level it", func() {
						pushSuiteNodeErr = suite.PushNode(N(types.NodeTypeBeforeSuite, cl))
					}))

					Ω(err).ShouldNot(HaveOccurred())
					Ω(suite.BuildTree()).Should(Succeed())
					suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
					Ω(pushSuiteNodeErr).Should(HaveOccurred())
				})
			})
		})

		Describe("Cleanup Nodes", func() {
			Context("when pushing a cleanup node during PhaseTopLevel", func() {
				It("errors", func() {
					err := suite.PushNode(N(types.NodeTypeCleanupInvalid, cl))
					Ω(err).Should(MatchError(types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(cl)))
				})
			})

			Context("when pushing a cleanup node during PhaseBuildTree", func() {
				It("errors", func() {
					var errors = make([]error, 2)
					errors[0] = suite.PushNode(N(ntCon, "container", func() {
						errors[1] = suite.PushNode(N(types.NodeTypeCleanupInvalid, cl))
					}))
					Ω(errors[0]).ShouldNot(HaveOccurred())
					Ω(suite.BuildTree()).Should(Succeed())
					Ω(errors[1]).Should(MatchError(types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(cl)))
				})
			})

			Context("when pushing a cleanup node in a ReportBeforeEach node", func() {
				It("errors", func() {
					var errors = make([]error, 4)
					reportBeforeEachNode, _ := internal.NewReportBeforeEachNode(func(_ types.SpecReport) {
						errors[3] = suite.PushNode(N(types.NodeTypeCleanupInvalid, cl))
					}, types.NewCodeLocation(0))

					errors[0] = suite.PushNode(N(ntCon, "container", func() {
						errors[1] = suite.PushNode(reportBeforeEachNode)
						errors[2] = suite.PushNode(N(ntIt, "test"))
					}))
					Ω(errors[0]).ShouldNot(HaveOccurred())

					Ω(suite.BuildTree()).Should(Succeed())
					Ω(errors[1]).ShouldNot(HaveOccurred())
					Ω(errors[2]).ShouldNot(HaveOccurred())

					suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
					Ω(errors[3]).Should(MatchError(types.GinkgoErrors.PushingCleanupInReportingNode(cl, types.NodeTypeReportBeforeEach)))
				})
			})

			Context("when pushing a cleanup node in a ReportAfterEach node", func() {
				It("errors", func() {
					var errors = make([]error, 4)
					reportAfterEachNode, _ := internal.NewReportAfterEachNode(func(_ types.SpecReport) {
						errors[3] = suite.PushNode(N(types.NodeTypeCleanupInvalid, cl))
					}, types.NewCodeLocation(0))

					errors[0] = suite.PushNode(N(ntCon, "container", func() {
						errors[1] = suite.PushNode(N(ntIt, "test"))
						errors[2] = suite.PushNode(reportAfterEachNode)
					}))
					Ω(errors[0]).ShouldNot(HaveOccurred())

					Ω(suite.BuildTree()).Should(Succeed())
					Ω(errors[1]).ShouldNot(HaveOccurred())
					Ω(errors[2]).ShouldNot(HaveOccurred())

					suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
					Ω(errors[3]).Should(MatchError(types.GinkgoErrors.PushingCleanupInReportingNode(cl, types.NodeTypeReportAfterEach)))
				})
			})

			Context("when pushing a cleanup node in a ReportAfterSuite node", func() {
				It("errors", func() {
					var errors = make([]error, 4)
					reportAfterSuiteNode, _ := internal.NewReportAfterSuiteNode("report", func(_ types.Report) {
						errors[3] = suite.PushNode(N(types.NodeTypeCleanupInvalid, cl))
					}, types.NewCodeLocation(0))

					errors[0] = suite.PushNode(N(ntCon, "container", func() {
						errors[2] = suite.PushNode(N(ntIt, "test"))
					}))
					errors[1] = suite.PushNode(reportAfterSuiteNode)
					Ω(errors[0]).ShouldNot(HaveOccurred())
					Ω(errors[1]).ShouldNot(HaveOccurred())

					Ω(suite.BuildTree()).Should(Succeed())
					Ω(errors[2]).ShouldNot(HaveOccurred())

					suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
					Ω(errors[3]).Should(MatchError(types.GinkgoErrors.PushingCleanupInReportingNode(cl, types.NodeTypeReportAfterSuite)))
				})
			})

			Context("when pushing a cleanup node within a cleanup node", func() {
				It("errors", func() {
					var errors = make([]error, 3)
					errors[0] = suite.PushNode(N(ntIt, "It", func() {
						cleanupNode, _ := internal.NewCleanupNode(nil, types.NewCustomCodeLocation("outerCleanup"), func() {
							innerCleanupNode, _ := internal.NewCleanupNode(nil, cl, func() {})
							errors[2] = suite.PushNode(innerCleanupNode)
						})
						errors[1] = suite.PushNode(cleanupNode)
					}))
					Ω(errors[0]).ShouldNot(HaveOccurred())
					Ω(suite.BuildTree()).Should(Succeed())
					suite.Run("suite", Labels{}, "/path/to/suite", failer, reporter, writer, outputInterceptor, interruptHandler, client, conf)
					Ω(errors[1]).ShouldNot(HaveOccurred())
					Ω(errors[2]).Should(MatchError(types.GinkgoErrors.PushingCleanupInCleanupNode(cl)))
				})
			})
		})

		Describe("ReportEntries", func() {
			Context("when adding a report entry outside of the run phase", func() {
				It("errors", func() {
					entry, err := internal.NewReportEntry("name", cl)
					Ω(err).ShouldNot(HaveOccurred())
					err = suite.AddReportEntry(entry)
					Ω(err).Should(MatchError(types.GinkgoErrors.AddReportEntryNotDuringRunPhase(cl)))
					suite.BuildTree()
					err = suite.AddReportEntry(entry)
					Ω(err).Should(MatchError(types.GinkgoErrors.AddReportEntryNotDuringRunPhase(cl)))
				})
			})
		})

		When("using when", func() {
			It("prepends 'when' to the test name", func() {
				Ω(CurrentSpecReport().FullText()).Should(ContainSubstring(" when using when prepends"))
			})
		})
	})
})
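The InRunPhase spec above works by sampling the method at two points in the lifecycle: the container closure runs while BuildTree() executes, so falsey records false, while the It closure runs only once suite.Run(...) is underway, so truey records true. The N, ntCon, ntIt, and cl identifiers are helpers dot-imported from internal/test_helpers. A condensed sketch of the sampling pattern, under those assumptions:

	// N constructs a Node; ntCon/ntIt are the container/It node types
	// (all dot-imported from internal/test_helpers in the file above).
	err := suite.PushNode(N(ntCon, "container", func() {
		// runs during suite.BuildTree()  -> suite.InRunPhase() == false
		suite.PushNode(N(ntIt, "it", func() {
			// runs during suite.Run(...) -> suite.InRunPhase() == true
		}))
	}))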

core_dsl.go

Source: core_dsl.go (GitHub)
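
core_dsl.go is the public DSL layer that wraps the global internal.Suite. Its By helper, near the bottom of the file, is the clearest consumer of InRunPhase: it refuses to log a step unless the suite is actually running specs.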

/*
Ginkgo is a testing framework for Go designed to help you write expressive tests.
https://github.com/onsi/ginkgo
MIT-Licensed

The godoc documentation outlines Ginkgo's API.  Since Ginkgo is a Domain-Specific Language it is important to
build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
You should start there - even a brief skim will be helpful.  At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.

Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega

You can run Ginkgo specs with go test - however we recommend using the ginkgo cli.  It enables functionality
that go test does not (especially running suites in parallel).  You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
or by running 'ginkgo help'.
*/
package ginkgo

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/internal"
	"github.com/onsi/ginkgo/v2/internal/global"
	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

const GINKGO_VERSION = types.VERSION

var flagSet types.GinkgoFlagSet
var deprecationTracker = types.NewDeprecationTracker()
var suiteConfig = types.NewDefaultSuiteConfig()
var reporterConfig = types.NewDefaultReporterConfig()
var suiteDidRun = false
var outputInterceptor internal.OutputInterceptor
var client parallel_support.Client

func init() {
	var err error
	flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
	exitIfErr(err)
	GinkgoWriter = internal.NewWriter(os.Stdout)
}

func exitIfErr(err error) {
	if err != nil {
		if outputInterceptor != nil {
			outputInterceptor.Shutdown()
		}
		if client != nil {
			client.Close()
		}
		fmt.Fprintln(formatter.ColorableStdErr, err.Error())
		os.Exit(1)
	}
}

func exitIfErrors(errors []error) {
	if len(errors) > 0 {
		if outputInterceptor != nil {
			outputInterceptor.Shutdown()
		}
		if client != nil {
			client.Close()
		}
		for _, err := range errors {
			fmt.Fprintln(formatter.ColorableStdErr, err.Error())
		}
		os.Exit(1)
	}
}

// The interface implemented by GinkgoWriter
type GinkgoWriterInterface interface {
	io.Writer

	Print(a ...interface{})
	Printf(format string, a ...interface{})
	Println(a ...interface{})

	TeeTo(writer io.Writer)
	ClearTeeWriters()
}

/*
GinkgoWriter implements a GinkgoWriterInterface and io.Writer

When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
to stdout.  Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
only if the current test fails.

GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers.  You can unregister all TeeTo() writers with GinkgoWriter.ClearTeeWriters()

You can learn more at https://onsi.github.io/ginkgo/#logging-output
*/
var GinkgoWriter GinkgoWriterInterface

// The interface by which Ginkgo receives *testing.T
type GinkgoTestingT interface {
	Fail()
}

/*
GinkgoConfiguration returns the configuration of the current suite.

The first return value is the SuiteConfig which controls aspects of how the suite runs,
the second return value is the ReporterConfig which controls aspects of how Ginkgo's default
reporter emits output.

Mutating the returned configurations has no effect.  To reconfigure Ginkgo programmatically you need
to pass in your mutated copies into RunSpecs().

You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite
*/
func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) {
	return suiteConfig, reporterConfig
}

/*
GinkgoRandomSeed returns the seed used to randomize spec execution order.  It is
useful for seeding your own pseudorandom number generators to ensure
consistent executions from run to run, where your tests contain variability (for
example, when selecting random spec data).

You can learn more at https://onsi.github.io/ginkgo/#spec-randomization
*/
func GinkgoRandomSeed() int64 {
	return suiteConfig.RandomSeed
}

/*
GinkgoParallelProcess returns the parallel process number for the current ginkgo process.
The process number is 1-indexed.  You can use GinkgoParallelProcess() to shard access to shared
resources across your suites.  You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs

For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization
*/
func GinkgoParallelProcess() int {
	return suiteConfig.ParallelProcess
}

/*
PauseOutputInterception() pauses Ginkgo's output interception.  This is only relevant
when running in parallel and output to stdout/stderr is being intercepted.  You generally
don't need to call this function - however there are cases when Ginkgo's output interception
mechanisms can interfere with external processes launched by the test process.

In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr
then Ginkgo's output interceptor will hang.  To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter.
If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process
then ResumeOutputInterception() after starting it.

Note that PauseOutputInterception() does not cause stdout writes to print to the console -
this simply stops intercepting and storing stdout writes to an internal buffer.
*/
func PauseOutputInterception() {
	if outputInterceptor == nil {
		return
	}
	outputInterceptor.PauseIntercepting()
}

// ResumeOutputInterception() - see docs for PauseOutputInterception()
func ResumeOutputInterception() {
	if outputInterceptor == nil {
		return
	}
	outputInterceptor.ResumeIntercepting()
}

/*
RunSpecs is the entry point for the Ginkgo spec runner.

You must call this within a Golang testing TestX(t *testing.T) function.
If you bootstrapped your suite with "ginkgo bootstrap" this is already
done for you.

Ginkgo is typically configured via command-line flags.  This configuration
can be overridden, however, and passed into RunSpecs as optional arguments:

	func TestMySuite(t *testing.T)  {
		RegisterFailHandler(gomega.Fail)
		// fetch the current config
		suiteConfig, reporterConfig := GinkgoConfiguration()
		// adjust it
		suiteConfig.SkipStrings = []string{"NEVER-RUN"}
		reporterConfig.FullTrace = true
		// pass it in to RunSpecs
		RunSpecs(t, "My Suite", suiteConfig, reporterConfig)
	}

Note that some configuration changes can lead to undefined behavior.  For example,
you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible
for setting these and orchestrating parallel specs across the parallel processes.  See http://onsi.github.io/ginkgo/#spec-parallelization
for more on how specs are parallelized in Ginkgo.

You can also pass suite-level Label() decorators to RunSpecs.  The passed-in labels will apply to all specs in the suite.
*/
func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
	if suiteDidRun {
		exitIfErr(types.GinkgoErrors.RerunningSuite())
	}
	suiteDidRun = true

	suiteLabels := Labels{}
	configErrors := []error{}
	for _, arg := range args {
		switch arg := arg.(type) {
		case types.SuiteConfig:
			suiteConfig = arg
		case types.ReporterConfig:
			reporterConfig = arg
		case Labels:
			suiteLabels = append(suiteLabels, arg...)
		default:
			configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
		}
	}
	exitIfErrors(configErrors)

	configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
	if len(configErrors) > 0 {
		fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
		for _, err := range configErrors {
			fmt.Fprintf(formatter.ColorableStdErr, err.Error())
		}
		os.Exit(1)
	}

	var reporter reporters.Reporter
	if suiteConfig.ParallelTotal == 1 {
		reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)
		outputInterceptor = internal.NoopOutputInterceptor{}
		client = nil
	} else {
		reporter = reporters.NoopReporter{}
		switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
		case "swap":
			outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor()
		case "none":
			outputInterceptor = internal.NoopOutputInterceptor{}
		default:
			outputInterceptor = internal.NewOutputInterceptor()
		}
		client = parallel_support.NewClient(suiteConfig.ParallelHost)
		if !client.Connect() {
			client = nil
			exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost))
		}
		defer client.Close()
	}

	writer := GinkgoWriter.(*internal.Writer)
	if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
		writer.SetMode(internal.WriterModeStreamAndBuffer)
	} else {
		writer.SetMode(internal.WriterModeBufferOnly)
	}

	if reporterConfig.WillGenerateReport() {
		registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
	}

	err := global.Suite.BuildTree()
	exitIfErr(err)

	suitePath, err := os.Getwd()
	exitIfErr(err)
	suitePath, err = filepath.Abs(suitePath)
	exitIfErr(err)

	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig)
	outputInterceptor.Shutdown()

	flagSet.ValidateDeprecations(deprecationTracker)
	if deprecationTracker.DidTrackDeprecations() {
		fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport())
	}

	if !passed {
		t.Fail()
	}

	if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
		fmt.Println("PASS | FOCUSED")
		os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
	}
	return passed
}

/*
Skip instructs Ginkgo to skip the current spec

You can call Skip in any Setup or Subject node closure.

For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs
*/
func Skip(message string, callerSkip ...int) {
	skip := 0
	if len(callerSkip) > 0 {
		skip = callerSkip[0]
	}
	cl := types.NewCodeLocationWithStackTrace(skip + 1)
	global.Failer.Skip(message, cl)
	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
}

/*
Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)

Under the hood, Fail panics to end execution of the current spec.  Ginkgo will catch this panic and proceed with
the subsequent spec.  If you call Fail, or make an assertion, within a goroutine launched by your spec you must
add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail.

You can call Fail in any Setup or Subject node closure.

You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
*/
func Fail(message string, callerSkip ...int) {
	skip := 0
	if len(callerSkip) > 0 {
		skip = callerSkip[0]
	}

	cl := types.NewCodeLocationWithStackTrace(skip + 1)
	global.Failer.Fail(message, cl)
	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
}

/*
AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite.

You can call AbortSuite in any Setup or Subject node closure.

You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites
*/
func AbortSuite(message string, callerSkip ...int) {
	skip := 0
	if len(callerSkip) > 0 {
		skip = callerSkip[0]
	}

	cl := types.NewCodeLocationWithStackTrace(skip + 1)
	global.Failer.AbortSuite(message, cl)
	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
}

/*
GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`.
Since Gomega assertions call Fail, you should add a `defer GinkgoRecover()` at the top of any goroutine that
calls out to Gomega.

Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
further assertions from running.  This panic must be recovered.  Normally, Ginkgo recovers the panic for you,
however if a panic originates on a goroutine *launched* from one of your specs there's no
way for Ginkgo to rescue the panic.  To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.

You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
*/
func GinkgoRecover() {
	e := recover()
	if e != nil {
		global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
	}
}

// pushNode is used by the various test construction DSL methods to push nodes onto the suite.
// It handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits.
func pushNode(node internal.Node, errors []error) bool {
	exitIfErrors(errors)
	exitIfErr(global.Suite.PushNode(node))
	return true
}

/*
Describe nodes are Container nodes that allow you to organize your specs.  A Describe node's closure can contain any number of
Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).

Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow.  It is idiomatic
to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens.

You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
In addition, container nodes can be decorated with a variety of decorators.  You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
func Describe(text string, args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
}

/*
FDescribe focuses specs within the Describe block.
*/
func FDescribe(text string, args ...interface{}) bool {
	args = append(args, internal.Focus)
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
}

/*
PDescribe marks specs within the Describe block as pending.
*/
func PDescribe(text string, args ...interface{}) bool {
	args = append(args, internal.Pending)
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
}

/*
XDescribe marks specs within the Describe block as pending.

XDescribe is an alias for PDescribe
*/
var XDescribe = PDescribe

/* Context is an alias for Describe - it generates the exact same kind of Container node */
var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe

/* When is an alias for Describe - it generates the exact same kind of Container node */
func When(text string, args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
}

/* FWhen focuses specs within the When block */
func FWhen(text string, args ...interface{}) bool {
	args = append(args, internal.Focus)
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
}

/* PWhen marks specs within the When block as pending */
func PWhen(text string, args ...interface{}) bool {
	args = append(args, internal.Pending)
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
}

var XWhen = PWhen

/*
It nodes are Subject nodes that contain your spec code and assertions.

Each It node corresponds to an individual Ginkgo spec.  You cannot nest any other Ginkgo nodes within an It node's closure.

You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
In addition, subject nodes can be decorated with a variety of decorators.  You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
func It(text string, args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
}

/*
FIt allows you to focus an individual It.
*/
func FIt(text string, args ...interface{}) bool {
	args = append(args, internal.Focus)
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
}

/*
PIt allows you to mark an individual It as pending.
*/
func PIt(text string, args ...interface{}) bool {
	args = append(args, internal.Pending)
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
}

/*
XIt allows you to mark an individual It as pending.

XIt is an alias for PIt
*/
var XIt = PIt

/*
Specify is an alias for It - it can allow for more natural wording in some contexts.
*/
var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt

/*
By allows you to better document complex Specs.

Generally you should try to keep your Its short and to the point.  This is not always possible, however,
especially in the context of integration tests that capture complex or lengthy workflows.

By allows you to document such flows.  By may be called within a Setup or Subject node (It, BeforeEach, etc...)
and will simply log the passed in text to the GinkgoWriter.  If By is handed a function it will immediately run the function.

By will also generate and attach a ReportEntry to the spec.  This will ensure that By annotations appear in Ginkgo's machine-readable reports.

Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry.
You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
*/
func By(text string, callback ...func()) {
	if !global.Suite.InRunPhase() {
		exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1)))
	}
	value := struct {
		Text     string
		Duration time.Duration
	}{
		Text: text,
	}
	t := time.Now()
	AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t)
	formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor)
	GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT)))
	if len(callback) == 1 {
		callback[0]()
		value.Duration = time.Since(t)
	}
	if len(callback) > 1 {
		panic("just one callback per By, please")
	}
}

/*
BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run.
When running in parallel, each parallel process will call BeforeSuite.

You may only register *one* BeforeSuite handler per test suite.  You typically do so in your bootstrap file at the top level.

You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
func BeforeSuite(body func()) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body))
}

/*
AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed.
AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs.

When running in parallel, each parallel process will call AfterSuite.

You may only register *one* AfterSuite handler per test suite.  You typically do so in your bootstrap file at the top level.

You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
func AfterSuite(body func()) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body))
}

/*
SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information
from that setup to the rest of the suite setup on all processes.  This is useful for performing expensive or singleton setup once, then passing
information from that setup to all parallel processes.

SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them.
The first function runs only on parallel process #1.  The second runs on all processes, but *only* after the first function completes successfully.

The first function (which only runs on process #1) has the signature:

	func() []byte

The byte array returned by the first function is then passed to the second function, which has the signature:

	func(data []byte)

You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool {
	return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1)))
}

/*
SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting clean up into a piece that runs on all processes
and a piece that must only run once - on process #1.

SynchronizedAfterSuite accomplishes this by taking *two* function arguments.  The first runs on all processes.  The second runs only on parallel process #1
and *only* after all other processes have finished and exited.  This ensures that process #1, and any resources it is managing, remain alive until
all other processes are finished.

Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.

You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool {
	return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1)))
}

/*
BeforeEach nodes are Setup nodes whose closures run before It node closures.  When multiple BeforeEach nodes
are defined in nested Container nodes the outermost BeforeEach node closures are run first.

You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
*/
func BeforeEach(args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
}

/*
JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
This can allow you to separate configuration from creation of resources for a spec.

You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
*/
func JustBeforeEach(args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
}

/*
AfterEach nodes are Setup nodes whose closures run after It node closures.  When multiple AfterEach nodes
are defined in nested Container nodes the innermost AfterEach node closures are run first.

Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results.

You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
*/
func AfterEach(args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
}

/*
JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec.

You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
*/
func JustAfterEach(args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
}

/*
BeforeAll nodes are Setup nodes that can occur inside Ordered containers.  They run just once before any specs in the Ordered container run.

Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.

You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
func BeforeAll(args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
}

/*
AfterAll nodes are Setup nodes that can occur inside Ordered containers.  They run just once after all specs in the Ordered container have run.

Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.

Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior.

You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
func AfterAll(args ...interface{}) bool {
	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
}

/*
DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to cleanup after the spec.

DeferCleanup can be passed:
1. A function that takes no arguments and returns no values.
2. A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec).
3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to pass to the function. For example:

    BeforeEach(func() {
        DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO"))
        os.Setenv("FOO", "BAR")
    })

will register a cleanup handler that restores the environment variable "FOO" to its original value (captured by os.Getenv("FOO")) after the spec runs, having set "FOO" to "BAR" for the current spec.

When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node)
When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node)

Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called.  As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
*/
func DeferCleanup(args ...interface{}) {
	fail := func(message string, cl types.CodeLocation) {
		global.Failer.Fail(message, cl)
	}
	pushNode(internal.NewCleanupNode(fail, args...))
}
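core_dsl.go contains the one user-facing consumer of InRunPhase: By calls global.Suite.InRunPhase() and exits with a ByNotDuringRunPhase error if the suite is not yet running. The practical consequence is that By belongs inside Setup and Subject node closures, which execute during the run phase, never inside container bodies, which execute during tree construction. A small illustration (the container-level call is left commented out because it would abort the suite):

	var _ = Describe("phases", func() {
		// By("too early") -- container bodies run during tree construction,
		// where InRunPhase() is false, so this call would exit the suite.
		It("runs during PhaseRun", func() {
			By("safe here: the suite is in the run phase")
		})
	})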

suite.go

Source: suite.go (GitHub)
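
suite.go defines the Phase type (PhaseBuildTopLevel, PhaseBuildTree, PhaseRun) and the one-line InRunPhase method itself, along with the PushNode logic that enforces which node types may be pushed in which phase.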

1package internal
2
3import (
4	"fmt"
5	"time"
6
7	"github.com/onsi/ginkgo/v2/formatter"
8	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
9	"github.com/onsi/ginkgo/v2/internal/parallel_support"
10	"github.com/onsi/ginkgo/v2/reporters"
11	"github.com/onsi/ginkgo/v2/types"
12)
13
14type Phase uint
15
16const (
17	PhaseBuildTopLevel Phase = iota
18	PhaseBuildTree
19	PhaseRun
20)
21
22type Suite struct {
23	tree               *TreeNode
24	topLevelContainers Nodes
25
26	phase Phase
27
28	suiteNodes   Nodes
29	cleanupNodes Nodes
30
31	failer            *Failer
32	reporter          reporters.Reporter
33	writer            WriterInterface
34	outputInterceptor OutputInterceptor
35	interruptHandler  interrupt_handler.InterruptHandlerInterface
36	config            types.SuiteConfig
37
38	skipAll           bool
39	report            types.Report
40	currentSpecReport types.SpecReport
41	currentNode       Node
42
43	client parallel_support.Client
44}
45
46func NewSuite() *Suite {
47	return &Suite{
48		tree:  &TreeNode{},
49		phase: PhaseBuildTopLevel,
50	}
51}
52
53func (suite *Suite) BuildTree() error {
54	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelCotainers and entered
55	// We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
56	suite.phase = PhaseBuildTree
57	for _, topLevelContainer := range suite.topLevelContainers {
58		err := suite.PushNode(topLevelContainer)
59		if err != nil {
60			return err
61		}
62	}
63	return nil
64}
65
66func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
67	if suite.phase != PhaseBuildTree {
68		panic("cannot run before building the tree = call suite.BuildTree() first")
69	}
70	ApplyNestedFocusPolicyToTree(suite.tree)
71	specs := GenerateSpecsFromTreeRoot(suite.tree)
72	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
73
74	suite.phase = PhaseRun
75	suite.client = client
76	suite.failer = failer
77	suite.reporter = reporter
78	suite.writer = writer
79	suite.outputInterceptor = outputInterceptor
80	suite.interruptHandler = interruptHandler
81	suite.config = suiteConfig
82
83	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
84
85	return success, hasProgrammaticFocus
86}
87
88func (suite *Suite) InRunPhase() bool {
89	return suite.phase == PhaseRun
90}
91
92/*
93  Tree Construction methods
94
95  PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
96*/
97
98func (suite *Suite) PushNode(node Node) error {
99	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
100		return suite.pushCleanupNode(node)
101	}
102
103	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
104		return suite.pushSuiteNode(node)
105	}
106
107	if suite.phase == PhaseRun {
108		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
109	}
110
111	if node.MarkedSerial {
112		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
113		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
114			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
115		}
116	}
117
118	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
119		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
120		if firstOrderedNode.IsZero() {
121			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
122		}
123	}
124
125	if node.NodeType == types.NodeTypeContainer {
126		// During PhaseBuildTopLevel we only track the top level containers without entering them
127		// We only enter the top level container nodes during PhaseBuildTree
128		//
129		// This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives
130		// the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs`
131		// is invoked.  This makes the lifecycle easier to reason about and solves issues like #693.
132		if suite.phase == PhaseBuildTopLevel {
133			suite.topLevelContainers = append(suite.topLevelContainers, node)
134			return nil
135		}
136		if suite.phase == PhaseBuildTree {
137			parentTree := suite.tree
138			suite.tree = &TreeNode{Node: node}
139			parentTree.AppendChild(suite.tree)
140			err := func() (err error) {
141				defer func() {
142					if e := recover(); e != nil {
143						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
144					}
145				}()
146				node.Body()
147				return err
148			}()
149			suite.tree = parentTree
150			return err
151		}
152	} else {
153		suite.tree.AppendChild(&TreeNode{Node: node})
154		return nil
155	}
156
157	return nil
158}
159
160func (suite *Suite) pushSuiteNode(node Node) error {
161	if suite.phase == PhaseBuildTree {
162		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
163	}
164
165	if suite.phase == PhaseRun {
166		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
167	}
168
169	switch node.NodeType {
170	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
171		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
172		if len(existingBefores) > 0 {
173			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
174		}
175	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
176		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
177		if len(existingAfters) > 0 {
178			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
179		}
180	}
181
182	suite.suiteNodes = append(suite.suiteNodes, node)
183	return nil
184}
185
186func (suite *Suite) pushCleanupNode(node Node) error {
187	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
188		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
189	}
190
191	switch suite.currentNode.NodeType {
192	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
193		node.NodeType = types.NodeTypeCleanupAfterSuite
194	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
195		node.NodeType = types.NodeTypeCleanupAfterAll
196	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
197		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
198	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
199		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
200	default:
201		node.NodeType = types.NodeTypeCleanupAfterEach
202	}
203
204	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
205	node.NestingLevel = suite.currentNode.NestingLevel
206	suite.cleanupNodes = append(suite.cleanupNodes, node)
207
208	return nil
209}
210
211/*
212  Spec Running methods - used during PhaseRun
213*/
214func (suite *Suite) CurrentSpecReport() types.SpecReport {
215	report := suite.currentSpecReport
216	if suite.writer != nil {
217		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
218	}
219	return report
220}
221
222func (suite *Suite) AddReportEntry(entry ReportEntry) error {
223	if suite.phase != PhaseRun {
224		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
225	}
226	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
227	return nil
228}
229
230func (suite *Suite) isRunningInParallel() bool {
231	return suite.config.ParallelTotal > 1
232}
233
234func (suite *Suite) processCurrentSpecReport() {
235	suite.reporter.DidRun(suite.currentSpecReport)
236	if suite.isRunningInParallel() {
237		suite.client.PostDidRun(suite.currentSpecReport)
238	}
239	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
240
241	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
242		suite.report.SuiteSucceeded = false
243		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
244			suite.skipAll = true
245			if suite.isRunningInParallel() {
246				suite.client.PostAbort()
247			}
248		}
249	}
250}
251
252func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
253	numSpecsThatWillBeRun := specs.CountWithoutSkip()
254
255	suite.report = types.Report{
256		SuitePath:                 suitePath,
257		SuiteDescription:          description,
258		SuiteLabels:               suiteLabels,
259		SuiteConfig:               suite.config,
260		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
261		PreRunStats: types.PreRunStats{
262			TotalSpecs:       len(specs),
263			SpecsThatWillRun: numSpecsThatWillBeRun,
264		},
265		StartTime: time.Now(),
266	}
267
268	suite.reporter.SuiteWillBegin(suite.report)
269	if suite.isRunningInParallel() {
270		suite.client.PostSuiteWillBegin(suite.report)
271	}
272
273	suite.report.SuiteSucceeded = true
274	suite.runBeforeSuite(numSpecsThatWillBeRun)
275
276	if suite.report.SuiteSucceeded {
277		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
278		nextIndex := MakeIncrementingIndexCounter()
279		if suite.isRunningInParallel() {
280			nextIndex = suite.client.FetchNextCounter
281		}
282
		for {
			groupedSpecIdx, err := nextIndex()
			if err != nil {
				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
				suite.report.SuiteSucceeded = false
				break
			}

			if groupedSpecIdx >= len(groupedSpecIndices) {
				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
					suite.client.BlockUntilNonprimaryProcsHaveFinished()
					continue
				}
				break
			}

			// Running a group of specs is complex because of Ordered containers and FlakeAttempts,
			// so that complexity is encapsulated in the notion of a Group that can run.
			// A Group is really just an extension of the suite: it is handed the suite and has
			// access to all its internals. Note that a group is stateful and intended for single use!
			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
		}

		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
			suite.report.SuiteSucceeded = false
		}
	}

	suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)

	interruptStatus := suite.interruptHandler.Status()
	if interruptStatus.Interrupted {
		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
		suite.report.SuiteSucceeded = false
	}
	suite.report.EndTime = time.Now()
	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)

	if suite.config.ParallelProcess == 1 {
		suite.runReportAfterSuite()
	}
	suite.reporter.SuiteDidEnd(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteDidEnd(suite.report)
	}

	return suite.report.SuiteSucceeded
}

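// runBeforeSuite runs the BeforeSuite (or SynchronizedBeforeSuite) node, if one
// exists, specs will actually run, and the suite has not already been interrupted.
// A skip here records a special failure reason and causes every spec to be skipped.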
func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
	interruptStatus := suite.interruptHandler.Status()
	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
	if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     beforeSuiteNode.NodeType,
			LeafNodeLocation: beforeSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
			suite.skipAll = true
		}
		suite.processCurrentSpecReport()
	}
}

func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     afterSuiteNode.NodeType,
			LeafNodeLocation: afterSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
		suite.processCurrentSpecReport()
	}

	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
	if len(afterSuiteCleanup) > 0 {
		for _, cleanupNode := range afterSuiteCleanup {
			suite.currentSpecReport = types.SpecReport{
				LeafNodeType:     cleanupNode.NodeType,
				LeafNodeLocation: cleanupNode.CodeLocation,
				ParallelProcess:  suite.config.ParallelProcess,
			}
			suite.reporter.WillRun(suite.currentSpecReport)
			suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
			suite.processCurrentSpecReport()
		}
	}
}

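// runReportAfterSuite runs each ReportAfterSuite node against the final suite
// report. runSpecs only calls this on the primary process, which first merges the
// other procs' reports in (see runReportAfterSuiteNode).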
func (suite *Suite) runReportAfterSuite() {
	for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     node.NodeType,
			LeafNodeLocation: node.CodeLocation,
			LeafNodeText:     node.Text,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runReportAfterSuiteNode(node, suite.report)
		suite.processCurrentSpecReport()
	}
}

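// reportEach runs a spec's ReportBeforeEach and ReportAfterEach nodes.
// ReportBeforeEach nodes run outermost-first, ReportAfterEach nodes innermost-first,
// and all of them are shielded from interrupts (runNode is handed a nil interrupt
// channel) so the report they receive is never left in an invalid state.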
func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
	nodes := spec.Nodes.WithType(nodeType)
	if nodeType == types.NodeTypeReportAfterEach {
		nodes = nodes.SortedByDescendingNestingLevel()
	}
	if nodeType == types.NodeTypeReportBeforeEach {
		nodes = nodes.SortedByAscendingNestingLevel()
	}
	if len(nodes) == 0 {
		return
	}

	for i := range nodes {
		suite.writer.Truncate()
		suite.outputInterceptor.StartInterceptingOutput()
		report := suite.currentSpecReport
		nodes[i].Body = func() {
			nodes[i].ReportEachBody(report)
		}
		suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
			"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node.  To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
			nodeType, nodeType, nodeType,
			nodes[i].CodeLocation,
		))
		state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
		suite.interruptHandler.ClearInterruptPlaceholderMessage()
		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
		// Also, if the reporter is ever aborted, always override the state to propagate the abort.
		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
			suite.currentSpecReport.State = state
			suite.currentSpecReport.Failure = failure
		}
		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
	}
}

func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
	if suite.config.DryRun {
		suite.currentSpecReport.State = types.SpecStatePassed
		return
	}

	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	var err error
	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
	case types.NodeTypeCleanupAfterSuite:
		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
			err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
		}
		if err == nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		}
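	// SynchronizedBeforeSuite: proc 1 runs the first function and posts its outcome
	// (plus any returned data) to the server; the other procs block until that outcome
	// arrives and run the all-procs function only if proc 1 passed.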
	case types.NodeTypeSynchronizedBeforeSuite:
		var data []byte
		var runAllProcs bool
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
			}
			node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
			if suite.config.ParallelTotal > 1 {
				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutput()
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
				} else {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
				}
			}
			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
		} else {
			var proc1State types.SpecState
			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
			switch proc1State {
			case types.SpecStatePassed:
				runAllProcs = true
			case types.SpecStateFailed, types.SpecStatePanicked:
				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
				suite.currentSpecReport.State = proc1State
			}
		}
		if runAllProcs {
			node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		}
	case types.NodeTypeSynchronizedAfterSuite:
		node.Body = node.SynchronizedAfterSuiteAllProcsBody
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
			}
			if err == nil {
				if suite.config.ParallelTotal > 1 {
					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
				}

				node.Body = node.SynchronizedAfterSuiteProc1Body
				state, failure := suite.runNode(node, interruptChannel, "")
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
				}
			}
		}
	}

	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
	}

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
}

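// runReportAfterSuiteNode hands the suite report to a ReportAfterSuite node. In
// parallel runs, proc 1 first blocks until the other procs' reports have been
// aggregated and merges them in; like the per-spec report nodes, this node is
// shielded from interrupts.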
func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	if suite.config.ParallelTotal > 1 {
		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
		if err != nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
			return
		}
		report = report.Add(aggregatedReport)
	}

	node.Body = func() { node.ReportAfterSuiteBody(report) }
	suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
		"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node.  To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
		node.CodeLocation,
	))
	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
	suite.interruptHandler.ClearInterruptPlaceholderMessage()

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
}

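// runNode executes a single node's body, fills in the Failure metadata that
// identifies where a failure came from, and races the body against the interrupt
// channel; it returns the resulting state and failure.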
func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
	if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
	}

	suite.currentNode = node
	defer func() {
		suite.currentNode = Node{}
	}()

	if suite.config.EmitSpecProgress {
		if text == "" {
			text = "TOP-LEVEL"
		}
		s := fmt.Sprintf("[%s] %s\n  %s\n", node.NodeType.String(), text, node.CodeLocation.String())
		suite.writer.Write([]byte(s))
	}

	var failure types.Failure
	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
	if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		failure.FailureNodeContext = types.FailureNodeIsLeafNode
	} else if node.NestingLevel <= 0 {
		failure.FailureNodeContext = types.FailureNodeAtTopLevel
	} else {
		failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
	}

	outcomeC := make(chan types.SpecState)
	failureC := make(chan types.Failure)

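	// Run the body in a goroutine. The deferred recover treats both an actual panic
	// and a premature exit (finished still false, e.g. a body that bails out via
	// runtime.Goexit) as a panic, then drains the failer for the definitive outcome.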
	go func() {
		finished := false
		defer func() {
			if e := recover(); e != nil || !finished {
				suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
			}

			outcome, failureFromRun := suite.failer.Drain()
			outcomeC <- outcome
			failureC <- failureFromRun
		}()

		node.Body()
		finished = true
	}()

	select {
	case outcome := <-outcomeC:
		failureFromRun := <-failureC
		if outcome == types.SpecStatePassed {
			return outcome, types.Failure{}
		}
		failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
		return outcome, failure
	case <-interruptChannel:
		failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
		return types.SpecStateInterrupted, failure
	}
}

func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
	return types.Failure{
		Message:             message,
		Location:            node.CodeLocation,
		FailureNodeContext:  types.FailureNodeIsLeafNode,
		FailureNodeType:     node.NodeType,
		FailureNodeLocation: node.CodeLocation,
	}
}

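// max is a small local helper (Go only gained a built-in max in 1.21).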
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
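
The goroutine-plus-select shape in runNode is the heart of how Ginkgo keeps node bodies interruptible. The standalone sketch below is not part of Ginkgo's API (runBody and the outcome type are hypothetical names introduced here); it is a minimal Go reproduction of the same pattern, assuming you only need pass/panic/interrupt outcomes:

package main

import (
	"fmt"
	"time"
)

// outcome stands in for types.SpecState (illustrative only).
type outcome string

// runBody mirrors the shape of Suite.runNode: the body runs in a goroutine,
// the deferred recover converts a panic or a premature exit into a failure,
// and an interrupt channel can preempt the result.
func runBody(body func(), interrupt <-chan struct{}) outcome {
	outcomeC := make(chan outcome, 1)
	go func() {
		finished := false
		defer func() {
			if e := recover(); e != nil || !finished {
				outcomeC <- "panicked"
				return
			}
			outcomeC <- "passed"
		}()
		body()
		finished = true
	}()

	select {
	case o := <-outcomeC:
		return o
	case <-interrupt:
		return "interrupted"
	}
}

func main() {
	interrupt := make(chan struct{})
	fmt.Println(runBody(func() {}, interrupt))                // passed
	fmt.Println(runBody(func() { panic("boom") }, interrupt)) // panicked

	go func() { time.Sleep(10 * time.Millisecond); close(interrupt) }()
	fmt.Println(runBody(func() { select {} }, interrupt)) // interrupted
}

The buffered channel keeps the body goroutine from blocking if the interrupt wins the race; the real implementation instead reports interrupts through the interrupt handler and drains the failer for the authoritative state and failure details.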