How to use the runSuiteNode method of Ginkgo's internal package

Best Ginkgo code snippet using internal.runSuiteNode
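runSuiteNode is the unexported Suite method that executes suite-level nodes: BeforeSuite, AfterSuite, their Synchronized variants, and after-suite cleanup nodes. It short-circuits under --dry-run, intercepts output, coordinates across processes during parallel runs, and records timing, state, and captured output on the current spec report. The full suite.go source follows, with a usage sketch at the end.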

suite.go

Source: suite.go on GitHub

package internal

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

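// Phase tracks where the suite is in its lifecycle: collecting top level containers,
// building the spec tree, or running specs.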
type Phase uint

const (
	PhaseBuildTopLevel Phase = iota
	PhaseBuildTree
	PhaseRun
)

type Suite struct {
	tree               *TreeNode
	topLevelContainers Nodes

	phase Phase

	suiteNodes   Nodes
	cleanupNodes Nodes

	failer            *Failer
	reporter          reporters.Reporter
	writer            WriterInterface
	outputInterceptor OutputInterceptor
	interruptHandler  interrupt_handler.InterruptHandlerInterface
	config            types.SuiteConfig

	skipAll           bool
	report            types.Report
	currentSpecReport types.SpecReport
	currentNode       Node

	client parallel_support.Client
}

func NewSuite() *Suite {
	return &Suite{
		tree:  &TreeNode{},
		phase: PhaseBuildTopLevel,
	}
}

func (suite *Suite) BuildTree() error {
	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers but not entered.
	// We now enter PhaseBuildTree, where these top level containers are entered and added to the spec tree.
	suite.phase = PhaseBuildTree
	for _, topLevelContainer := range suite.topLevelContainers {
		err := suite.PushNode(topLevelContainer)
		if err != nil {
			return err
		}
	}
	return nil
}

func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
	if suite.phase != PhaseBuildTree {
		panic("cannot run before building the tree - call suite.BuildTree() first")
	}
	ApplyNestedFocusPolicyToTree(suite.tree)
	specs := GenerateSpecsFromTreeRoot(suite.tree)
	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)

	suite.phase = PhaseRun
	suite.client = client
	suite.failer = failer
	suite.reporter = reporter
	suite.writer = writer
	suite.outputInterceptor = outputInterceptor
	suite.interruptHandler = interruptHandler
	suite.config = suiteConfig

	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)

	return success, hasProgrammaticFocus
}

/*
  Tree Construction methods

  PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
*/

func (suite *Suite) PushNode(node Node) error {
	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		return suite.pushCleanupNode(node)
	}

	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
		return suite.pushSuiteNode(node)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
	}

	if node.MarkedSerial {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if firstOrderedNode.IsZero() {
			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType == types.NodeTypeContainer {
		// During PhaseBuildTopLevel we only track the top level containers without entering them
		// We only enter the top level container nodes during PhaseBuildTree
		//
		// This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
		// the user an opportunity to load suite configuration information in the `TestX` go test hook just before `RunSpecs`
		// is invoked.  This makes the lifecycle easier to reason about and solves issues like #693.
		if suite.phase == PhaseBuildTopLevel {
			suite.topLevelContainers = append(suite.topLevelContainers, node)
			return nil
		}
		if suite.phase == PhaseBuildTree {
			parentTree := suite.tree
			suite.tree = &TreeNode{Node: node}
			parentTree.AppendChild(suite.tree)
			err := func() (err error) {
				defer func() {
					if e := recover(); e != nil {
						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
					}
				}()
				node.Body()
				return err
			}()
			suite.tree = parentTree
			return err
		}
	} else {
		suite.tree.AppendChild(&TreeNode{Node: node})
		return nil
	}

	return nil
}

func (suite *Suite) pushSuiteNode(node Node) error {
	if suite.phase == PhaseBuildTree {
		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
	}

	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
		if len(existingBefores) > 0 {
			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
		}
	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
		if len(existingAfters) > 0 {
			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
		}
	}

	suite.suiteNodes = append(suite.suiteNodes, node)
	return nil
}

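// pushCleanupNode registers a cleanup node (e.g. one created by DeferCleanup) while specs are
// running: the cleanup node's type and nesting level are derived from the node currently executing.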
func (suite *Suite) pushCleanupNode(node Node) error {
	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
	}

	switch suite.currentNode.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		node.NodeType = types.NodeTypeCleanupAfterSuite
	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
		node.NodeType = types.NodeTypeCleanupAfterAll
	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
	default:
		node.NodeType = types.NodeTypeCleanupAfterEach
	}

	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
	node.NestingLevel = suite.currentNode.NestingLevel
	suite.cleanupNodes = append(suite.cleanupNodes, node)

	return nil
}

/*
  Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
	report := suite.currentSpecReport
	if suite.writer != nil {
		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	}
	return report
}

func (suite *Suite) AddReportEntry(entry ReportEntry) error {
	if suite.phase != PhaseRun {
		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
	}
	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
	return nil
}

func (suite *Suite) isRunningInParallel() bool {
	return suite.config.ParallelTotal > 1
}

func (suite *Suite) processCurrentSpecReport() {
	suite.reporter.DidRun(suite.currentSpecReport)
	if suite.isRunningInParallel() {
		suite.client.PostDidRun(suite.currentSpecReport)
	}
	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)

	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.report.SuiteSucceeded = false
		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
			suite.skipAll = true
			if suite.isRunningInParallel() {
				suite.client.PostAbort()
			}
		}
	}
}

func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
	numSpecsThatWillBeRun := specs.CountWithoutSkip()

	suite.report = types.Report{
		SuitePath:                 suitePath,
		SuiteDescription:          description,
		SuiteLabels:               suiteLabels,
		SuiteConfig:               suite.config,
		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
		PreRunStats: types.PreRunStats{
			TotalSpecs:       len(specs),
			SpecsThatWillRun: numSpecsThatWillBeRun,
		},
		StartTime: time.Now(),
	}

	suite.reporter.SuiteWillBegin(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteWillBegin(suite.report)
	}

	suite.report.SuiteSucceeded = true
	suite.runBeforeSuite(numSpecsThatWillBeRun)

	if suite.report.SuiteSucceeded {
		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
		nextIndex := MakeIncrementingIndexCounter()
		if suite.isRunningInParallel() {
			nextIndex = suite.client.FetchNextCounter
		}

		for {
			groupedSpecIdx, err := nextIndex()
			if err != nil {
				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
				suite.report.SuiteSucceeded = false
				break
			}

			if groupedSpecIdx >= len(groupedSpecIndices) {
				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
					suite.client.BlockUntilNonprimaryProcsHaveFinished()
					continue
				}
				break
			}

			// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
			// we encapsulate that complexity in the notion of a Group that can run
			// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
			// Note that group is stateful and intended for single use!
			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
		}

		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
			suite.report.SuiteSucceeded = false
		}
	}

	suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)

	interruptStatus := suite.interruptHandler.Status()
	if interruptStatus.Interrupted {
		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
		suite.report.SuiteSucceeded = false
	}
	suite.report.EndTime = time.Now()
	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)

	if suite.config.ParallelProcess == 1 {
		suite.runReportAfterSuite()
	}
	suite.reporter.SuiteDidEnd(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteDidEnd(suite.report)
	}

	return suite.report.SuiteSucceeded
}

func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
	interruptStatus := suite.interruptHandler.Status()
	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
	if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     beforeSuiteNode.NodeType,
			LeafNodeLocation: beforeSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
			suite.skipAll = true
		}
		suite.processCurrentSpecReport()
	}
}

func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     afterSuiteNode.NodeType,
			LeafNodeLocation: afterSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
		suite.processCurrentSpecReport()
	}

	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
	if len(afterSuiteCleanup) > 0 {
		for _, cleanupNode := range afterSuiteCleanup {
			suite.currentSpecReport = types.SpecReport{
				LeafNodeType:     cleanupNode.NodeType,
				LeafNodeLocation: cleanupNode.CodeLocation,
				ParallelProcess:  suite.config.ParallelProcess,
			}
			suite.reporter.WillRun(suite.currentSpecReport)
			suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
			suite.processCurrentSpecReport()
		}
	}
}

func (suite *Suite) runReportAfterSuite() {
	for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     node.NodeType,
			LeafNodeLocation: node.CodeLocation,
			LeafNodeText:     node.Text,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runReportAfterSuiteNode(node, suite.report)
		suite.processCurrentSpecReport()
	}
}

func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
	if suite.config.DryRun {
		return
	}

	nodes := spec.Nodes.WithType(nodeType)
	if nodeType == types.NodeTypeReportAfterEach {
		nodes = nodes.SortedByDescendingNestingLevel()
	}
	if nodeType == types.NodeTypeReportBeforeEach {
		nodes = nodes.SortedByAscendingNestingLevel()
	}
	if len(nodes) == 0 {
		return
	}

	for i := range nodes {
		suite.writer.Truncate()
		suite.outputInterceptor.StartInterceptingOutput()
		report := suite.currentSpecReport
		nodes[i].Body = func() {
			nodes[i].ReportEachBody(report)
		}
		suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
			"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node.  To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
			nodeType, nodeType, nodeType,
			nodes[i].CodeLocation,
		))
		state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
		suite.interruptHandler.ClearInterruptPlaceholderMessage()
		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
		// Also, if the reporter is ever aborted, always override the state to propagate the abort.
		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
			suite.currentSpecReport.State = state
			suite.currentSpecReport.Failure = failure
		}
		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
	}
}

func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
	if suite.config.DryRun {
		suite.currentSpecReport.State = types.SpecStatePassed
		return
	}

	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	var err error
	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
	case types.NodeTypeCleanupAfterSuite:
		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
			err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
		}
		if err == nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		}
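	// For SynchronizedBeforeSuite, process 1 runs the first function and posts its result to the
	// parallel-support server; all other processes block until process 1 reports, then run the
	// second function with the data it returned.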
	case types.NodeTypeSynchronizedBeforeSuite:
		var data []byte
		var runAllProcs bool
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
			}
			node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
			if suite.config.ParallelTotal > 1 {
				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutput()
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
				} else {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
				}
			}
			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
		} else {
			var proc1State types.SpecState
			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
			switch proc1State {
			case types.SpecStatePassed:
				runAllProcs = true
			case types.SpecStateFailed, types.SpecStatePanicked:
				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
				suite.currentSpecReport.State = proc1State
			}
		}
		if runAllProcs {
			node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		}
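	// For SynchronizedAfterSuite, every process runs the first function; process 1 then waits for
	// the nonprimary processes to finish before running the second function exactly once.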
	case types.NodeTypeSynchronizedAfterSuite:
		node.Body = node.SynchronizedAfterSuiteAllProcsBody
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
			}
			if err == nil {
				if suite.config.ParallelTotal > 1 {
					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
				}

				node.Body = node.SynchronizedAfterSuiteProc1Body
				state, failure := suite.runNode(node, interruptChannel, "")
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
				}
			}
		}
	}

	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
	}

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()

	return
}

func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
	if suite.config.DryRun {
		suite.currentSpecReport.State = types.SpecStatePassed
		return
	}

	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	if suite.config.ParallelTotal > 1 {
		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
		if err != nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
			return
		}
		report = report.Add(aggregatedReport)
	}

	node.Body = func() { node.ReportAfterSuiteBody(report) }
	suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
		"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node.  To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
		node.CodeLocation,
	))
	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
	suite.interruptHandler.ClearInterruptPlaceholderMessage()

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()

	return
}

func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
	if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
	}

	suite.currentNode = node
	defer func() {
		suite.currentNode = Node{}
	}()

	if suite.config.EmitSpecProgress {
		if text == "" {
			text = "TOP-LEVEL"
		}
		s := fmt.Sprintf("[%s] %s\n  %s\n", node.NodeType.String(), text, node.CodeLocation.String())
		suite.writer.Write([]byte(s))
	}

	var failure types.Failure
	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
	if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		failure.FailureNodeContext = types.FailureNodeIsLeafNode
	} else if node.NestingLevel <= 0 {
		failure.FailureNodeContext = types.FailureNodeAtTopLevel
	} else {
		failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
	}

	outcomeC := make(chan types.SpecState)
	failureC := make(chan types.Failure)

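	// The node body runs in its own goroutine so the select below can race its completion against
	// the interrupt channel. The deferred recover catches panics and early goroutine exits
	// (finished == false) and routes them through the failer before draining the outcome.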
	go func() {
		finished := false
		defer func() {
			if e := recover(); e != nil || !finished {
				suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
			}

			outcome, failureFromRun := suite.failer.Drain()
			outcomeC <- outcome
			failureC <- failureFromRun
		}()

		node.Body()
		finished = true
	}()

	select {
	case outcome := <-outcomeC:
		failureFromRun := <-failureC
		if outcome == types.SpecStatePassed {
			return outcome, types.Failure{}
		}
		failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
		return outcome, failure
	case <-interruptChannel:
		failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
		return types.SpecStateInterrupted, failure
	}
}

func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
	return types.Failure{
		Message:             message,
		Location:            node.CodeLocation,
		FailureNodeContext:  types.FailureNodeIsLeafNode,
		FailureNodeType:     node.NodeType,
		FailureNodeLocation: node.CodeLocation,
	}
}

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
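runSuiteNode is internal to Ginkgo and is never called directly; it runs whenever a suite-level node fires. Below is a minimal sketch of a Ginkgo v2 suite that exercises it through the public API (the package name books_test and the suite description are illustrative, not taken from the source above):

package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// The TestX hook hands control to Ginkgo: RunSpecs builds the spec tree and enters PhaseRun.
func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}

// BeforeSuite and AfterSuite are registered via pushSuiteNode; when the suite runs,
// each executes through runSuiteNode and is reported as its own SpecReport.
var _ = BeforeSuite(func() {
	// one-time setup for the whole suite
})

var _ = AfterSuite(func() {
	// one-time teardown for the whole suite
})

var _ = Describe("Books", func() {
	It("runs a spec", func() {
		Expect(true).To(BeTrue())
	})
})

In a parallel run (for example `ginkgo -p`), replacing BeforeSuite with the synchronized form exercises the NodeTypeSynchronizedBeforeSuite branch of runSuiteNode: the first function runs only on process 1, and the bytes it returns are handed to the second function on every process:

var _ = SynchronizedBeforeSuite(func() []byte {
	return []byte("shared-config") // runs on process 1 only
}, func(data []byte) {
	Expect(string(data)).To(Equal("shared-config")) // runs on every process
})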