How to use the SuiteNodeDuringRunPhase method of the types package

Best Ginkgo code snippet using types.SuiteNodeDuringRunPhase
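
types.GinkgoErrors.SuiteNodeDuringRunPhase is the error Ginkgo v2 constructs when a suite-level node (BeforeSuite, AfterSuite, their Synchronized variants, or ReportAfterSuite) is registered while the suite is already running. In the suite.go source below, pushSuiteNode returns it whenever suite.phase == PhaseRun. As a minimal sketch, the error can be constructed directly, exactly as pushSuiteNode does (this assumes Ginkgo v2 is on your module path; the package name is illustrative):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	// Build the same error pushSuiteNode returns during PhaseRun:
	// it takes the offending node's type and code location.
	cl := types.NewCodeLocation(0)
	err := types.GinkgoErrors.SuiteNodeDuringRunPhase(types.NodeTypeBeforeSuite, cl)
	fmt.Println(err.Error())
}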


Source: suite.go (GitHub)

package internal

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
	"github.com/onsi/ginkgo/v2/internal/parallel_support"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

type Phase uint

const (
	PhaseBuildTopLevel Phase = iota
	PhaseBuildTree
	PhaseRun
)

type Suite struct {
	tree               *TreeNode
	topLevelContainers Nodes

	phase Phase

	suiteNodes   Nodes
	cleanupNodes Nodes

	failer            *Failer
	reporter          reporters.Reporter
	writer            WriterInterface
	outputInterceptor OutputInterceptor
	interruptHandler  interrupt_handler.InterruptHandlerInterface
	config            types.SuiteConfig

	skipAll           bool
	report            types.Report
	currentSpecReport types.SpecReport
	currentNode       Node

	client parallel_support.Client
}

func NewSuite() *Suite {
	return &Suite{
		tree:  &TreeNode{},
		phase: PhaseBuildTopLevel,
	}
}

func (suite *Suite) BuildTree() error {
	// During PhaseBuildTopLevel, the top-level containers are stored in suite.topLevelContainers but not entered.
	// We now enter PhaseBuildTree, where those top-level containers are entered and added to the spec tree.
	suite.phase = PhaseBuildTree
	for _, topLevelContainer := range suite.topLevelContainers {
		err := suite.PushNode(topLevelContainer)
		if err != nil {
			return err
		}
	}
	return nil
}

func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
	if suite.phase != PhaseBuildTree {
		panic("cannot run before building the tree; call suite.BuildTree() first")
	}
	ApplyNestedFocusPolicyToTree(suite.tree)
	specs := GenerateSpecsFromTreeRoot(suite.tree)
	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)

	suite.phase = PhaseRun
	suite.client = client
	suite.failer = failer
	suite.reporter = reporter
	suite.writer = writer
	suite.outputInterceptor = outputInterceptor
	suite.interruptHandler = interruptHandler
	suite.config = suiteConfig

	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)

	return success, hasProgrammaticFocus
}

func (suite *Suite) InRunPhase() bool {
	return suite.phase == PhaseRun
}

/*
  Tree Construction methods

  PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
*/

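// PushNode dispatches cleanup nodes to pushCleanupNode and suite-level nodes
// to pushSuiteNode; every other node is appended to the spec tree, and
// container bodies are only entered once the suite reaches PhaseBuildTree.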
func (suite *Suite) PushNode(node Node) error {
	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		return suite.pushCleanupNode(node)
	}

	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
		return suite.pushSuiteNode(node)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
	}

	if node.MarkedSerial {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
		if firstOrderedNode.IsZero() {
			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
		}
	}

	if node.NodeType == types.NodeTypeContainer {
		// During PhaseBuildTopLevel we only track the top level containers without entering them
		// We only enter the top level container nodes during PhaseBuildTree
		//
		// This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
		// the user an opportunity to load suite configuration information in the `TestX` go test hook just before `RunSpecs`
		// is invoked.  This makes the lifecycle easier to reason about and solves issues like #693.
		if suite.phase == PhaseBuildTopLevel {
			suite.topLevelContainers = append(suite.topLevelContainers, node)
			return nil
		}
		if suite.phase == PhaseBuildTree {
			parentTree := suite.tree
			suite.tree = &TreeNode{Node: node}
			parentTree.AppendChild(suite.tree)
			err := func() (err error) {
				defer func() {
					if e := recover(); e != nil {
						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
					}
				}()
				node.Body()
				return err
			}()
			suite.tree = parentTree
			return err
		}
	} else {
		suite.tree.AppendChild(&TreeNode{Node: node})
		return nil
	}

	return nil
}

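// pushSuiteNode registers suite-level setup, teardown, and reporting nodes
// (BeforeSuite, AfterSuite, their Synchronized variants, and ReportAfterSuite).
// It rejects suite nodes declared inside containers and, once the suite has
// entered PhaseRun, fails with types.GinkgoErrors.SuiteNodeDuringRunPhase.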
func (suite *Suite) pushSuiteNode(node Node) error {
	if suite.phase == PhaseBuildTree {
		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
	}

	if suite.phase == PhaseRun {
		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
	}

	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
		if len(existingBefores) > 0 {
			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
		}
	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
		if len(existingAfters) > 0 {
			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
		}
	}

	suite.suiteNodes = append(suite.suiteNodes, node)
	return nil
}

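// pushCleanupNode handles DeferCleanup registrations. Cleanup nodes are only
// valid during PhaseRun; the node's type is derived from the node currently
// executing so the cleanup runs at the matching AfterEach, AfterAll, or
// AfterSuite point.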
func (suite *Suite) pushCleanupNode(node Node) error {
	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
	}

	switch suite.currentNode.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
		node.NodeType = types.NodeTypeCleanupAfterSuite
	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
		node.NodeType = types.NodeTypeCleanupAfterAll
	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
	default:
		node.NodeType = types.NodeTypeCleanupAfterEach
	}

	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
	node.NestingLevel = suite.currentNode.NestingLevel
	suite.cleanupNodes = append(suite.cleanupNodes, node)

	return nil
}

/*
  Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
	report := suite.currentSpecReport
	if suite.writer != nil {
		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	}
	return report
}

func (suite *Suite) AddReportEntry(entry ReportEntry) error {
	if suite.phase != PhaseRun {
		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
	}
	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
	return nil
}

func (suite *Suite) isRunningInParallel() bool {
	return suite.config.ParallelTotal > 1
}

func (suite *Suite) processCurrentSpecReport() {
	suite.reporter.DidRun(suite.currentSpecReport)
	if suite.isRunningInParallel() {
		suite.client.PostDidRun(suite.currentSpecReport)
	}
	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)

	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.report.SuiteSucceeded = false
		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
			suite.skipAll = true
			if suite.isRunningInParallel() {
				suite.client.PostAbort()
			}
		}
	}
}

func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
	numSpecsThatWillBeRun := specs.CountWithoutSkip()

	suite.report = types.Report{
		SuitePath:                 suitePath,
		SuiteDescription:          description,
		SuiteLabels:               suiteLabels,
		SuiteConfig:               suite.config,
		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
		PreRunStats: types.PreRunStats{
			TotalSpecs:       len(specs),
			SpecsThatWillRun: numSpecsThatWillBeRun,
		},
		StartTime: time.Now(),
	}

	suite.reporter.SuiteWillBegin(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteWillBegin(suite.report)
	}

	suite.report.SuiteSucceeded = true
	suite.runBeforeSuite(numSpecsThatWillBeRun)

	if suite.report.SuiteSucceeded {
		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
		nextIndex := MakeIncrementingIndexCounter()
		if suite.isRunningInParallel() {
			nextIndex = suite.client.FetchNextCounter
		}

		for {
			groupedSpecIdx, err := nextIndex()
			if err != nil {
				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
				suite.report.SuiteSucceeded = false
				break
			}

			if groupedSpecIdx >= len(groupedSpecIndices) {
				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
					suite.client.BlockUntilNonprimaryProcsHaveFinished()
					continue
				}
				break
			}

			// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
			// we encapsulate that complexity in the notion of a Group that can run
			// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
			// Note that group is stateful and intended for single use!
			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
		}

		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
			suite.report.SuiteSucceeded = false
		}
	}

	suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)

	interruptStatus := suite.interruptHandler.Status()
	if interruptStatus.Interrupted {
		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
		suite.report.SuiteSucceeded = false
	}
	suite.report.EndTime = time.Now()
	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)

	if suite.config.ParallelProcess == 1 {
		suite.runReportAfterSuite()
	}
	suite.reporter.SuiteDidEnd(suite.report)
	if suite.isRunningInParallel() {
		suite.client.PostSuiteDidEnd(suite.report)
	}

	return suite.report.SuiteSucceeded
}

func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
	interruptStatus := suite.interruptHandler.Status()
	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
	if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     beforeSuiteNode.NodeType,
			LeafNodeLocation: beforeSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
			suite.skipAll = true
		}
		suite.processCurrentSpecReport()
	}
}

func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     afterSuiteNode.NodeType,
			LeafNodeLocation: afterSuiteNode.CodeLocation,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
		suite.processCurrentSpecReport()
	}

	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
	if len(afterSuiteCleanup) > 0 {
		for _, cleanupNode := range afterSuiteCleanup {
			suite.currentSpecReport = types.SpecReport{
				LeafNodeType:     cleanupNode.NodeType,
				LeafNodeLocation: cleanupNode.CodeLocation,
				ParallelProcess:  suite.config.ParallelProcess,
			}
			suite.reporter.WillRun(suite.currentSpecReport)
			suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
			suite.processCurrentSpecReport()
		}
	}
}

func (suite *Suite) runReportAfterSuite() {
	for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
		suite.currentSpecReport = types.SpecReport{
			LeafNodeType:     node.NodeType,
			LeafNodeLocation: node.CodeLocation,
			LeafNodeText:     node.Text,
			ParallelProcess:  suite.config.ParallelProcess,
		}
		suite.reporter.WillRun(suite.currentSpecReport)
		suite.runReportAfterSuiteNode(node, suite.report)
		suite.processCurrentSpecReport()
	}
}

func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
	nodes := spec.Nodes.WithType(nodeType)
	if nodeType == types.NodeTypeReportAfterEach {
		nodes = nodes.SortedByDescendingNestingLevel()
	}
	if nodeType == types.NodeTypeReportBeforeEach {
		nodes = nodes.SortedByAscendingNestingLevel()
	}
	if len(nodes) == 0 {
		return
	}

	for i := range nodes {
		suite.writer.Truncate()
		suite.outputInterceptor.StartInterceptingOutput()
		report := suite.currentSpecReport
		nodes[i].Body = func() {
			nodes[i].ReportEachBody(report)
		}
		suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
			"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node.  To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
			nodeType, nodeType, nodeType,
			nodes[i].CodeLocation,
		))
		state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
		suite.interruptHandler.ClearInterruptPlaceholderMessage()
		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
		// Also, if the reporter is ever aborted, always override the state to propagate the abort.
		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
			suite.currentSpecReport.State = state
			suite.currentSpecReport.Failure = failure
		}
		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
	}
}

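// runSuiteNode runs a single suite-level node. For the Synchronized variants
// it coordinates the parallel processes through the parallel_support client:
// proc 1 runs its dedicated body and publishes the outcome, while the other
// procs block until that data is available.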
func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
	if suite.config.DryRun {
		suite.currentSpecReport.State = types.SpecStatePassed
		return
	}

	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	var err error
	switch node.NodeType {
	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
	case types.NodeTypeCleanupAfterSuite:
		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
			err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
		}
		if err == nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		}
	case types.NodeTypeSynchronizedBeforeSuite:
		var data []byte
		var runAllProcs bool
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
			}
			node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
			if suite.config.ParallelTotal > 1 {
				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
				suite.outputInterceptor.StartInterceptingOutput()
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
				} else {
					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
				}
			}
			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
		} else {
			var proc1State types.SpecState
			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
			switch proc1State {
			case types.SpecStatePassed:
				runAllProcs = true
			case types.SpecStateFailed, types.SpecStatePanicked:
				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
				suite.currentSpecReport.State = proc1State
			}
		}
		if runAllProcs {
			node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		}
	case types.NodeTypeSynchronizedAfterSuite:
		node.Body = node.SynchronizedAfterSuiteAllProcsBody
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
		if suite.config.ParallelProcess == 1 {
			if suite.config.ParallelTotal > 1 {
				err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
			}
			if err == nil {
				if suite.config.ParallelTotal > 1 {
					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
				}

				node.Body = node.SynchronizedAfterSuiteProc1Body
				state, failure := suite.runNode(node, interruptChannel, "")
				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
				}
			}
		}
	}

	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
	}

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()

	return
}

func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
	suite.writer.Truncate()
	suite.outputInterceptor.StartInterceptingOutput()
	suite.currentSpecReport.StartTime = time.Now()

	if suite.config.ParallelTotal > 1 {
		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
		if err != nil {
			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
			return
		}
		report = report.Add(aggregatedReport)
	}

	node.Body = func() { node.ReportAfterSuiteBody(report) }
	suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
		"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node.  To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
		node.CodeLocation,
	))
	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
	suite.interruptHandler.ClearInterruptPlaceholderMessage()

	suite.currentSpecReport.EndTime = time.Now()
	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()

	return
}

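// runNode executes a node's body on a fresh goroutine and selects between the
// body's outcome and the interrupt channel, draining the Failer to produce the
// node's final SpecState and Failure.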
func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
	if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
		suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
	}

	suite.currentNode = node
	defer func() {
		suite.currentNode = Node{}
	}()

	if suite.config.EmitSpecProgress {
		if text == "" {
			text = "TOP-LEVEL"
		}
		s := fmt.Sprintf("[%s] %s\n  %s\n", node.NodeType.String(), text, node.CodeLocation.String())
		suite.writer.Write([]byte(s))
	}

	var failure types.Failure
	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
	if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		failure.FailureNodeContext = types.FailureNodeIsLeafNode
	} else if node.NestingLevel <= 0 {
		failure.FailureNodeContext = types.FailureNodeAtTopLevel
	} else {
		failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
	}

	outcomeC := make(chan types.SpecState)
	failureC := make(chan types.Failure)

	go func() {
		finished := false
		defer func() {
			if e := recover(); e != nil || !finished {
				suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
			}

			outcome, failureFromRun := suite.failer.Drain()
			outcomeC <- outcome
			failureC <- failureFromRun
		}()

		node.Body()
		finished = true
	}()

	select {
	case outcome := <-outcomeC:
		failureFromRun := <-failureC
		if outcome == types.SpecStatePassed {
			return outcome, types.Failure{}
		}
		failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
		return outcome, failure
	case <-interruptChannel:
		failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
		return types.SpecStateInterrupted, failure
	}
}

func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
	return types.Failure{
		Message:             message,
		Location:            node.CodeLocation,
		FailureNodeContext:  types.FailureNodeIsLeafNode,
		FailureNodeType:     node.NodeType,
		FailureNodeLocation: node.CodeLocation,
	}
}

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
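
To see when this error fires in practice, here is a hypothetical spec that registers a BeforeSuite node from inside a running It. By that point the suite is in PhaseRun, so PushNode routes the node to pushSuiteNode, which returns the SuiteNodeDuringRunPhase error and the suite aborts. This is a sketch assuming a standard Ginkgo v2 and Gomega setup; the package name and spec text are illustrative.

package demo_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestDemo(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Demo Suite")
}

var _ = Describe("suite node misuse", func() {
	It("fails when BeforeSuite is registered mid-run", func() {
		// We are already in PhaseRun here, so this registration is
		// rejected with types.GinkgoErrors.SuiteNodeDuringRunPhase.
		BeforeSuite(func() {})
	})
})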