How to use the SortedByAscendingNestingLevel method of Ginkgo's internal package

Representative Ginkgo code snippet using internal.SortedByAscendingNestingLevel, taken from the spec-group runner (group.go) in Ginkgo v2.
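Before reading the full source, it helps to know what the method does: SortedByAscendingNestingLevel is defined on Ginkgo's internal Nodes collection and returns the nodes ordered shallowest-first, so setup nodes declared in outer containers come before those declared in inner ones. The internal package cannot be imported from outside Ginkgo, so the following is a minimal, self-contained sketch of that behavior; the Node and Nodes types are stand-ins for the internal ones, and the stable-sort implementation is an assumption based on the ordering guarantees visible in the source below.

package main

// Sketch of SortedByAscendingNestingLevel's behavior using stand-in types.
// Assumption: a stable sort on NestingLevel, so nodes at the same level keep
// their declaration order (sibling BeforeEach nodes run in the order written).

import (
	"fmt"
	"sort"
)

// Node is a hypothetical stand-in for internal.Node with only the fields this sketch needs.
type Node struct {
	Text         string
	NestingLevel int
}

type Nodes []Node

// SortedByAscendingNestingLevel returns a copy of the nodes sorted outermost-first.
func (n Nodes) SortedByAscendingNestingLevel() Nodes {
	out := make(Nodes, len(n))
	copy(out, n)
	sort.SliceStable(out, func(i, j int) bool {
		return out[i].NestingLevel < out[j].NestingLevel
	})
	return out
}

func main() {
	setup := Nodes{
		{Text: "inner BeforeEach", NestingLevel: 2},
		{Text: "outer BeforeEach", NestingLevel: 1},
		{Text: "sibling inner BeforeEach", NestingLevel: 2},
	}
	for _, node := range setup.SortedByAscendingNestingLevel() {
		fmt.Printf("level %d: %s\n", node.NestingLevel, node.Text)
	}
	// Output:
	// level 1: outer BeforeEach
	// level 2: inner BeforeEach
	// level 2: sibling inner BeforeEach
}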

group.go

Source: group.go (GitHub)

package internal

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2/types"
)

type runOncePair struct {
	//nodeId should only run once...
	nodeID   uint
	nodeType types.NodeType
	//...for specs in a hierarchy that includes this context
	containerID uint
}

func (pair runOncePair) isZero() bool {
	return pair.nodeID == 0
}

func runOncePairForNode(node Node, containerID uint) runOncePair {
	return runOncePair{
		nodeID:      node.ID,
		nodeType:    node.NodeType,
		containerID: containerID,
	}
}

type runOncePairs []runOncePair

func runOncePairsForSpec(spec Spec) runOncePairs {
	pairs := runOncePairs{}

	containers := spec.Nodes.WithType(types.NodeTypeContainer)
	for _, node := range spec.Nodes {
		if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
			pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
		} else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
			passedIntoAnOrderedContainer := false
			firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
				passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
				return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
			})
			if firstOrderedContainerDeeperThanNode.IsZero() {
				continue
			}
			pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
		}
	}

	return pairs
}

func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
	for i := range pairs {
		if pairs[i].nodeID == nodeID {
			return pairs[i]
		}
	}
	return runOncePair{}
}

func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
	for i := range pairs {
		if pairs[i] == pair {
			return true
		}
	}
	return false
}

func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
	count := 0
	for i := range pairs {
		if pairs[i].nodeType.Is(nodeTypes) {
			count++
		}
	}

	out, j := make(runOncePairs, count), 0
	for i := range pairs {
		if pairs[i].nodeType.Is(nodeTypes) {
			out[j] = pairs[i]
			j++
		}
	}
	return out
}

type group struct {
	suite          *Suite
	specs          Specs
	runOncePairs   map[uint]runOncePairs
	runOnceTracker map[runOncePair]types.SpecState

	succeeded bool
}

func newGroup(suite *Suite) *group {
	return &group{
		suite:          suite,
		runOncePairs:   map[uint]runOncePairs{},
		runOnceTracker: map[runOncePair]types.SpecState{},
		succeeded:      true,
	}
}

func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
	return types.SpecReport{
		ContainerHierarchyTexts:     spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
		ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
		ContainerHierarchyLabels:    spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
		LeafNodeLocation:            spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
		LeafNodeType:                types.NodeTypeIt,
		LeafNodeText:                spec.FirstNodeWithType(types.NodeTypeIt).Text,
		LeafNodeLabels:              []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
		ParallelProcess:             g.suite.config.ParallelProcess,
		IsSerial:                    spec.Nodes.HasNodeMarkedSerial(),
		IsInOrderedContainer:        !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
	}
}

func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
	if spec.Nodes.HasNodeMarkedPending() {
		return types.SpecStatePending, types.Failure{}
	}
	if spec.Skip {
		return types.SpecStateSkipped, types.Failure{}
	}
	if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll {
		return types.SpecStateSkipped, types.Failure{}
	}
	if !g.succeeded {
		return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
			"Spec skipped because an earlier spec in an ordered container failed")
	}
	beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
	for _, pair := range beforeOncePairs {
		if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
			return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
				fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
		}
	}
	if g.suite.config.DryRun {
		return types.SpecStatePassed, types.Failure{}
	}
	return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
}

func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
	lastSpecID := uint(0)
	for idx := range g.specs {
		if g.specs[idx].Skip {
			continue
		}
		sID := g.specs[idx].SubjectID()
		if g.runOncePairs[sID].hasRunOncePair(pair) {
			lastSpecID = sID
		}
	}
	return lastSpecID == specID
}

func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
	interruptStatus := g.suite.interruptHandler.Status()

	pairs := g.runOncePairs[spec.SubjectID()]

	// Setup nodes run outermost-first: BeforeAll and BeforeEach nodes are sorted by
	// ascending nesting level, then JustBeforeEach nodes, then the It itself.
	nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
	nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
	nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
	nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
	terminatingNode, terminatingPair := Node{}, runOncePair{}

	for _, node := range nodes {
		oncePair := pairs.runOncePairFor(node.ID)
		if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
			continue
		}
		g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node))
		g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
		if !oncePair.isZero() {
			g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
		}
		if g.suite.currentSpecReport.State != types.SpecStatePassed {
			terminatingNode, terminatingPair = node, oncePair
			break
		}
	}

	afterNodeWasRun := map[uint]bool{}
	includeDeferCleanups := false
	for {
		// Teardown nodes unwind innermost-first: AfterEach and AfterAll nodes are sorted by
		// descending nesting level, with JustAfterEach nodes running before them.
		nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
		nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
		nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
		if !terminatingNode.IsZero() {
			nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
		}
		if includeDeferCleanups {
			nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
			nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
		}
		nodes = nodes.Filter(func(node Node) bool {
			if afterNodeWasRun[node.ID] {
				//this node has already been run on this attempt, don't rerun it
				return false
			}
			pair := runOncePair{}
			switch node.NodeType {
			case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
				// check if we were generated in an AfterNode that has already run
				if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
					return true // we were, so we should definitely run this cleanup now
				}
				// looks like this cleanup node was generated by a before node or an It.
				// the run-once status of a cleanup node is governed by the run-once status of its generator
				pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
			default:
				pair = pairs.runOncePairFor(node.ID)
			}
			if pair.isZero() {
				// this node is not governed by any run-once policy, we should run it
				return true
			}
			// it's our last chance to run if we're the last spec for our oncePair
			isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)

			switch g.suite.currentSpecReport.State {
			case types.SpecStatePassed: //this attempt is passing...
				return isLastSpecWithPair //...we should run once if this is our last chance
			case types.SpecStateSkipped: //the spec was skipped by the user...
				if isLastSpecWithPair {
					return true //...we're the last spec, so we should run the AfterNode
				}
				if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
					return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
				}
			case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
				if isFinalAttempt {
					return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
				}
				if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be running again
					if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
						return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
					} else {
						return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
					}
				}
			case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
				return true //...that means the test run is over and we should clean up the stack.  Run the AfterNode
			}
			return false
		})

		if len(nodes) == 0 && includeDeferCleanups {
			break
		}

		for _, node := range nodes {
			afterNodeWasRun[node.ID] = true
			state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node))
			g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
			if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
				g.suite.currentSpecReport.State = state
				g.suite.currentSpecReport.Failure = failure
			}
		}
		includeDeferCleanups = true
	}

}

func (g *group) run(specs Specs) {
	g.specs = specs
	for _, spec := range g.specs {
		g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
	}

	for _, spec := range g.specs {
		g.suite.currentSpecReport = g.initialReportForSpec(spec)
		g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
		g.suite.reporter.WillRun(g.suite.currentSpecReport)
		g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)

		skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)

		g.suite.currentSpecReport.StartTime = time.Now()
		if !skip {
			maxAttempts := max(1, spec.FlakeAttempts())
			if g.suite.config.FlakeAttempts > 0 {
				maxAttempts = g.suite.config.FlakeAttempts
			}
			for attempt := 0; attempt < maxAttempts; attempt++ {
				g.suite.currentSpecReport.NumAttempts = attempt + 1
				g.suite.writer.Truncate()
				g.suite.outputInterceptor.StartInterceptingOutput()
				if attempt > 0 {
					fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed.  Retrying...\n", attempt)
				}

				g.attemptSpec(attempt == maxAttempts-1, spec)

				g.suite.currentSpecReport.EndTime = time.Now()
				g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
				g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
				g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()

				if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
					break
				}
			}
		}

		g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
		g.suite.processCurrentSpecReport()
		if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
			g.succeeded = false
		}
		g.suite.currentSpecReport = types.SpecReport{}
	}
}

func (g *group) oldRun(specs Specs) {
	var suite = g.suite
	nodeState := map[uint]types.SpecState{}
	groupSucceeded := true

	indexOfLastSpecContainingNodeID := func(id uint) int {
		lastIdx := -1
		for idx := range specs {
			if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip {
				lastIdx = idx
			}
		}
		return lastIdx
	}

	for i, spec := range specs {
		suite.currentSpecReport = types.SpecReport{
			ContainerHierarchyTexts:     spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
			ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
			ContainerHierarchyLabels:    spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
			LeafNodeLocation:            spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
			LeafNodeType:                types.NodeTypeIt,
			LeafNodeText:                spec.FirstNodeWithType(types.NodeTypeIt).Text,
			LeafNodeLabels:              []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
			ParallelProcess:             suite.config.ParallelProcess,
			IsSerial:                    spec.Nodes.HasNodeMarkedSerial(),
			IsInOrderedContainer:        !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
		}

		skip := spec.Skip
		if spec.Nodes.HasNodeMarkedPending() {
			skip = true
			suite.currentSpecReport.State = types.SpecStatePending
		} else {
			if suite.interruptHandler.Status().Interrupted || suite.skipAll {
				skip = true
			}
			if !groupSucceeded {
				skip = true
				suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
					"Spec skipped because an earlier spec in an ordered container failed")
			}
			for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) {
				if nodeState[node.ID] == types.SpecStateSkipped {
					skip = true
					suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
						"Spec skipped because Skip() was called in BeforeAll")
					break
				}
			}
			if skip {
				suite.currentSpecReport.State = types.SpecStateSkipped
			}
		}

		if suite.config.DryRun && !skip {
			skip = true
			suite.currentSpecReport.State = types.SpecStatePassed
		}

		suite.reporter.WillRun(suite.currentSpecReport)
		//send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
		suite.reportEach(spec, types.NodeTypeReportBeforeEach)
		if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
			//the reportEach failed, skip this spec
			skip = true
		}

		suite.currentSpecReport.StartTime = time.Now()
		maxAttempts := max(1, spec.FlakeAttempts())
		if suite.config.FlakeAttempts > 0 {
			maxAttempts = suite.config.FlakeAttempts
		}

		for attempt := 0; !skip && (attempt < maxAttempts); attempt++ {
			suite.currentSpecReport.NumAttempts = attempt + 1
			suite.writer.Truncate()
			suite.outputInterceptor.StartInterceptingOutput()
			if attempt > 0 {
				fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed.  Retrying...\n", attempt)
			}
			isFinalAttempt := (attempt == maxAttempts-1)

			interruptStatus := suite.interruptHandler.Status()
			deepestNestingLevelAttained := -1
			var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool {
				return nodeState[n.ID] != types.SpecStatePassed
			})
			nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
			nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
			nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...)

			var terminatingNode Node
			for j := range nodes {
				deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel)
				suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j]))
				suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
				nodeState[nodes[j].ID] = suite.currentSpecReport.State
				if suite.currentSpecReport.State != types.SpecStatePassed {
					terminatingNode = nodes[j]
					break
				}
			}

			afterAllNodesThatRan := map[uint]bool{}
			// pull out some shared code so we aren't repeating ourselves down below. this just runs after and cleanup nodes
			runAfterAndCleanupNodes := func(nodes Nodes) {
				for j := range nodes {
					state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j]))
					suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
					nodeState[nodes[j].ID] = state
					if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
						suite.currentSpecReport.State = state
						suite.currentSpecReport.Failure = failure
						if state != types.SpecStatePassed {
							terminatingNode = nodes[j]
						}
					}
					if nodes[j].NodeType.Is(types.NodeTypeAfterAll) {
						afterAllNodesThatRan[nodes[j].ID] = true
					}
				}
			}

			// pull out a helper that captures the logic of whether or not we should run a given After node.
			// there is complexity here stemming from the fact that we allow nested ordered contexts and flaky retries
			shouldRunAfterNode := func(n Node) bool {
				if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
					return true
				}
				var id uint
				if n.NodeType.Is(types.NodeTypeAfterAll) {
					id = n.ID
					if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again.
						return false
					}
				}
				if n.NodeType.Is(types.NodeTypeCleanupAfterAll) {
					id = n.NodeIDWhereCleanupWasGenerated
				}
				isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i

				switch suite.currentSpecReport.State {
				case types.SpecStatePassed: //we've passed so far...
					return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it
				case types.SpecStateSkipped: //the spec was skipped by the user...
					if isLastSpecWithNode {
						return true //...we're the last spec, so we should run the AfterNode
					}
					if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel {
						return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip
					}
				case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
					if isFinalAttempt {
						return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
					}
					if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) {
						//...we'll be rerunning a BeforeAll so we should cleanup after it if...
						if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel {
							return true //we're at the same nesting level
						}
						if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
							return true //we're a DeferCleanup generated by it
						}
					}
					if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) {
						//...we'll be rerunning an AfterAll so we should cleanup after it if...
						if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
							return true //we're a DeferCleanup generated by it
						}
					}
				case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
					return true //...that means the test run is over and we should clean up the stack.  Run the AfterNode
				}
				return false
			}

			// first pass - run all the JustAfterEach, AfterEach, and AfterAlls.  Our shouldRunAfterNode filter function will clean up the AfterAlls for us.
			afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel()
			afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...)
			afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained)
			afterNodes = afterNodes.Filter(shouldRunAfterNode)
			runAfterAndCleanupNodes(afterNodes)

			// second pass - perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls:
			afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
			runAfterAndCleanupNodes(afterNodes)

			// now we run any DeferCleanups
			afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()
			afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...)
			runAfterAndCleanupNodes(afterNodes)

			// third pass - perhaps a DeferCleanup failed and now we need to run the AfterAlls.
			afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
			runAfterAndCleanupNodes(afterNodes)

			// and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up
			afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode)
			runAfterAndCleanupNodes(afterNodes)

			suite.currentSpecReport.EndTime = time.Now()
			suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
			suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
			suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()

			if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
				break
			}
		}

		//send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
		suite.reportEach(spec, types.NodeTypeReportAfterEach)
		suite.processCurrentSpecReport()
		if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
			groupSucceeded = false
		}
		suite.currentSpecReport = types.SpecReport{}
	}
}
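In attemptSpec above, SortedByAscendingNestingLevel orders the combined BeforeAll/BeforeEach nodes (and then the JustBeforeEach nodes) outermost-first, while the teardown path uses its counterpart SortedByDescendingNestingLevel to unwind innermost-first. From the user's side of the API, this is the rule that outer BeforeEach blocks run before inner ones. The small spec below illustrates that guarantee using only Ginkgo's public DSL; the suite bootstrap is the standard one, and the comments about nesting levels are an interpretation of the internal code above rather than documented API.

package ordering_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// Standard Ginkgo suite bootstrap.
func TestOrdering(t *testing.T) {
	gomega.RegisterFailHandler(Fail)
	RunSpecs(t, "Ordering Suite")
}

var order []string

var _ = Describe("outer container", func() {
	// This BeforeEach sits at a shallower nesting level than the one below
	// (an inference from the internal code, not a documented contract).
	BeforeEach(func() { order = append(order, "outer") })

	Context("inner container", func() {
		BeforeEach(func() { order = append(order, "inner") })

		It("runs setup nodes outermost-first", func() {
			// attemptSpec sorted the BeforeEach nodes with
			// SortedByAscendingNestingLevel, so "outer" ran before "inner".
			gomega.Expect(order).To(gomega.Equal([]string{"outer", "inner"}))
		})
	})
})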