How to use the DefaultBackoff method of the utils package

Best Rod code snippet using utils.DefaultBackoff

template_engine.go

Source: template_engine.go (GitHub)


...
	if datasetUFSTotalBytes > currentCachedCapacityBytes+capacity {
		nonCacheable = datasetUFSTotalBytes - currentCachedCapacityBytes - capacity
	}
	// 3.Update the runtime status
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		runtime, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
		if err != nil {
			return err
		}
		runtimeToUpdate := runtime.DeepCopy()
		if len(runtimeToUpdate.Status.CacheStates) == 0 {
			runtimeToUpdate.Status.CacheStates = map[data.CacheStateName]string{}
		}
		runtimeToUpdate.Status.CacheStates[data.CacheCapacity] = units.BytesSize(float64(capacity))
		// runtimeToUpdate.Status.CacheStates[data.Cacheable] = units.BytesSize(float64(0))
		runtimeToUpdate.Status.CacheStates[data.Cached] = units.BytesSize(float64(0))
		runtimeToUpdate.Status.CacheStates[data.NonCacheable] = units.BytesSize(float64(nonCacheable))
		runtimeToUpdate.Status.CacheStates[data.CachedPercentage] = fmt.Sprintf("%.2f %%", 0.00)
		// runtimeToUpdate.Status.CacheStates[data.Cacheable]
		err = b.Client.Status().Update(context.TODO(), runtimeToUpdate)
		if err != nil {
			b.Log.Error(err, "Failed update runtime")
		}
		return err
	})
	// TODO:(cheyang)Algorithm needs optimization
	return expectedWorkerNum, err
}

// Remove the nodes to put the worker for caching
func (b *TemplateEngine) RemoveCacheNodes() (err error) {
	var (
		nodeList          *corev1.NodeList = &corev1.NodeList{}
		labelNameToAddRaw                  = common.LabelAnnotationStorageCapacityPrefix + "raw-" + b.Type() + "-" + b.Config.Name
		labelNameToAdd                     = common.LabelAnnotationStorageCapacityPrefix + "human-" + b.Type() + "-" + b.Config.Name
		labelName                          = common.LabelAnnotationStorageCapacityPrefix + b.Type() + "-" + b.Config.Name
		labelCommonName                    = common.LabelAnnotationStorageCapacityPrefix + b.Config.Name
	)
	err = b.List(context.TODO(), nodeList, &client.ListOptions{})
	if err != nil {
		return
	}
	// 1.select the nodes
	// TODO(cheyang) Need consider node selector
	for _, node := range nodeList.Items {
		// nodes = append(nodes, &node)
		toUpdate := node.DeepCopy()
		if len(toUpdate.Labels) == 0 {
			continue
		}
		delete(toUpdate.Labels, labelNameToAddRaw)
		delete(toUpdate.Labels, labelNameToAdd)
		delete(toUpdate.Labels, labelName)
		delete(toUpdate.Labels, labelCommonName)
		if len(toUpdate.Labels) < len(node.Labels) {
			err := b.Client.Update(context.TODO(), toUpdate)
			if err != nil {
				return err
			}
		}
	}
	return
}

// label the node to cache
func (b *TemplateEngine) LabelCachedNodes(selectedNodes []NodeInfo, cacheCapacity uint64) (err error) {
	// labelNameToAdd := common.LabelAnnotationStorageCapacityPrefix + b.Config.Name
	var (
		needed uint64 = cacheCapacity
	)
	for _, n := range selectedNodes {
		allocated, err := b.labelCachedNode(n, needed)
		if err != nil {
			b.Log.Error(err, "LabelCachedNodes")
			return err
		}
		needed = needed - allocated
		// if toUpdate.Labels == nil {
		// 	toUpdate.Labels = make(map[string]string)
		// }
		// toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
		// _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
		// labels := node.Labels
		// for _, label := range labels {
		// }
	}
	return err
}

func (b *TemplateEngine) labelCachedNode(selectedNode NodeInfo, needed uint64) (allocated uint64, err error) {
	var (
		labelNameToAddRaw = common.LabelAnnotationStorageCapacityPrefix + "raw-" + b.Type() + "-" + b.Config.Name
		// labelNameToAddKiB = common.LabelAnnotationStorageCapacityPrefix + "KB-" + b.Type() + "-" + b.Config.Name
		// labelNameToAddMiB = common.LabelAnnotationStorageCapacityPrefix + "MB-" + b.Type() + "-" + b.Config.Name
		// labelNameToAddGiB = common.LabelAnnotationStorageCapacityPrefix + "GB-" + b.Type() + "-" + b.Config.Name
		labelNameToAdd  = common.LabelAnnotationStorageCapacityPrefix + "human-" + b.Type() + "-" + b.Config.Name
		labelName       = common.LabelAnnotationStorageCapacityPrefix + b.Type() + "-" + b.Config.Name
		labelCommonName = common.LabelAnnotationStorageCapacityPrefix + b.Config.Name
	)
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		nodeName := selectedNode.GetName()
		node, err := kubeclient.GetNode(b.Client, nodeName)
		if node == nil {
			return err
		}
		toUpdate := node.DeepCopy()
		if toUpdate.Labels == nil {
			toUpdate.Labels = make(map[string]string)
		}
		if needed < selectedNode.GetAvailableStorageCapacity() {
			value := units.BytesSize(float64(needed))
			toUpdate.Labels[labelNameToAdd] = value
			// For now, we are not able to set node label to pods due to https://github.com/kubernetes/kubernetes/issues/40610
			// We have set it in pod level
			err := b.SetRuntimeMaxMemory(selectedNode.GetName(), value)
			if err != nil {
				return err
			}
			toUpdate.Labels[labelNameToAddRaw] = fmt.Sprintf("%d", needed)
			allocated = needed
			toUpdate.Labels[labelName] = "true"
			toUpdate.Labels[labelCommonName] = "true"
		} else {
			value := units.BytesSize(float64(selectedNode.GetAvailableStorageCapacity()))
			toUpdate.Labels[labelNameToAdd] = value
			err := b.SetRuntimeMaxMemory(selectedNode.GetName(), value)
			if err != nil {
				return err
			}
			toUpdate.Labels[labelNameToAddRaw] = fmt.Sprintf("%d", selectedNode.GetAvailableStorageCapacity())
			allocated = selectedNode.GetAvailableStorageCapacity()
			toUpdate.Labels[labelName] = "true"
			toUpdate.Labels[labelCommonName] = "true"
		}
		// toUpdate.Labels[labelNameToAdd] = "true"
		err = b.Client.Update(context.TODO(), toUpdate)
		if err != nil {
			b.Log.Error(err, "LabelCachedNodes")
			return err
		}
		return nil
	})
	if err != nil {
		b.Log.Error(err, "LabelCacheNode")
		return allocated, err
	}
	return allocated, nil
}

// Setup the ddc engine
func (b *TemplateEngine) Setup(ctx common.ReconcileRequestContext) (ready bool, err error) {
	var shouldSetupMaster, masterReady bool
	b.Log.V(1).Info("Get", "Runtime", ctx.Runtime)
	b.Log.V(1).Info("Get", "Dataset", ctx.Dataset)
	runtime := ctx.Runtime
	dataset := ctx.Dataset
	// If the dataset condition is created, it means the dataset is already setup
	index, _ := utils.GetDatasetCachedCondition(dataset.Status.CacheStatus.Conditions, data.Ready)
	if index != -1 {
		b.Log.V(1).Info("The runtime is already setup.")
		ready = true
		return ready, nil
	}
	switch runtime.Status.MasterPhase {
	case data.RuntimePhaseNone:
		shouldSetupMaster = true
	default:
		shouldSetupMaster = false
	}
	// 1. Setup Master
	if shouldSetupMaster {
		desiredNum, err := b.SetupMaster()
		if err != nil {
			b.Log.Error(err, "SetupMaster")
			return ready, err
		}
		// 1.1 Update the runtime
		err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			runtimeToUpdate, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
			if err != nil {
				return err
			}
			runtimeToUpdate.Status.MasterPhase = data.RuntimePhaseNotReady
			runtimeToUpdate.Status.DesiredMasterNumberScheduled = desiredNum
			runtimeToUpdate.Status.ValueFileConfigmap = dataset.Name + "-" + b.Type() + "-values"
			if len(runtimeToUpdate.Status.Conditions) == 0 {
				runtimeToUpdate.Status.Conditions = []data.RuntimeCondition{}
			}
			cond := utils.NewRuntimeCondition(data.RuntimeMasterInitialized, data.RuntimeMasterInitializedReason,
				"The master is initialized.", corev1.ConditionTrue)
			runtimeToUpdate.Status.Conditions =
				utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
					cond)
			return b.Client.Status().Update(ctx.Context, runtimeToUpdate)
		})
		if err != nil {
			b.Log.Error(err, "Update runtime status")
			return ready, err
		}
		// 1.2 update the status of dataset
		err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			datasetToUpdate, err := utils.GetDataset(b.Client, b.Config.Name, b.Config.Namespace)
			if err != nil {
				return err
			}
			datasetToUpdate.Status.Phase = data.PendingDatasetPhase
			if len(datasetToUpdate.Status.CacheStatus.Conditions) == 0 {
				datasetToUpdate.Status.CacheStatus.Conditions = []data.CacheCondition{}
			}
			cond := utils.NewDatasetCachedCondition(data.Planned,
				data.DatasetPlannedReason,
				"The ddc runtime is creating",
				corev1.ConditionTrue)
			datasetToUpdate.Status.CacheStatus.Conditions =
				utils.UpdateDatasetCachedCondition(datasetToUpdate.Status.CacheStatus.Conditions,
					cond)
			// datasetToUpdate.Status.CacheStatus.Conditions = append(datasetToUpdate.Status.CacheStatus.Conditions,
			// 	data.CacheCondition{
			// 		Type: data.Planned,
			// 	})
			return b.Client.Status().Update(ctx.Context, datasetToUpdate)
		})
		if err != nil {
			b.Log.Error(err, "Update dataset status")
			return ready, err
		}
		return ready, nil
	}
	masterReady, err = b.IsMasterReady(runtime)
	if err != nil {
		return ready, err
	}
	if !masterReady {
		return masterReady, err
	}
	// Update the condition of the runtime
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		runtime, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
		if err != nil {
			return err
		}
		runtimeToUpdate := runtime.DeepCopy()
		if len(runtimeToUpdate.Status.Conditions) == 0 {
			runtimeToUpdate.Status.Conditions = []data.RuntimeCondition{}
		}
		cond := utils.NewRuntimeCondition(data.RuntimeMasterReady, data.RuntimeMasterReadyReason,
			"The master is ready.", corev1.ConditionTrue)
		runtimeToUpdate.Status.Conditions =
			utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
				cond)
		runtimeToUpdate.Status.MasterPhase = data.RuntimePhaseReady
		if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) {
			return b.Client.Status().Update(ctx.Context, runtimeToUpdate)
		}
		return nil
	})
	if err != nil {
		b.Log.Error(err, "Update runtime to master ready")
		return ready, err
	}
	// 2.Check the ufs capacity
	// TODO:(cheyang) sync ufs storage once by now, will be predically in future
	var shouldSetupWorkers bool
	switch runtime.Status.WorkerPhase {
	case data.RuntimePhaseNone:
		shouldSetupWorkers = true
	default:
		shouldSetupWorkers = false
	}
	if shouldSetupWorkers {
		desiredNum, err := b.SetupWorkers(dataset)
		if err != nil {
			b.Log.Error(err, "SetupWorker")
			return ready, err
		}
		err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			runtime, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
			if err != nil {
				return err
			}
			runtimeToUpdate := runtime.DeepCopy()
			runtimeToUpdate.Status.WorkerPhase = data.RuntimePhaseNotReady
			runtimeToUpdate.Status.DesiredWorkerNumberScheduled = desiredNum
			runtimeToUpdate.Status.FusePhase = data.RuntimePhaseNotReady
			runtimeToUpdate.Status.DesiredFuseNumberScheduled = desiredNum
			if len(runtimeToUpdate.Status.Conditions) == 0 {
				runtimeToUpdate.Status.Conditions = []data.RuntimeCondition{}
			}
			cond := utils.NewRuntimeCondition(data.RuntimeWorkersInitialized, data.RuntimeWorkersInitializedReason,
				"The workers are initialized.", corev1.ConditionTrue)
			runtimeToUpdate.Status.Conditions =
				utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
					cond)
			fuseCond := utils.NewRuntimeCondition(data.RuntimeFusesInitialized, data.RuntimeFusesInitializedReason,
				"The fuses are initialized.", corev1.ConditionTrue)
			runtimeToUpdate.Status.Conditions =
				utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
					fuseCond)
			if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) {
				return b.Client.Status().Update(ctx.Context, runtimeToUpdate)
			}
			return nil
		})
		if err != nil {
			b.Log.Error(err, "Update runtime")
			return ready, err
		}
	}
	runtime, err = utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
	if err != nil {
		return ready, err
	}
	workerReady, err := b.AreWorkersReady(runtime)
	if err != nil {
		b.Log.Error(err, "Check if the workers are ready")
		return workerReady, err
	}
	if !workerReady {
		return workerReady, err
	}
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		runtime, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
		if err != nil {
			return err
		}
		runtimeToUpdate := runtime.DeepCopy()
		if len(runtimeToUpdate.Status.Conditions) == 0 {
			runtimeToUpdate.Status.Conditions = []data.RuntimeCondition{}
		}
		cond := utils.NewRuntimeCondition(data.RuntimeWorkersReady, data.RuntimeWorkersReadyReason,
			"The workers are ready.", corev1.ConditionTrue)
		runtimeToUpdate.Status.Conditions =
			utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
				cond)
		fuseCond := utils.NewRuntimeCondition(data.RuntimeFusesReady, data.RuntimeFusesReadyReason,
			"The fuses are ready.", corev1.ConditionTrue)
		runtimeToUpdate.Status.Conditions =
			utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
				fuseCond)
		if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) {
			return b.Client.Status().Update(ctx.Context, runtimeToUpdate)
		}
		return nil
	})
	// 4. Setup the init Runtime status
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		runtime, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
		if err != nil {
			b.Log.Error(err, "Get Runtime")
			return err
		}
		ready, err = b.UpdateRuntimeStatus(runtime)
		if err != nil {
			b.Log.Error(err, "Update runtime")
			return err
		}
		return nil
	})
	if err != nil {
		b.Log.Error(err, "Update runtime")
		return ready, err
	}
	// 5.Setup the init status of Dataset
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		dataset, err = utils.GetDataset(b.Client, b.Config.Name, b.Config.Namespace)
		if err != nil {
			b.Log.Error(err, "Get dataset")
			return err
		}
		runtime, err := utils.GetRuntime(b.Client, b.Config.Name, b.Config.Namespace)
		if err != nil {
			return err
		}
		datasetToUpdate := dataset.DeepCopy()
		datasetToUpdate.Status.Phase = data.ReadyDatasetPhase
		cond := utils.NewDatasetCachedCondition(data.Ready, data.DatasetReadyReason,
			"The ddc runtime is ready.",
			corev1.ConditionTrue)
...
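Note that the excerpt above retries Kubernetes status updates with retry.RetryOnConflict and retry.DefaultBackoff, which come from client-go's util/retry package rather than Rod's utils package; the two DefaultBackoff helpers only share a name. Below is a minimal, cluster-free sketch of that read-modify-update retry pattern; the simulated conflict error and the "runtimes"/"demo" names are illustrative placeholders, not part of the excerpt.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	// retry.DefaultBackoff re-runs the closure only when it returns a Conflict
	// error, sleeping a short, growing interval between attempts.
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		attempts++
		fmt.Println("attempt", attempts)
		if attempts < 3 {
			// Simulate the "someone else updated the object" case that the code
			// above hits when two controllers race on a status update.
			return apierrors.NewConflict(schema.GroupResource{Resource: "runtimes"}, "demo", fmt.Errorf("please retry"))
		}
		return nil // pretend the Status().Update succeeded
	})
	fmt.Println("final error:", err)
}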


kit.go

Source: kit.go (GitHub)


...
// ClearScreen imported
var ClearScreen = utils.ClearScreen

// CountSleeper imported
var CountSleeper = utils.CountSleeper

// DefaultBackoff imported
var DefaultBackoff = utils.DefaultBackoff

// Dump imported
var Dump = utils.Dump

// E imported
var E = utils.E

// E1 imported
var E1 = utils.E1

// Err imported
var Err = utils.Err

// ErrArg imported
var ErrArg = utils.ErrArg

// ErrInjector imported
type ErrInjector = utils.ErrInjector

// ErrMaxSleepCount imported
var ErrMaxSleepCount = utils.ErrMaxSleepCount
...


sleeper.go

Source: sleeper.go (GitHub)


...
		count++
		return nil
	}
}

// DefaultBackoff algorithm: A(n) = A(n-1) * random[1.9, 2.1)
func DefaultBackoff(interval time.Duration) time.Duration {
	scale := 2 + (rand.Float64()-0.5)*0.2
	return time.Duration(float64(interval) * scale)
}

// BackoffSleeper returns a sleeper that sleeps in a backoff manner every time get called.
// If algorithm is nil, DefaultBackoff will be used.
// Set interval and maxInterval to the same value to make it a constant interval sleeper.
// If maxInterval is not greater than 0, it will wake immediately.
func BackoffSleeper(init, maxInterval time.Duration, algorithm func(time.Duration) time.Duration) Sleeper {
	if algorithm == nil {
		algorithm = DefaultBackoff
	}
	return func(ctx context.Context) error {
		// wake immediately
		if maxInterval <= 0 {
			return nil
		}
		var interval time.Duration
		if init < maxInterval {
			interval = algorithm(init)
		} else {
			interval = maxInterval
		}
		t := time.NewTicker(interval)
		defer t.Stop()
...
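As the source above shows, utils.DefaultBackoff simply grows a time.Duration by roughly 2x with a little jitter, and BackoffSleeper turns that growth function into a Sleeper. The sketch below shows one way to drive a retry loop with them; it assumes the package lives at Rod's usual lib/utils import path, and the flakyOp operation and interval values are illustrative, not from the source.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

func main() {
	// DefaultBackoff multiplies an interval by random[1.9, 2.1).
	d := 100 * time.Millisecond
	for i := 0; i < 3; i++ {
		d = utils.DefaultBackoff(d)
		fmt.Println("next interval:", d)
	}

	// BackoffSleeper(init, max, nil) uses DefaultBackoff as the growth
	// algorithm and caps each sleep at max.
	sleep := utils.BackoffSleeper(100*time.Millisecond, 2*time.Second, nil)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	attempts := 0
	for {
		attempts++
		// flakyOp is a stand-in for whatever operation you want to retry.
		if err := flakyOp(attempts); err == nil {
			fmt.Println("succeeded after", attempts, "attempts")
			return
		}
		// Sleep with exponential backoff; stops early if the context expires.
		if err := sleep(ctx); err != nil {
			fmt.Println("gave up:", err)
			return
		}
	}
}

// flakyOp fails a couple of times before succeeding (illustration only).
func flakyOp(attempt int) error {
	if attempt < 3 {
		return errors.New("not ready yet")
	}
	return nil
}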


DefaultBackoff

Using AI Code Generation


package main

import (
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

// Minimal sketch: utils.DefaultBackoff takes a time.Duration and returns the
// next (roughly doubled) interval; it does not run a callback itself.
func main() {
	interval := 1 * time.Second
	for i := 0; i < 3; i++ {
		interval = utils.DefaultBackoff(interval)
		fmt.Println("Hello, next wait would be", interval)
	}
}


DefaultBackoff

Using AI Code Generation


package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

// Minimal sketch: DefaultBackoff has no Retry method; pair BackoffSleeper
// (which falls back to DefaultBackoff when the algorithm is nil) with a loop.
func main() {
	sleep := utils.BackoffSleeper(200*time.Millisecond, 3*time.Second, nil)
	ctx := context.Background()
	for attempt := 1; attempt <= 3; attempt++ {
		fmt.Println("I am retrying, attempt", attempt)
		if err := sleep(ctx); err != nil {
			fmt.Println("stopped:", err)
			return
		}
	}
}


DefaultBackoff

Using AI Code Generation


package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

// DefaultBackoff is the interval-growing function itself; it can be passed to
// BackoffSleeper explicitly, or implied by passing nil as the algorithm.
func main() {
	fmt.Println("DefaultBackoff")

	explicit := utils.BackoffSleeper(100*time.Millisecond, 10*time.Second, utils.DefaultBackoff)
	implicit := utils.BackoffSleeper(100*time.Millisecond, 10*time.Second, nil) // nil also selects DefaultBackoff

	// Both sleepers behave the same way when called.
	ctx := context.Background()
	_ = explicit(ctx)
	_ = implicit(ctx)
}


DefaultBackoff

Using AI Code Generation


package main

import (
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

// DefaultBackoff returns a time.Duration, not a struct; printing it after each
// call shows the interval roughly doubling (with about +-5% jitter) each time.
func main() {
	backoff := 1 * time.Second
	backoff = utils.DefaultBackoff(backoff)
	fmt.Println(backoff) // ~2s
	backoff = utils.DefaultBackoff(backoff)
	fmt.Println(backoff) // ~4s
	backoff = utils.DefaultBackoff(backoff)
	fmt.Println(backoff) // ~8s
}


DefaultBackoff

Using AI Code Generation


1import (2func main() {3 fmt.Println("DefaultBackoff")4 fmt.Println(utils.DefaultBackoff(2, 2))5}6import (7func main() {8 fmt.Println("DefaultBackoff")9 fmt.Println(utils.DefaultBackoff(2, 3))10}11import (12func main() {13 fmt.Println("DefaultBackoff")14 fmt.Println(utils.DefaultBackoff(2, 4))15}16import (17func main() {18 fmt.Println("DefaultBackoff")19 fmt.Println(utils.DefaultBackoff(2, 5))20}21import (22func main() {23 fmt.Println("DefaultBackoff")24 fmt.Println(utils.DefaultBackoff(2, 6))25}26import (27func main() {28 fmt.Println("DefaultBackoff")29 fmt.Println(utils.DefaultBackoff(2, 7))30}31import (32func main() {33 fmt.Println("DefaultBackoff")34 fmt.Println(utils.DefaultBackoff(2, 8))35}36import (37func main() {38 fmt.Println("DefaultBackoff")39 fmt.Println(utils.DefaultBackoff(2, 9))40}


DefaultBackoff

Using AI Code Generation


package main

import (
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

// DefaultBackoff does not sleep or "back off the application"; it only
// computes the next interval from the one you pass in.
func main() {
	for _, d := range []time.Duration{5 * time.Second, 10 * time.Second, 20 * time.Second, 30 * time.Second, 40 * time.Second} {
		fmt.Printf("backoff after %v is %v\n", d, utils.DefaultBackoff(d))
	}
}


DefaultBackoff

Using AI Code Generation


package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

// Rod's utils package has no NewBackoff/WithMaxRetries/WithMaxDelay options;
// a bounded retry can be built from BackoffSleeper plus an attempt counter.
func main() {
	const maxRetries = 10
	sleep := utils.BackoffSleeper(50*time.Millisecond, 10*time.Second, nil)
	ctx := context.Background()

	var err error
	for attempt := 1; attempt <= maxRetries; attempt++ {
		if err = doSomething(attempt); err == nil {
			fmt.Println("done after", attempt, "attempts")
			return
		}
		if serr := sleep(ctx); serr != nil {
			err = serr
			break
		}
	}
	fmt.Println("failed:", err)
}

// doSomething is a placeholder for the operation being retried.
func doSomething(attempt int) error {
	if attempt < 4 {
		return errors.New("transient failure")
	}
	return nil
}


DefaultBackoff

Using AI Code Generation


package main

import (
	"fmt"
	"time"

	"github.com/go-rod/rod/lib/utils"
)

func main() {
	backoff := NewDefaultBackoff()
	for backoff.Attempt() {
		fmt.Println("Attempt:", backoff.AttemptNum(), "next delay:", backoff.Delay())
		time.Sleep(backoff.Delay())
	}
	fmt.Println("Main method done")
}

// Backoff is an illustrative helper, not a type from Rod's utils package;
// only utils.DefaultBackoff (the interval growth function) comes from the library.
type Backoff struct {
	delay, maxDelay time.Duration
	maxRetries      int
	attempts        int
}

// NewDefaultBackoff picks arbitrary example defaults.
func NewDefaultBackoff() *Backoff { return NewBackoff(100*time.Millisecond, 2*time.Second, 5) }

func NewBackoff(initialDelay, maxDelay time.Duration, maxRetries int) *Backoff {
	return &Backoff{delay: initialDelay, maxDelay: maxDelay, maxRetries: maxRetries}
}

// Attempt reports whether another try is allowed and grows the delay for the next one.
func (b *Backoff) Attempt() bool {
	if b.attempts >= b.maxRetries {
		return false
	}
	b.attempts++
	if next := utils.DefaultBackoff(b.delay); next < b.maxDelay {
		b.delay = next
	} else {
		b.delay = b.maxDelay
	}
	return true
}

func (b *Backoff) AttemptNum() int      { return b.attempts }
func (b *Backoff) Delay() time.Duration { return b.delay }

