How to use the testFinished method of the cloud package

Best k6 code snippet using cloud.testFinished

output.go

Source:output.go Github

copy

Full Screen

...252 out.logger.Debug("Aggregation stopped, stopping metric emission...")253 close(out.stopOutput)254 out.outputDone.Wait()255 out.logger.Debug("Metric emission stopped, calling cloud API...")256 err := out.testFinished()257 if err != nil {258 out.logger.WithFields(logrus.Fields{"error": err}).Warn("Failed to send test finished to the cloud")259 } else {260 out.logger.Debug("Cloud output successfully stopped!")261 }262 return err263}264// Description returns the URL with the test run results.265func (out *Output) Description() string {266 return fmt.Sprintf("cloud (%s)", cloudapi.URLForResults(out.referenceID, out.config))267}268// SetRunStatus receives the latest run status from the Engine.269func (out *Output) SetRunStatus(status lib.RunStatus) {270 out.runStatus = status271}272// SetThresholds receives the thresholds before the output is Start()-ed.273func (out *Output) SetThresholds(scriptThresholds map[string]metrics.Thresholds) {274 thresholds := make(map[string][]*metrics.Threshold)275 for name, t := range scriptThresholds {276 thresholds[name] = append(thresholds[name], t.Thresholds...)277 }278 out.thresholds = thresholds279}280// SetTestRunStopCallback receives the function that stops the engine on error281func (out *Output) SetTestRunStopCallback(stopFunc func(error)) {282 out.engineStopFunc = stopFunc283}284func useCloudTags(source *httpext.Trail) *httpext.Trail {285 name, nameExist := source.Tags.Get("name")286 url, urlExist := source.Tags.Get("url")287 if !nameExist || !urlExist || name == url {288 return source289 }290 newTags := source.Tags.CloneTags()291 newTags["url"] = name292 dest := new(httpext.Trail)293 *dest = *source294 dest.Tags = metrics.IntoSampleTags(&newTags)295 dest.Samples = nil296 return dest297}298// AddMetricSamples receives a set of metric samples. 
This method is never299// called concurrently, so it defers as much of the work as possible to the300// asynchronous goroutines initialized in Start().301func (out *Output) AddMetricSamples(sampleContainers []metrics.SampleContainer) {302 select {303 case <-out.stopSendingMetrics:304 return305 default:306 }307 if out.referenceID == "" {308 return309 }310 newSamples := []*Sample{}311 newHTTPTrails := []*httpext.Trail{}312 for _, sampleContainer := range sampleContainers {313 switch sc := sampleContainer.(type) {314 case *httpext.Trail:315 sc = useCloudTags(sc)316 // Check if aggregation is enabled,317 if out.config.AggregationPeriod.Duration > 0 {318 newHTTPTrails = append(newHTTPTrails, sc)319 } else {320 newSamples = append(newSamples, NewSampleFromTrail(sc))321 }322 case *netext.NetTrail:323 // TODO: aggregate?324 values := map[string]float64{325 metrics.DataSentName: float64(sc.BytesWritten),326 metrics.DataReceivedName: float64(sc.BytesRead),327 }328 if sc.FullIteration {329 values[metrics.IterationDurationName] = metrics.D(sc.EndTime.Sub(sc.StartTime))330 values[metrics.IterationsName] = 1331 }332 newSamples = append(newSamples, &Sample{333 Type: DataTypeMap,334 Metric: "iter_li_all",335 Data: &SampleDataMap{336 Time: toMicroSecond(sc.GetTime()),337 Tags: sc.GetTags(),338 Values: values,339 },340 })341 default:342 for _, sample := range sampleContainer.GetSamples() {343 newSamples = append(newSamples, &Sample{344 Type: DataTypeSingle,345 Metric: sample.Metric.Name,346 Data: &SampleDataSingle{347 Type: sample.Metric.Type,348 Time: toMicroSecond(sample.Time),349 Tags: sample.Tags,350 Value: sample.Value,351 },352 })353 }354 }355 }356 if len(newSamples) > 0 || len(newHTTPTrails) > 0 {357 out.bufferMutex.Lock()358 out.bufferSamples = append(out.bufferSamples, newSamples...)359 out.bufferHTTPTrails = append(out.bufferHTTPTrails, newHTTPTrails...)360 out.bufferMutex.Unlock()361 }362}363//nolint:funlen,nestif,gocognit364func (out *Output) 
aggregateHTTPTrails(waitPeriod time.Duration) {365 out.bufferMutex.Lock()366 newHTTPTrails := out.bufferHTTPTrails367 out.bufferHTTPTrails = nil368 out.bufferMutex.Unlock()369 aggrPeriod := int64(out.config.AggregationPeriod.Duration)370 // Distribute all newly buffered HTTP trails into buckets and sub-buckets371 // this key is here specifically to not incur more allocations then necessary372 // if you change this code please run the benchmarks and add the results to the commit message373 var subBucketKey [3]string374 for _, trail := range newHTTPTrails {375 trailTags := trail.GetTags()376 bucketID := trail.GetTime().UnixNano() / aggrPeriod377 // Get or create a time bucket for that trail period378 bucket, ok := out.aggrBuckets[bucketID]379 if !ok {380 bucket = make(map[[3]string]aggregationBucket)381 out.aggrBuckets[bucketID] = bucket382 }383 subBucketKey[0], _ = trailTags.Get("name")384 subBucketKey[1], _ = trailTags.Get("group")385 subBucketKey[2], _ = trailTags.Get("status")386 subBucket, ok := bucket[subBucketKey]387 if !ok {388 subBucket = aggregationBucket{}389 bucket[subBucketKey] = subBucket390 }391 // Either use an existing subbucket key or use the trail tags as a new one392 subSubBucketKey := trailTags393 subSubBucket, ok := subBucket[subSubBucketKey]394 if !ok {395 for sbTags, sb := range subBucket {396 if trailTags.IsEqual(sbTags) {397 subSubBucketKey = sbTags398 subSubBucket = sb399 break400 }401 }402 }403 subBucket[subSubBucketKey] = append(subSubBucket, trail)404 }405 // Which buckets are still new and we'll wait for trails to accumulate before aggregating406 bucketCutoffID := time.Now().Add(-waitPeriod).UnixNano() / aggrPeriod407 iqrRadius := out.config.AggregationOutlierIqrRadius.Float64408 iqrLowerCoef := out.config.AggregationOutlierIqrCoefLower.Float64409 iqrUpperCoef := out.config.AggregationOutlierIqrCoefUpper.Float64410 newSamples := []*Sample{}411 // Handle all aggregation buckets older than bucketCutoffID412 for bucketID, subBuckets := 
range out.aggrBuckets {413 if bucketID > bucketCutoffID {414 continue415 }416 for _, subBucket := range subBuckets {417 for tags, httpTrails := range subBucket {418 // start := time.Now() // this is in a combination with the log at the end419 trailCount := int64(len(httpTrails))420 if trailCount < out.config.AggregationMinSamples.Int64 {421 for _, trail := range httpTrails {422 newSamples = append(newSamples, NewSampleFromTrail(trail))423 }424 continue425 }426 aggrData := &SampleDataAggregatedHTTPReqs{427 Time: toMicroSecond(time.Unix(0, bucketID*aggrPeriod+aggrPeriod/2)),428 Type: "aggregated_trend",429 Tags: tags,430 }431 if out.config.AggregationSkipOutlierDetection.Bool {432 // Simply add up all HTTP trails, no outlier detection433 for _, trail := range httpTrails {434 aggrData.Add(trail)435 }436 } else {437 connDurations := make(durations, trailCount)438 reqDurations := make(durations, trailCount)439 for i, trail := range httpTrails {440 connDurations[i] = trail.ConnDuration441 reqDurations[i] = trail.Duration442 }443 var minConnDur, maxConnDur, minReqDur, maxReqDur time.Duration444 if trailCount < out.config.AggregationOutlierAlgoThreshold.Int64 {445 // Since there are fewer samples, we'll use the interpolation-enabled and446 // more precise sorting-based algorithm447 minConnDur, maxConnDur = connDurations.SortGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef, true)448 minReqDur, maxReqDur = reqDurations.SortGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef, true)449 } else {450 minConnDur, maxConnDur = connDurations.SelectGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef)451 minReqDur, maxReqDur = reqDurations.SelectGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef)452 }453 for _, trail := range httpTrails {454 if trail.ConnDuration < minConnDur ||455 trail.ConnDuration > maxConnDur ||456 trail.Duration < minReqDur ||457 trail.Duration > maxReqDur {458 // Seems like an outlier, add it as a standalone metric459 newSamples = append(newSamples, 
NewSampleFromTrail(trail))460 } else {461 // Aggregate the trail462 aggrData.Add(trail)463 }464 }465 }466 aggrData.CalcAverages()467 if aggrData.Count > 0 {468 /*469 out.logger.WithFields(logrus.Fields{470 "http_samples": aggrData.Count,471 "ratio": fmt.Sprintf("%.2f", float64(aggrData.Count)/float64(trailCount)),472 "t": time.Since(start),473 }).Debug("Aggregated HTTP metrics")474 //*/475 newSamples = append(newSamples, &Sample{476 Type: DataTypeAggregatedHTTPReqs,477 Metric: "http_req_li_all",478 Data: aggrData,479 })480 }481 }482 }483 delete(out.aggrBuckets, bucketID)484 }485 if len(newSamples) > 0 {486 out.bufferMutex.Lock()487 out.bufferSamples = append(out.bufferSamples, newSamples...)488 out.bufferMutex.Unlock()489 }490}491func (out *Output) flushHTTPTrails() {492 out.bufferMutex.Lock()493 defer out.bufferMutex.Unlock()494 newSamples := []*Sample{}495 for _, trail := range out.bufferHTTPTrails {496 newSamples = append(newSamples, NewSampleFromTrail(trail))497 }498 for _, bucket := range out.aggrBuckets {499 for _, subBucket := range bucket {500 for _, trails := range subBucket {501 for _, trail := range trails {502 newSamples = append(newSamples, NewSampleFromTrail(trail))503 }504 }505 }506 }507 out.bufferHTTPTrails = nil508 out.aggrBuckets = map[int64]map[[3]string]aggregationBucket{}509 out.bufferSamples = append(out.bufferSamples, newSamples...)510}511func (out *Output) shouldStopSendingMetrics(err error) bool {512 if err == nil {513 return false514 }515 if errResp, ok := err.(cloudapi.ErrorResponse); ok && errResp.Response != nil {516 return errResp.Response.StatusCode == http.StatusForbidden && errResp.Code == 4517 }518 return false519}520type pushJob struct {521 done chan error522 samples []*Sample523}524// ceil(a/b)525func ceilDiv(a, b int) int {526 r := a / b527 if a%b != 0 {528 r++529 }530 return r531}532func (out *Output) pushMetrics() {533 out.bufferMutex.Lock()534 if len(out.bufferSamples) == 0 {535 out.bufferMutex.Unlock()536 return537 }538 
buffer := out.bufferSamples539 out.bufferSamples = nil540 out.bufferMutex.Unlock()541 count := len(buffer)542 out.logger.WithFields(logrus.Fields{543 "samples": count,544 }).Debug("Pushing metrics to cloud")545 start := time.Now()546 numberOfPackages := ceilDiv(len(buffer), int(out.config.MaxMetricSamplesPerPackage.Int64))547 numberOfWorkers := int(out.config.MetricPushConcurrency.Int64)548 if numberOfWorkers > numberOfPackages {549 numberOfWorkers = numberOfPackages550 }551 ch := make(chan pushJob, numberOfPackages)552 for i := 0; i < numberOfWorkers; i++ {553 go func() {554 for job := range ch {555 err := out.client.PushMetric(out.referenceID, job.samples)556 job.done <- err557 if out.shouldStopSendingMetrics(err) {558 return559 }560 }561 }()562 }563 jobs := make([]pushJob, 0, numberOfPackages)564 for len(buffer) > 0 {565 size := len(buffer)566 if size > int(out.config.MaxMetricSamplesPerPackage.Int64) {567 size = int(out.config.MaxMetricSamplesPerPackage.Int64)568 }569 job := pushJob{done: make(chan error, 1), samples: buffer[:size]}570 ch <- job571 jobs = append(jobs, job)572 buffer = buffer[size:]573 }574 close(ch)575 for _, job := range jobs {576 err := <-job.done577 if err != nil {578 if out.shouldStopSendingMetrics(err) {579 out.logger.WithError(err).Warn("Stopped sending metrics to cloud due to an error")580 if out.config.StopOnError.Bool {581 out.engineStopFunc(err)582 }583 close(out.stopSendingMetrics)584 break585 }586 out.logger.WithError(err).Warn("Failed to send metrics to cloud")587 }588 }589 out.logger.WithFields(logrus.Fields{590 "samples": count,591 "t": time.Since(start),592 }).Debug("Pushing metrics to cloud finished")593}594func (out *Output) testFinished() error {595 if out.referenceID == "" || out.config.PushRefID.Valid {596 return nil597 }598 testTainted := false599 thresholdResults := make(cloudapi.ThresholdResult)600 for name, thresholds := range out.thresholds {601 thresholdResults[name] = make(map[string]bool)602 for _, t := range 
thresholds {603 thresholdResults[name][t.Source] = t.LastFailed604 if t.LastFailed {605 testTainted = true606 }607 }608 }...

Full Screen

Full Screen

collector.go

Source:collector.go Github

copy

Full Screen

...197 }()198 }199 defer func() {200 wg.Wait()201 c.testFinished()202 }()203 pushTicker := time.NewTicker(time.Duration(c.config.MetricPushInterval.Duration))204 for {205 select {206 case <-c.stopSendingMetricsCh:207 return208 default:209 }210 select {211 case <-quit:212 c.pushMetrics()213 return214 case <-pushTicker.C:215 c.pushMetrics()216 }217 }218}219func useCloudTags(source *httpext.Trail) *httpext.Trail {220 name, nameExist := source.Tags.Get("name")221 url, urlExist := source.Tags.Get("url")222 if !nameExist || !urlExist || name == url {223 return source224 }225 newTags := source.Tags.CloneTags()226 newTags["url"] = name227 dest := new(httpext.Trail)228 *dest = *source229 dest.Tags = stats.IntoSampleTags(&newTags)230 dest.Samples = nil231 return dest232}233// Collect receives a set of samples. This method is never called concurrently, and only while234// the context for Run() is valid, but should defer as much work as possible to Run().235func (c *Collector) Collect(sampleContainers []stats.SampleContainer) {236 select {237 case <-c.stopSendingMetricsCh:238 return239 default:240 }241 if c.referenceID == "" {242 return243 }244 newSamples := []*Sample{}245 newHTTPTrails := []*httpext.Trail{}246 for _, sampleContainer := range sampleContainers {247 switch sc := sampleContainer.(type) {248 case *httpext.Trail:249 sc = useCloudTags(sc)250 // Check if aggregation is enabled,251 if c.config.AggregationPeriod.Duration > 0 {252 newHTTPTrails = append(newHTTPTrails, sc)253 } else {254 newSamples = append(newSamples, NewSampleFromTrail(sc))255 }256 case *netext.NetTrail:257 //TODO: aggregate?258 values := map[string]float64{259 metrics.DataSent.Name: float64(sc.BytesWritten),260 metrics.DataReceived.Name: float64(sc.BytesRead),261 }262 if sc.FullIteration {263 values[metrics.IterationDuration.Name] = stats.D(sc.EndTime.Sub(sc.StartTime))264 values[metrics.Iterations.Name] = 1265 }266 newSamples = append(newSamples, &Sample{267 Type: DataTypeMap,268 Metric: 
"iter_li_all",269 Data: &SampleDataMap{270 Time: Timestamp(sc.GetTime()),271 Tags: sc.GetTags(),272 Values: values,273 }})274 default:275 for _, sample := range sampleContainer.GetSamples() {276 newSamples = append(newSamples, &Sample{277 Type: DataTypeSingle,278 Metric: sample.Metric.Name,279 Data: &SampleDataSingle{280 Type: sample.Metric.Type,281 Time: Timestamp(sample.Time),282 Tags: sample.Tags,283 Value: sample.Value,284 },285 })286 }287 }288 }289 if len(newSamples) > 0 || len(newHTTPTrails) > 0 {290 c.bufferMutex.Lock()291 c.bufferSamples = append(c.bufferSamples, newSamples...)292 c.bufferHTTPTrails = append(c.bufferHTTPTrails, newHTTPTrails...)293 c.bufferMutex.Unlock()294 }295}296func (c *Collector) aggregateHTTPTrails(waitPeriod time.Duration) {297 c.bufferMutex.Lock()298 newHTTPTrails := c.bufferHTTPTrails299 c.bufferHTTPTrails = nil300 c.bufferMutex.Unlock()301 aggrPeriod := int64(c.config.AggregationPeriod.Duration)302 // Distribute all newly buffered HTTP trails into buckets and sub-buckets303 for _, trail := range newHTTPTrails {304 trailTags := trail.GetTags()305 bucketID := trail.GetTime().UnixNano() / aggrPeriod306 // Get or create a time bucket for that trail period307 bucket, ok := c.aggrBuckets[bucketID]308 if !ok {309 bucket = aggregationBucket{}310 c.aggrBuckets[bucketID] = bucket311 }312 // Either use an existing subbucket key or use the trail tags as a new one313 subBucketKey := trailTags314 subBucket, ok := bucket[subBucketKey]315 if !ok {316 for sbTags, sb := range bucket {317 if trailTags.IsEqual(sbTags) {318 subBucketKey = sbTags319 subBucket = sb320 break321 }322 }323 }324 bucket[subBucketKey] = append(subBucket, trail)325 }326 // Which buckets are still new and we'll wait for trails to accumulate before aggregating327 bucketCutoffID := time.Now().Add(-waitPeriod).UnixNano() / aggrPeriod328 iqrRadius := c.config.AggregationOutlierIqrRadius.Float64329 iqrLowerCoef := c.config.AggregationOutlierIqrCoefLower.Float64330 iqrUpperCoef := 
c.config.AggregationOutlierIqrCoefUpper.Float64331 newSamples := []*Sample{}332 // Handle all aggregation buckets older than bucketCutoffID333 for bucketID, subBuckets := range c.aggrBuckets {334 if bucketID > bucketCutoffID {335 continue336 }337 for tags, httpTrails := range subBuckets {338 trailCount := int64(len(httpTrails))339 if trailCount < c.config.AggregationMinSamples.Int64 {340 for _, trail := range httpTrails {341 newSamples = append(newSamples, NewSampleFromTrail(trail))342 }343 continue344 }345 aggrData := &SampleDataAggregatedHTTPReqs{346 Time: Timestamp(time.Unix(0, bucketID*aggrPeriod+aggrPeriod/2)),347 Type: "aggregated_trend",348 Tags: tags,349 }350 if c.config.AggregationSkipOutlierDetection.Bool {351 // Simply add up all HTTP trails, no outlier detection352 for _, trail := range httpTrails {353 aggrData.Add(trail)354 }355 } else {356 connDurations := make(durations, trailCount)357 reqDurations := make(durations, trailCount)358 for i, trail := range httpTrails {359 connDurations[i] = trail.ConnDuration360 reqDurations[i] = trail.Duration361 }362 var minConnDur, maxConnDur, minReqDur, maxReqDur time.Duration363 if trailCount < c.config.AggregationOutlierAlgoThreshold.Int64 {364 // Since there are fewer samples, we'll use the interpolation-enabled and365 // more precise sorting-based algorithm366 minConnDur, maxConnDur = connDurations.SortGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef, true)367 minReqDur, maxReqDur = reqDurations.SortGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef, true)368 } else {369 minConnDur, maxConnDur = connDurations.SelectGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef)370 minReqDur, maxReqDur = reqDurations.SelectGetNormalBounds(iqrRadius, iqrLowerCoef, iqrUpperCoef)371 }372 for _, trail := range httpTrails {373 if trail.ConnDuration < minConnDur ||374 trail.ConnDuration > maxConnDur ||375 trail.Duration < minReqDur ||376 trail.Duration > maxReqDur {377 // Seems like an outlier, add it as a standalone 
metric378 newSamples = append(newSamples, NewSampleFromTrail(trail))379 } else {380 // Aggregate the trail381 aggrData.Add(trail)382 }383 }384 }385 aggrData.CalcAverages()386 if aggrData.Count > 0 {387 logrus.WithFields(logrus.Fields{388 "http_samples": aggrData.Count,389 }).Debug("Aggregated HTTP metrics")390 newSamples = append(newSamples, &Sample{391 Type: DataTypeAggregatedHTTPReqs,392 Metric: "http_req_li_all",393 Data: aggrData,394 })395 }396 }397 delete(c.aggrBuckets, bucketID)398 }399 if len(newSamples) > 0 {400 c.bufferMutex.Lock()401 c.bufferSamples = append(c.bufferSamples, newSamples...)402 c.bufferMutex.Unlock()403 }404}405func (c *Collector) flushHTTPTrails() {406 c.bufferMutex.Lock()407 defer c.bufferMutex.Unlock()408 newSamples := []*Sample{}409 for _, trail := range c.bufferHTTPTrails {410 newSamples = append(newSamples, NewSampleFromTrail(trail))411 }412 for _, bucket := range c.aggrBuckets {413 for _, trails := range bucket {414 for _, trail := range trails {415 newSamples = append(newSamples, NewSampleFromTrail(trail))416 }417 }418 }419 c.bufferHTTPTrails = nil420 c.aggrBuckets = map[int64]aggregationBucket{}421 c.bufferSamples = append(c.bufferSamples, newSamples...)422}423func (c *Collector) shouldStopSendingMetrics(err error) bool {424 if err == nil {425 return false426 }427 if errResp, ok := err.(ErrorResponse); ok && errResp.Response != nil {428 return errResp.Response.StatusCode == http.StatusForbidden && errResp.Code == 4429 }430 return false431}432func (c *Collector) pushMetrics() {433 c.bufferMutex.Lock()434 if len(c.bufferSamples) == 0 {435 c.bufferMutex.Unlock()436 return437 }438 buffer := c.bufferSamples439 c.bufferSamples = nil440 c.bufferMutex.Unlock()441 logrus.WithFields(logrus.Fields{442 "samples": len(buffer),443 }).Debug("Pushing metrics to cloud")444 for len(buffer) > 0 {445 var size = len(buffer)446 if size > int(c.config.MaxMetricSamplesPerPackage.Int64) {447 size = int(c.config.MaxMetricSamplesPerPackage.Int64)448 }449 err 
:= c.client.PushMetric(c.referenceID, c.config.NoCompress.Bool, buffer[:size])450 if err != nil {451 if c.shouldStopSendingMetrics(err) {452 logrus.WithError(err).Warn("Stopped sending metrics to cloud due to an error")453 close(c.stopSendingMetricsCh)454 break455 }456 logrus.WithError(err).Warn("Failed to send metrics to cloud")457 }458 buffer = buffer[size:]459 }460}461func (c *Collector) testFinished() {462 if c.referenceID == "" {463 return464 }465 testTainted := false466 thresholdResults := make(ThresholdResult)467 for name, thresholds := range c.thresholds {468 thresholdResults[name] = make(map[string]bool)469 for _, t := range thresholds {470 thresholdResults[name][t.Source] = t.LastFailed471 if t.LastFailed {472 testTainted = true473 }474 }475 }...

Full Screen

Full Screen

cloud.go

Source:cloud.go Github

copy

Full Screen

1package cloud2import (3 "fmt"4 "os"5 "time"6 "github.com/go-logr/logr"7 "github.com/sirupsen/logrus"8 "go.k6.io/k6/cloudapi"9 "go.k6.io/k6/lib"10 "go.k6.io/k6/lib/consts"11 "go.k6.io/k6/lib/types"12 "gopkg.in/guregu/null.v3"13)14var client *cloudapi.Client15type InspectOutput struct {16 External struct {17 Loadimpact struct {18 Name string `json:"name"`19 ProjectID int64 `json:"projectID"`20 } `json:"loadimpact"`21 } `json:"ext"`22 TotalDuration types.NullDuration `json:"totalDuration"`23 MaxVUs uint64 `json:"maxVUs"`24 Thresholds map[string][]string `json:"thresholds,omitempty"`25}26type TestRun struct {27 Name string `json:"name"`28 ProjectID int64 `json:"project_id,omitempty"`29 VUsMax int64 `json:"vus"`30 Thresholds map[string][]string `json:"thresholds"`31 Duration int64 `json:"duration"`32 ProcessThresholds bool `json:"process_thresholds"`33 Instances int32 `json:"instances"`34}35func CreateTestRun(opts InspectOutput, instances int32, host, token string, log logr.Logger) (string, error) {36 if len(opts.External.Loadimpact.Name) < 1 {37 opts.External.Loadimpact.Name = "k6-operator-test"38 }39 cloudConfig := cloudapi.NewConfig()40 if opts.External.Loadimpact.ProjectID > 0 {41 cloudConfig.ProjectID = null.NewInt(opts.External.Loadimpact.ProjectID, true)42 }43 logger := &logrus.Logger{44 Out: os.Stdout,45 Formatter: new(logrus.TextFormatter),46 Hooks: make(logrus.LevelHooks),47 Level: logrus.InfoLevel,48 }49 if opts.Thresholds == nil {50 opts.Thresholds = make(map[string][]string)51 }52 if len(host) == 0 {53 host = cloudConfig.Host.String54 }55 client = cloudapi.NewClient(logger, token, host, consts.Version, time.Duration(time.Minute))56 resp, err := createTestRun(client, host, &TestRun{57 Name: opts.External.Loadimpact.Name,58 ProjectID: cloudConfig.ProjectID.Int64,59 VUsMax: int64(opts.MaxVUs),60 Thresholds: opts.Thresholds,61 Duration: int64(opts.TotalDuration.TimeDuration().Seconds()),62 ProcessThresholds: true,63 Instances: instances,64 })65 if err != nil 
{66 return "", err67 }68 return resp.ReferenceID, nil69}70// We cannot use cloudapi.TestRun struct and cloudapi.Client.CreateTestRun call because they're not aware of71// process_thresholds argument; so let's use custom struct and function instead72func createTestRun(client *cloudapi.Client, host string, testRun *TestRun) (*cloudapi.CreateTestRunResponse, error) {73 url := host + "/v1/tests"74 req, err := client.NewRequest("POST", url, testRun)75 if err != nil {76 return nil, err77 }78 ctrr := cloudapi.CreateTestRunResponse{}79 err = client.Do(req, &ctrr)80 if err != nil {81 return nil, err82 }83 if ctrr.ReferenceID == "" {84 return nil, fmt.Errorf("failed to get a reference ID")85 }86 return &ctrr, nil87}88func FinishTestRun(refID string) error {89 return client.TestFinished(refID, cloudapi.ThresholdResult(90 map[string]map[string]bool{},91 ), false, lib.RunStatusFinished)92}...

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 cloud := new(Cloud)4 cloud.testFinished()5}6import (7func main() {8 cloud := new(Cloud)9 cloud.testFinished()10}11import (12func main() {13 cloud := new(Cloud)14 cloud.testFinished()15}16import (17func main() {18 cloud := new(Cloud)19 cloud.testFinished()20}21import (22func main() {23 cloud := new(Cloud)24 cloud.testFinished()25}26import (27func main() {28 cloud := new(Cloud)29 cloud.testFinished()30}31import (32func main() {33 cloud := new(Cloud)34 cloud.testFinished()35}36import (37func main() {38 cloud := new(Cloud)39 cloud.testFinished()40}41import (42func main() {43 cloud := new(Cloud)44 cloud.testFinished()45}46import (47func main() {48 cloud := new(Cloud)49 cloud.testFinished()50}51import (52func main() {53 cloud := new(Cloud)54 cloud.testFinished()55}56import (57func main() {

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2type cloud struct {3}4func (c *cloud) testFinished() {5 fmt.Println("test finished")6}7func main() {8 c.testFinished()9}

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 cloud := test.NewCloud()4 fmt.Println(cloud.TestFinished())5}6import (7func main() {8 cloud := test.NewCloud()9 fmt.Println(cloud.TestFinished())10}11type Cloud struct {12}13func NewCloud() *Cloud {14 return &Cloud{}15}16func (cloud *Cloud) TestFinished() bool {17}18import (19func main() {20 cloud := test.NewCloud()21 fmt.Println(cloud.TestFinished())22}23 /usr/local/go/src/test (from $GOROOT)24 /home/username/go/src/test (from $GOPATH)25 /home/username/go/src/test (from $GOPATH)26import (27func main() {28 cloud := test.NewCloud()29 fmt.Println(cloud.TestFinished())30}31 /usr/local/go/src/test (from $GOROOT)32 /home/username/go/src/test (from $GOPATH)33 /home/username/go/src/test (from $GOPATH)

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2type Cloud struct {3}4func (c *Cloud) testFinished() bool {5}6func main() {7 c := Cloud{"AWS"}8 fmt.Println(c.testFinished())9}10import (11type Cloud struct {12}13func (c Cloud) testFinished() bool {14}15func main() {16 c := Cloud{"AWS"}17 fmt.Println(c.testFinished())18}19import (20type Cloud struct {21}22func (c *Cloud) testFinished() bool {23}24func main() {25 c := Cloud{"AWS"}26 fmt.Println(c.testFinished())27}28import

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 c := cloud{}4 c.testFinished()5}6import (7func main() {8 c := cloud{}9 c.testFinished()10}11import (12type cloud struct {13}14func (c *cloud) testFinished() {15 fmt.Println("test finished")16}17func (c *cloud) testFinished() {18 c.testFinished()19}20func main() {21 c := cloud{}22 c.testFinished()23}24import (25type cloud struct {26}27func (c *cloud) testFinished() {28 fmt.Println("test finished")29}30func (c *cloud) testFinished() {31 c.testFinished()32}33func main() {34 c := cloud{}35 c.testFinished()36}

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 fmt.Println("Hello, playground")4 c := cloud.New("test")5 c.TestFinished()6}7import (8func main() {9 fmt.Println("Hello, playground")10 c := cloud.New("test")11 c.TestFinished()12}13import (14type Cloud struct {15}16func New(name string) *Cloud {17 return &Cloud{Name: name}18}19func (c *Cloud) TestFinished() {20 fmt.Println("test finished")21}

Full Screen

Full Screen

testFinished

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 c := cloud{1, "Amazon"}4 fmt.Println("Before calling testFinished method")5 c.testFinished()6 fmt.Println("After calling testFinished method")7}8import (9func main() {10 c := cloud{1, "Amazon"}11 fmt.Println("Before calling testFinished method")12 c.testFinished()13 fmt.Println("After calling testFinished method")14}15When we are importing a package, the package is imported with the name of t

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run K6 automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Most used method in

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful