How to use the newTestEngineWithRegistry method of the core package

Best k6 code snippet using core.newTestEngineWithRegistry. The helper is a wrapper around NewEngine used throughout k6's engine tests: it applies a test logger, derives scenarios from the option shortcuts, builds a local execution scheduler, and returns the initialized Engine together with a run callback that executes the test and a wait callback that tears everything down.

Source: engine_test.go (GitHub)

...
)

const isWindows = runtime.GOOS == "windows"

// TODO: completely rewrite all of these tests
// Wrapper around NewEngine that applies a logger and manages the options.
func newTestEngineWithRegistry( //nolint:golint
	t *testing.T, runCtx context.Context, runner lib.Runner, outputs []output.Output, opts lib.Options,
	registry *metrics.Registry,
) (engine *Engine, run func() error, wait func()) {
	if runner == nil {
		runner = &minirunner.MiniRunner{}
	}
	globalCtx, globalCancel := context.WithCancel(context.Background())
	var runCancel func()
	if runCtx == nil {
		runCtx, runCancel = context.WithCancel(globalCtx)
	}
	logger := logrus.New()
	logger.SetOutput(testutils.NewTestOutput(t))
	newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{
		MetricSamplesBufferSize: null.NewInt(200, false),
	}.Apply(runner.GetOptions()).Apply(opts), logger)
	require.NoError(t, err)
	require.Empty(t, newOpts.Validate())
	require.NoError(t, runner.SetOptions(newOpts))
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger)
	require.NoError(t, err)
	engine, err = NewEngine(execScheduler, opts, lib.RuntimeOptions{}, outputs, logger, registry)
	require.NoError(t, err)
	require.NoError(t, engine.OutputManager.StartOutputs())
	run, waitFn, err := engine.Init(globalCtx, runCtx)
	require.NoError(t, err)
	return engine, run, func() {
		if runCancel != nil {
			runCancel()
		}
		globalCancel()
		waitFn()
		engine.OutputManager.StopOutputs()
	}
}

func newTestEngine(
	t *testing.T, runCtx context.Context, runner lib.Runner, outputs []output.Output, opts lib.Options, //nolint:revive
) (engine *Engine, run func() error, wait func()) {
	return newTestEngineWithRegistry(t, runCtx, runner, outputs, opts, metrics.NewRegistry())
}

func TestNewEngine(t *testing.T) {
	t.Parallel()
	newTestEngine(t, nil, nil, nil, lib.Options{})
}

func TestEngineRun(t *testing.T) {
	t.Parallel()
	logrus.SetLevel(logrus.DebugLevel)
	t.Run("exits with context", func(t *testing.T) {
		t.Parallel()
		done := make(chan struct{})
		runner := &minirunner.MiniRunner{
			Fn: func(ctx context.Context, _ *lib.State, _ chan<- metrics.SampleContainer) error {
				<-ctx.Done()
				close(done)
				return nil
			},
		}
		duration := 100 * time.Millisecond
		ctx, cancel := context.WithTimeout(context.Background(), duration)
		defer cancel()
		_, run, wait := newTestEngine(t, ctx, runner, nil, lib.Options{})
		defer wait()
		startTime := time.Now()
		assert.NoError(t, run())
		assert.WithinDuration(t, startTime.Add(duration), time.Now(), 100*time.Millisecond)
		<-done
	})
	t.Run("exits with executor", func(t *testing.T) {
		t.Parallel()
		e, run, wait := newTestEngine(t, nil, nil, nil, lib.Options{
			VUs:        null.IntFrom(10),
			Iterations: null.IntFrom(100),
		})
		defer wait()
		assert.NoError(t, run())
		assert.Equal(t, uint64(100), e.ExecutionScheduler.GetState().GetFullIterationCount())
	})
	// Make sure samples are discarded after context close (using "cutoff" timestamp in local.go)
	t.Run("collects samples", func(t *testing.T) {
		t.Parallel()
		registry := metrics.NewRegistry()
		testMetric, err := registry.NewMetric("test_metric", metrics.Trend)
		require.NoError(t, err)
		signalChan := make(chan interface{})
		runner := &minirunner.MiniRunner{
			Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
				metrics.PushIfNotDone(ctx, out, metrics.Sample{Metric: testMetric, Time: time.Now(), Value: 1})
				close(signalChan)
				<-ctx.Done()
				metrics.PushIfNotDone(ctx, out, metrics.Sample{Metric: testMetric, Time: time.Now(), Value: 1})
				return nil
			},
		}
		mockOutput := mockoutput.New()
		ctx, cancel := context.WithCancel(context.Background())
		_, run, wait := newTestEngineWithRegistry(t, ctx, runner, []output.Output{mockOutput}, lib.Options{
			VUs:        null.IntFrom(1),
			Iterations: null.IntFrom(1),
		}, registry)
		errC := make(chan error)
		go func() { errC <- run() }()
		<-signalChan
		cancel()
		assert.NoError(t, <-errC)
		wait()
		found := 0
		for _, s := range mockOutput.Samples {
			if s.Metric != testMetric {
				continue
			}
			found++
			assert.Equal(t, 1.0, s.Value, "wrong value")
		}
		assert.Equal(t, 1, found, "wrong number of samples")
	})
}

func TestEngineAtTime(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_, run, wait := newTestEngine(t, ctx, nil, nil, lib.Options{
		VUs:      null.IntFrom(2),
		Duration: types.NullDurationFrom(20 * time.Second),
	})
	defer wait()
	assert.NoError(t, run())
}

func TestEngineStopped(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	e, run, wait := newTestEngine(t, ctx, nil, nil, lib.Options{
		VUs:      null.IntFrom(1),
		Duration: types.NullDurationFrom(20 * time.Second),
	})
	defer wait()
	assert.NoError(t, run())
	assert.Equal(t, false, e.IsStopped(), "engine should be running")
	e.Stop()
	assert.Equal(t, true, e.IsStopped(), "engine should be stopped")
	e.Stop() // test that a second stop doesn't panic
}

func TestEngineOutput(t *testing.T) {
	t.Parallel()
	registry := metrics.NewRegistry()
	testMetric, err := registry.NewMetric("test_metric", metrics.Trend)
	require.NoError(t, err)
	runner := &minirunner.MiniRunner{
		Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
			out <- metrics.Sample{Metric: testMetric}
			return nil
		},
	}
	mockOutput := mockoutput.New()
	e, run, wait := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{
		VUs:        null.IntFrom(1),
		Iterations: null.IntFrom(1),
	}, registry)
	assert.NoError(t, run())
	wait()
	cSamples := []metrics.Sample{}
	for _, sample := range mockOutput.Samples {
		if sample.Metric == testMetric {
			cSamples = append(cSamples, sample)
		}
	}
	metric := e.MetricsEngine.ObservedMetrics["test_metric"]
	if assert.NotNil(t, metric) {
		sink := metric.Sink.(*metrics.TrendSink) // nolint: forcetypeassert
		if assert.NotNil(t, sink) {
			numOutputSamples := len(cSamples)
			numEngineSamples := len(sink.Values)
			assert.Equal(t, numEngineSamples, numOutputSamples)
		}
	}
}

func TestEngine_processSamples(t *testing.T) {
	t.Parallel()
	t.Run("metric", func(t *testing.T) {
		t.Parallel()
		registry := metrics.NewRegistry()
		metric, err := registry.NewMetric("my_metric", metrics.Gauge)
		require.NoError(t, err)
		e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{}, registry)
		e.OutputManager.AddMetricSamples(
			[]metrics.SampleContainer{metrics.Sample{Metric: metric, Value: 1.25, Tags: metrics.IntoSampleTags(&map[string]string{"a": "1"})}},
		)
		e.Stop()
		wait()
		assert.IsType(t, &metrics.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_metric"].Sink)
	})
	t.Run("submetric", func(t *testing.T) {
		t.Parallel()
		registry := metrics.NewRegistry()
		metric, err := registry.NewMetric("my_metric", metrics.Gauge)
		require.NoError(t, err)
		ths := metrics.NewThresholds([]string{`value<2`})
		gotParseErr := ths.Parse()
		require.NoError(t, gotParseErr)
		e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{
			Thresholds: map[string]metrics.Thresholds{
				"my_metric{a:1}": ths,
			},
		}, registry)
		e.OutputManager.AddMetricSamples(
			[]metrics.SampleContainer{metrics.Sample{Metric: metric, Value: 1.25, Tags: metrics.IntoSampleTags(&map[string]string{"a": "1", "b": "2"})}},
		)
		e.Stop()
		wait()
		assert.Len(t, e.MetricsEngine.ObservedMetrics, 2)
		sms := e.MetricsEngine.ObservedMetrics["my_metric{a:1}"]
		assert.EqualValues(t, map[string]string{"a": "1"}, sms.Sub.Tags.CloneTags())
		assert.IsType(t, &metrics.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_metric"].Sink)
		assert.IsType(t, &metrics.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_metric{a:1}"].Sink)
	})
}

func TestEngineThresholdsWillAbort(t *testing.T) {
	t.Parallel()
	registry := metrics.NewRegistry()
	metric, err := registry.NewMetric("my_metric", metrics.Gauge)
	require.NoError(t, err)
	// The incoming samples for the metric set it to 1.25. Considering
	// the metric is of type Gauge, value > 1.25 should always fail, and
	// trigger an abort.
	ths := metrics.NewThresholds([]string{"value>1.25"})
	gotParseErr := ths.Parse()
	require.NoError(t, gotParseErr)
	ths.Thresholds[0].AbortOnFail = true
	thresholds := map[string]metrics.Thresholds{metric.Name: ths}
	e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{Thresholds: thresholds}, registry)
	e.OutputManager.AddMetricSamples(
		[]metrics.SampleContainer{metrics.Sample{Metric: metric, Value: 1.25, Tags: metrics.IntoSampleTags(&map[string]string{"a": "1"})}},
	)
	e.Stop()
	wait()
	assert.True(t, e.thresholdsTainted)
}

func TestEngineAbortedByThresholds(t *testing.T) {
	t.Parallel()
	registry := metrics.NewRegistry()
	metric, err := registry.NewMetric("my_metric", metrics.Gauge)
	require.NoError(t, err)
	// The MiniRunner sets the value of the metric to 1.25. Considering
	// the metric is of type Gauge, value > 1.25 should always fail, and
	// trigger an abort.
	// **N.B**: a threshold returning an error won't trigger an abort.
	ths := metrics.NewThresholds([]string{"value>1.25"})
	gotParseErr := ths.Parse()
	require.NoError(t, gotParseErr)
	ths.Thresholds[0].AbortOnFail = true
	thresholds := map[string]metrics.Thresholds{metric.Name: ths}
	done := make(chan struct{})
	runner := &minirunner.MiniRunner{
		Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
			out <- metrics.Sample{Metric: metric, Value: 1.25, Tags: metrics.IntoSampleTags(&map[string]string{"a": "1"})}
			<-ctx.Done()
			close(done)
			return nil
		},
	}
	_, run, wait := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, registry)
	defer wait()
	go func() {
		assert.NoError(t, run())
	}()
	select {
	case <-done:
		return
	case <-time.After(10 * time.Second):
		assert.Fail(t, "Test should have completed within 10 seconds")
	}
}

func TestEngine_processThresholds(t *testing.T) {
	t.Parallel()
	testdata := map[string]struct {
		pass bool
		ths  map[string][]string
	}{
		"passing":                    {true, map[string][]string{"my_metric": {"value<2"}}},
		"failing":                    {false, map[string][]string{"my_metric": {"value>1.25"}}},
		"submetric,match,passing":    {true, map[string][]string{"my_metric{a:1}": {"value<2"}}},
		"submetric,match,failing":    {false, map[string][]string{"my_metric{a:1}": {"value>1.25"}}},
		"submetric,nomatch,passing":  {true, map[string][]string{"my_metric{a:2}": {"value<2"}}},
		"submetric,nomatch,failing":  {false, map[string][]string{"my_metric{a:2}": {"value>1.25"}}},
		"unused,passing":             {true, map[string][]string{"unused_counter": {"count==0"}}},
		"unused,failing":             {false, map[string][]string{"unused_counter": {"count>1"}}},
		"unused,subm,passing":        {true, map[string][]string{"unused_counter{a:2}": {"count<1"}}},
		"unused,subm,failing":        {false, map[string][]string{"unused_counter{a:2}": {"count>1"}}},
		"used,passing":               {true, map[string][]string{"used_counter": {"count==2"}}},
		"used,failing":               {false, map[string][]string{"used_counter": {"count<1"}}},
		"used,subm,passing":          {true, map[string][]string{"used_counter{b:1}": {"count==2"}}},
		"used,not-subm,passing":      {true, map[string][]string{"used_counter{b:2}": {"count==0"}}},
		"used,invalid-subm,passing1": {true, map[string][]string{"used_counter{c:''}": {"count==0"}}},
		"used,invalid-subm,failing1": {false, map[string][]string{"used_counter{c:''}": {"count>0"}}},
		"used,invalid-subm,passing2": {true, map[string][]string{"used_counter{c:}": {"count==0"}}},
		"used,invalid-subm,failing2": {false, map[string][]string{"used_counter{c:}": {"count>0"}}},
	}
	for name, data := range testdata {
		name, data := name, data
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			registry := metrics.NewRegistry()
			gaugeMetric, err := registry.NewMetric("my_metric", metrics.Gauge)
			require.NoError(t, err)
			counterMetric, err := registry.NewMetric("used_counter", metrics.Counter)
			require.NoError(t, err)
			_, err = registry.NewMetric("unused_counter", metrics.Counter)
			require.NoError(t, err)
			thresholds := make(map[string]metrics.Thresholds, len(data.ths))
			for m, srcs := range data.ths {
				ths := metrics.NewThresholds(srcs)
				gotParseErr := ths.Parse()
				require.NoError(t, gotParseErr)
				thresholds[m] = ths
			}
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			e, run, wait := newTestEngineWithRegistry(
				t, ctx, &minirunner.MiniRunner{}, nil, lib.Options{Thresholds: thresholds}, registry,
			)
			e.OutputManager.AddMetricSamples(
				[]metrics.SampleContainer{
					metrics.Sample{Metric: gaugeMetric, Value: 1.25, Tags: metrics.IntoSampleTags(&map[string]string{"a": "1"})},
					metrics.Sample{Metric: counterMetric, Value: 2, Tags: metrics.IntoSampleTags(&map[string]string{"b": "1"})},
				},
			)
			require.NoError(t, run())
			wait()
			assert.Equal(t, data.pass, !e.IsTainted())
		})
	}
}

func getMetricSum(mo *mockoutput.MockOutput, name string) (result float64) {
	for _, sc := range mo.SampleContainers {
		for _, s := range sc.GetSamples() {
			if s.Metric.Name == name {
				result += s.Value
			}
		}
	}
	return
}

func getMetricCount(mo *mockoutput.MockOutput, name string) (result uint) {
	for _, sc := range mo.SampleContainers {
		for _, s := range sc.GetSamples() {
			if s.Metric.Name == name {
				result++
			}
		}
	}
	return
}

func getMetricMax(mo *mockoutput.MockOutput, name string) (result float64) {
	for _, sc := range mo.SampleContainers {
		for _, s := range sc.GetSamples() {
			if s.Metric.Name == name && s.Value > result {
				result = s.Value
			}
		}
	}
	return
}

const expectedHeaderMaxLength = 550

// FIXME: This test is too brittle, consider simplifying.
func TestSentReceivedMetrics(t *testing.T) {
	t.Parallel()
	tb := httpmultibin.NewHTTPMultiBin(t)
	tr := tb.Replacer.Replace
	type testScript struct {
		Code                 string
		NumRequests          int64
		ExpectedDataSent     int64
		ExpectedDataReceived int64
	}
	testScripts := []testScript{
		{tr(`import http from "k6/http";
			export default function() {
				http.get("HTTPBIN_URL/bytes/15000");
			}`), 1, 0, 15000},
		// NOTE: This needs to be improved; in the case of an HTTPS URL it's
		// quite possible for the received data to fall outside the expected interval.
		{tr(`import http from "k6/http";
			export default function() {
				http.get("HTTPBIN_URL/bytes/5000");
				http.get("HTTPSBIN_URL/bytes/5000");
				http.batch(["HTTPBIN_URL/bytes/10000", "HTTPBIN_URL/bytes/20000", "HTTPSBIN_URL/bytes/10000"]);
			}`), 5, 0, 50000},
		{tr(`import http from "k6/http";
			let data = "0123456789".repeat(100);
			export default function() {
				http.post("HTTPBIN_URL/ip", {
					file: http.file(data, "test.txt")
				});
			}`), 1, 1000, 100},
		// NOTE(imiric): This needs to keep testing against /ws-echo-invalid because
		// this test is highly sensitive to metric data, and slightly differing
		// WS server implementations might introduce flakiness.
		// See https://github.com/k6io/k6/pull/1149
		{tr(`import ws from "k6/ws";
			let data = "0123456789".repeat(100);
			export default function() {
				ws.connect("WSBIN_URL/ws-echo-invalid", null, function (socket) {
					socket.on('open', function open() {
						socket.send(data);
					});
					socket.on('message', function (message) {
						socket.close();
					});
				});
			}`), 2, 1000, 1000},
	}
	type testCase struct{ Iterations, VUs int64 }
	testCases := []testCase{
		{1, 1}, {2, 2}, {2, 1}, {5, 2}, {25, 2}, {50, 5},
	}
	runTest := func(t *testing.T, ts testScript, tc testCase, noConnReuse bool) (float64, float64) {
		registry := metrics.NewRegistry()
		builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
		r, err := js.New(
			&lib.RuntimeState{
				Logger:         testutils.NewLogger(t),
				BuiltinMetrics: builtinMetrics,
				Registry:       registry,
			},
			&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(ts.Code)},
			nil,
		)
		require.NoError(t, err)
		mockOutput := mockoutput.New()
		_, run, wait := newTestEngine(t, nil, r, []output.Output{mockOutput}, lib.Options{
			Iterations:            null.IntFrom(tc.Iterations),
			VUs:                   null.IntFrom(tc.VUs),
			Hosts:                 tb.Dialer.Hosts,
			InsecureSkipTLSVerify: null.BoolFrom(true),
			NoVUConnectionReuse:   null.BoolFrom(noConnReuse),
			Batch:                 null.IntFrom(20),
		})
		errC := make(chan error)
		go func() { errC <- run() }()
		select {
		case <-time.After(10 * time.Second):
			t.Fatal("Test timed out")
		case err := <-errC:
			require.NoError(t, err)
		}
		wait()
		checkData := func(name string, expected int64) float64 {
			data := getMetricSum(mockOutput, name)
			expectedDataMin := float64(expected * tc.Iterations)
			expectedDataMax := float64((expected + ts.NumRequests*expectedHeaderMaxLength) * tc.Iterations)
			if data < expectedDataMin || data > expectedDataMax {
				t.Errorf(
					"The %s sum should be in the interval [%f, %f] but was %f",
					name, expectedDataMin, expectedDataMax, data,
				)
			}
			return data
		}
		return checkData(metrics.DataSentName, ts.ExpectedDataSent),
			checkData(metrics.DataReceivedName, ts.ExpectedDataReceived)
	}
	getTestCase := func(t *testing.T, ts testScript, tc testCase) func(t *testing.T) {
		return func(t *testing.T) {
			t.Parallel()
			noReuseSent, noReuseReceived := runTest(t, ts, tc, true)
			reuseSent, reuseReceived := runTest(t, ts, tc, false)
			if noReuseSent < reuseSent {
				t.Errorf("reuseSent=%f is greater than noReuseSent=%f", reuseSent, noReuseSent)
			}
			if noReuseReceived < reuseReceived {
				t.Errorf("reuseReceived=%f is greater than noReuseReceived=%f", reuseReceived, noReuseReceived)
			}
		}
	}
	// This Run will not return until the parallel subtests complete.
	t.Run("group", func(t *testing.T) {
		t.Parallel()
		for tsNum, ts := range testScripts {
			for tcNum, tc := range testCases {
				t.Run(
					fmt.Sprintf("SentReceivedMetrics_script[%d]_case[%d](%d,%d)", tsNum, tcNum, tc.Iterations, tc.VUs),
					getTestCase(t, ts, tc),
				)
			}
		}
	})
}

func TestRunTags(t *testing.T) {
	t.Parallel()
	tb := httpmultibin.NewHTTPMultiBin(t)
	runTagsMap := map[string]string{"foo": "bar", "test": "mest", "over": "written"}
	runTags := metrics.NewSampleTags(runTagsMap)
	script := []byte(tb.Replacer.Replace(`
		import http from "k6/http";
		import ws from "k6/ws";
		import { Counter } from "k6/metrics";
		import { group, check, fail } from "k6";
		let customTags = { "over": "the rainbow" };
		let params = { "tags": customTags};
		let statusCheck = { "status is 200": (r) => r.status === 200 }
		let myCounter = new Counter("mycounter");
		export default function() {
			group("http", function() {
				check(http.get("HTTPSBIN_URL", params), statusCheck, customTags);
				check(http.get("HTTPBIN_URL/status/418", params), statusCheck, customTags);
			})
			group("websockets", function() {
				var response = ws.connect("WSBIN_URL/ws-echo", params, function (socket) {
					socket.on('open', function open() {
						console.log('ws open and say hello');
						socket.send("hello");
					});
					socket.on('message', function (message) {
						console.log('ws got message ' + message);
						if (message != "hello") {
							fail("Expected to receive 'hello' but got '" + message + "' instead !");
						}
						console.log('ws closing socket...');
						socket.close();
					});
					socket.on('close', function () {
						console.log('ws close');
					});
					socket.on('error', function (e) {
						console.log('ws error: ' + e.error());
					});
				});
				console.log('connect returned');
				check(response, { "status is 101": (r) => r && r.status === 101 }, customTags);
			})
			myCounter.add(1, customTags);
		}
	`))
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	r, err := js.New(
		&lib.RuntimeState{
			Logger:         testutils.NewLogger(t),
			BuiltinMetrics: builtinMetrics,
			Registry:       registry,
		},
		&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script},
		nil,
	)
	require.NoError(t, err)
	mockOutput := mockoutput.New()
	_, run, wait := newTestEngine(t, nil, r, []output.Output{mockOutput}, lib.Options{
		Iterations:            null.IntFrom(3),
		VUs:                   null.IntFrom(2),
		Hosts:                 tb.Dialer.Hosts,
		RunTags:               runTags,
		SystemTags:            &metrics.DefaultSystemTagSet,
		InsecureSkipTLSVerify: null.BoolFrom(true),
	})
	errC := make(chan error)
	go func() { errC <- run() }()
	select {
	case <-time.After(10 * time.Second):
		t.Fatal("Test timed out")
	case err := <-errC:
		require.NoError(t, err)
	}
	wait()
	systemMetrics := []string{
		metrics.VUsName, metrics.VUsMaxName, metrics.IterationsName, metrics.IterationDurationName,
		metrics.GroupDurationName, metrics.DataSentName, metrics.DataReceivedName,
	}
	getExpectedOverVal := func(metricName string) string {
		for _, sysMetric := range systemMetrics {
			if sysMetric == metricName {
				return runTagsMap["over"]
			}
		}
		return "the rainbow"
	}
	for _, s := range mockOutput.Samples {
		for key, expVal := range runTagsMap {
			val, ok := s.Tags.Get(key)
			if key == "over" {
				expVal = getExpectedOverVal(s.Metric.Name)
			}
			assert.True(t, ok)
			assert.Equalf(t, expVal, val, "Wrong tag value in sample for metric %#v", s.Metric)
		}
	}
}

func TestSetupTeardownThresholds(t *testing.T) {
	t.Parallel()
	tb := httpmultibin.NewHTTPMultiBin(t)
	script := []byte(tb.Replacer.Replace(`
		import http from "k6/http";
		import { check } from "k6";
		import { Counter } from "k6/metrics";
		let statusCheck = { "status is 200": (r) => r.status === 200 }
		let myCounter = new Counter("setup_teardown");
		export let options = {
			iterations: 5,
			thresholds: {
				"setup_teardown": ["count == 2"],
				"iterations": ["count == 5"],
				"http_reqs": ["count == 7"],
			},
		};
		export function setup() {
			check(http.get("HTTPBIN_IP_URL"), statusCheck) && myCounter.add(1);
		};
		export default function () {
			check(http.get("HTTPBIN_IP_URL"), statusCheck);
		};
		export function teardown() {
			check(http.get("HTTPBIN_IP_URL"), statusCheck) && myCounter.add(1);
		};
	`))
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	runner, err := js.New(
		&lib.RuntimeState{
			Logger:         testutils.NewLogger(t),
			BuiltinMetrics: builtinMetrics,
			Registry:       registry,
		},
		&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script},
		nil,
	)
	require.NoError(t, err)
	engine, run, wait := newTestEngine(t, nil, runner, nil, lib.Options{
		SystemTags:      &metrics.DefaultSystemTagSet,
		SetupTimeout:    types.NullDurationFrom(3 * time.Second),
		TeardownTimeout: types.NullDurationFrom(3 * time.Second),
		VUs:             null.IntFrom(3),
	})
	defer wait()
	errC := make(chan error)
	go func() { errC <- run() }()
	select {
	case <-time.After(10 * time.Second):
		t.Fatal("Test timed out")
	case err := <-errC:
		require.NoError(t, err)
		require.False(t, engine.IsTainted())
	}
}

func TestSetupException(t *testing.T) {
	t.Parallel()
	script := []byte(`
	import bar from "./bar.js";
	export function setup() {
		bar();
	};
	export default function() {
	};
	`)
	memfs := afero.NewMemMapFs()
	require.NoError(t, afero.WriteFile(memfs, "/bar.js", []byte(`
	export default function () {
		baz();
	}
	function baz() {
		throw new Error("baz");
	}
	`), 0x666))
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	runner, err := js.New(
		&lib.RuntimeState{
			Logger:         testutils.NewLogger(t),
			BuiltinMetrics: builtinMetrics,
			Registry:       registry,
		},
		&loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script},
		map[string]afero.Fs{"file": memfs},
	)
	require.NoError(t, err)
	_, run, wait := newTestEngine(t, nil, runner, nil, lib.Options{
		SystemTags:      &metrics.DefaultSystemTagSet,
		SetupTimeout:    types.NullDurationFrom(3 * time.Second),
		TeardownTimeout: types.NullDurationFrom(3 * time.Second),
		VUs:             null.IntFrom(3),
	})
	defer wait()
	errC := make(chan error)
	go func() { errC <- run() }()
	select {
	case <-time.After(10 * time.Second):
		t.Fatal("Test timed out")
	case err := <-errC:
		require.Error(t, err)
		var exception errext.Exception
		require.ErrorAs(t, err, &exception)
		require.Equal(t, "Error: baz\n\tat baz (file:///bar.js:6:16(3))\n"+
			"\tat file:///bar.js:3:8(3)\n\tat setup (file:///script.js:4:2(4))\n\tat native\n",
			err.Error())
	}
}

func TestVuInitException(t *testing.T) {
	t.Parallel()
	script := []byte(`
		export let options = {
			vus: 3,
			iterations: 5,
		};
		export default function() {};
		if (__VU == 2) {
			throw new Error('oops in ' + __VU);
		}
	`)
	logger := testutils.NewLogger(t)
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	runner, err := js.New(
		&lib.RuntimeState{
			Logger:         testutils.NewLogger(t),
			BuiltinMetrics: builtinMetrics,
			Registry:       registry,
		},
		&loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script},
		nil,
	)
	require.NoError(t, err)
	opts, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions(), nil)
	require.NoError(t, err)
	require.Empty(t, opts.Validate())
	require.NoError(t, runner.SetOptions(opts))
	execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger)
	require.NoError(t, err)
	engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, nil, logger, registry)
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_, _, err = engine.Init(ctx, ctx) // no need for 2 different contexts
	require.Error(t, err)
	var exception errext.Exception
	require.ErrorAs(t, err, &exception)
	assert.Equal(t, "Error: oops in 2\n\tat file:///script.js:10:9(31)\n", err.Error())
	var errWithHint errext.HasHint
	require.ErrorAs(t, err, &errWithHint)
	assert.Equal(t, "error while initializing VU #2 (script exception)", errWithHint.Hint())
}

func TestEmittedMetricsWhenScalingDown(t *testing.T) {
	t.Parallel()
	tb := httpmultibin.NewHTTPMultiBin(t)
	script := []byte(tb.Replacer.Replace(`
		import http from "k6/http";
		import { sleep } from "k6";
		export let options = {
			systemTags: ["iter", "vu", "url"],
			scenarios: {
				we_need_hard_stop_and_ramp_down: {
					executor: "ramping-vus",
					// Start with 2 VUs for 4 seconds and then quickly scale down to 1 for the next 4s and then quit
					startVUs: 2,
					stages: [
						{ duration: "4s", target: 2 },
						{ duration: "0s", target: 1 },
						{ duration: "4s", target: 1 },
					],
					gracefulStop: "0s",
					gracefulRampDown: "0s",
				},
			},
		};
		export default function () {
			console.log("VU " + __VU + " starting iteration #" + __ITER);
			http.get("HTTPBIN_IP_URL/bytes/15000");
			sleep(3.1);
			http.get("HTTPBIN_IP_URL/bytes/15000");
			console.log("VU " + __VU + " ending iteration #" + __ITER);
		};
	`))
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	runner, err := js.New(
		&lib.RuntimeState{
			Logger:         testutils.NewLogger(t),
			BuiltinMetrics: builtinMetrics,
			Registry:       registry,
		},
		&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script},
		nil,
	)
	require.NoError(t, err)
	mockOutput := mockoutput.New()
	engine, run, wait := newTestEngine(t, nil, runner, []output.Output{mockOutput}, lib.Options{})
	errC := make(chan error)
	go func() { errC <- run() }()
	select {
	case <-time.After(12 * time.Second):
		t.Fatal("Test timed out")
	case err := <-errC:
		require.NoError(t, err)
		wait()
		require.False(t, engine.IsTainted())
	}
	// The 3.1s sleep in the default function would cause the first VU to complete 2 full iterations
	// and start executing its third one, while the second VU will only fully complete 1 iteration
	// and will be canceled in the middle of its second one.
	assert.Equal(t, 3.0, getMetricSum(mockOutput, metrics.IterationsName))
	// That means that we expect to see 8 HTTP requests in total, 3*2=6 from the complete iterations
	// and one each from the two iterations that would be canceled in the middle of their execution.
	assert.Equal(t, 8.0, getMetricSum(mockOutput, metrics.HTTPReqsName))
	// And we expect to see the data_received for all 8 of those requests. Previously, the data for
	// the 8th request (the 3rd one in the first VU before the test ends) was cut off by the engine
	// because it was emitted after the test officially ended. But that was mostly an unintended
	// consequence of the fact that those metrics were emitted only after an iteration ended, when
	// it was interrupted.
	dataReceivedExpectedMin := 15000.0 * 8
	dataReceivedExpectedMax := (15000.0 + expectedHeaderMaxLength) * 8
	dataReceivedActual := getMetricSum(mockOutput, metrics.DataReceivedName)
	if dataReceivedActual < dataReceivedExpectedMin || dataReceivedActual > dataReceivedExpectedMax {
		t.Errorf(
			"The data_received sum should be in the interval [%f, %f] but was %f",
			dataReceivedExpectedMin, dataReceivedExpectedMax, dataReceivedActual,
		)
	}
	// Also, the interrupted iterations shouldn't affect the average iteration_duration in any way; only
	// complete iterations should be taken into account.
	durationCount := float64(getMetricCount(mockOutput, metrics.IterationDurationName))
	assert.Equal(t, 3.0, durationCount)
	durationSum := getMetricSum(mockOutput, metrics.IterationDurationName)
	assert.InDelta(t, 3.35, durationSum/(1000*durationCount), 0.25)
}

func TestMetricsEmission(t *testing.T) {
	if !isWindows {
		t.Parallel()
	}
	testCases := []struct {
		method             string
		minIterDuration    string
		defaultBody        string
		expCount, expIters float64
	}{
		// Since emission of Iterations happens before the minIterationDuration
		// sleep is done, we expect to receive metrics for all executions of
		// the `default` function, despite the lower overall duration setting.
		{"minIterationDuration", `"300ms"`, "testCounter.add(1);", 16.0, 16.0},
		// With the manual sleep method and no minIterationDuration, the last
		// `default` execution will be cut off by the duration setting, so only
		// 3 sets of metrics are expected.
		{"sleepBeforeCounterAdd", "null", "sleep(0.3); testCounter.add(1); ", 12.0, 12.0},
		// The counter should be sent, but the last iteration will be incomplete.
		{"sleepAfterCounterAdd", "null", "testCounter.add(1); sleep(0.3); ", 16.0, 12.0},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.method, func(t *testing.T) {
			if !isWindows {
				t.Parallel()
			}
			registry := metrics.NewRegistry()
			builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
			runner, err := js.New(
				&lib.RuntimeState{
					Logger:         testutils.NewLogger(t),
					BuiltinMetrics: builtinMetrics,
					Registry:       registry,
				},
				&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(fmt.Sprintf(`
				import { sleep } from "k6";
				import { Counter } from "k6/metrics";
				let testCounter = new Counter("testcounter");
				export let options = {
					scenarios: {
						we_need_hard_stop: {
							executor: "constant-vus",
							vus: 4,
							duration: "1s",
							gracefulStop: "0s",
						},
					},
					minIterationDuration: %s,
				};
				export default function() {
					%s
				}
				`, tc.minIterDuration, tc.defaultBody))},
				nil,
			)
			require.NoError(t, err)
			mockOutput := mockoutput.New()
			engine, run, wait := newTestEngine(t, nil, runner, []output.Output{mockOutput}, runner.GetOptions())
			errC := make(chan error)
			go func() { errC <- run() }()
			select {
			case <-time.After(10 * time.Second):
				t.Fatal("Test timed out")
			case err := <-errC:
				require.NoError(t, err)
				wait()
				require.False(t, engine.IsTainted())
			}
			assert.Equal(t, tc.expIters, getMetricSum(mockOutput, metrics.IterationsName))
			assert.Equal(t, tc.expCount, getMetricSum(mockOutput, "testcounter"))
		})
	}
}

//nolint: funlen
func TestMinIterationDurationInSetupTeardownStage(t *testing.T) {
	t.Parallel()
	setupScript := `
		import { sleep } from "k6";
		export function setup() {
			sleep(1);
		}
		export let options = {
			minIterationDuration: "2s",
			scenarios: {
				we_need_hard_stop: {
					executor: "constant-vus",
					vus: 2,
					duration: "1.9s",
					gracefulStop: "0s",
				},
			},
			setupTimeout: "3s",
		};
		export default function () {
		};`
	teardownScript := `
		import { sleep } from "k6";
		export let options = {
			minIterationDuration: "2s",
			scenarios: {
				we_need_hard_stop: {
					executor: "constant-vus",
					vus: 2,
					duration: "1.9s",
					gracefulStop: "0s",
				},
			},
			teardownTimeout: "3s",
		};
		export default function () {
		};
		export function teardown() {
			sleep(1);
		}
	`
	tests := []struct {
		name, script string
	}{
		{"Test setup", setupScript},
		{"Test teardown", teardownScript},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			registry := metrics.NewRegistry()
			builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
			runner, err := js.New(
				&lib.RuntimeState{
					Logger:         testutils.NewLogger(t),
					BuiltinMetrics: builtinMetrics,
					Registry:       registry,
				},
				&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script)},
				nil,
			)
			require.NoError(t, err)
			engine, run, wait := newTestEngine(t, nil, runner, nil, runner.GetOptions())
			errC := make(chan error)
			go func() { errC <- run() }()
			select {
			case <-time.After(10 * time.Second):
				t.Fatal("Test timed out")
			case err := <-errC:
				require.NoError(t, err)
				wait()
				require.False(t, engine.IsTainted())
			}
		})
	}
}

func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) {
	t.Parallel()
	registry := metrics.NewRegistry()
	testMetric, err := registry.NewMetric("teardown_metric", metrics.Counter)
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	runner := &minirunner.MiniRunner{
		Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
			cancel() // we cancel the runCtx immediately after the test starts
			return nil
		},
		TeardownFn: func(ctx context.Context, out chan<- metrics.SampleContainer) error {
			out <- metrics.Sample{Metric: testMetric, Value: 1}
			return nil
		},
	}
	mockOutput := mockoutput.New()
	_, run, wait := newTestEngineWithRegistry(t, ctx, runner, []output.Output{mockOutput}, lib.Options{
		VUs: null.IntFrom(1), Iterations: null.IntFrom(1),
	}, registry)
	assert.NoError(t, run())
	wait()
	var count float64
	for _, sample := range mockOutput.Samples {
		if sample.Metric == testMetric {
			count += sample.Value
		}
	}
	assert.Equal(t, 1.0, count)
}

func TestActiveVUsCount(t *testing.T) {
	t.Parallel()
	...
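Distilled from the listing above, the minimal call pattern looks like the sketch below. This is not a test from the k6 repository: the test name, the my_trend metric, and the option values are illustrative, and since newTestEngineWithRegistry is unexported it can only be called from test files inside the core package itself.

func TestMinimalEnginePattern(t *testing.T) {
	t.Parallel()
	// One registry is shared by the custom metric and the engine, so the
	// engine can observe the metric when samples arrive.
	registry := metrics.NewRegistry()
	testMetric, err := registry.NewMetric("my_trend", metrics.Trend) // illustrative name
	require.NoError(t, err)
	runner := &minirunner.MiniRunner{
		Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
			out <- metrics.Sample{Metric: testMetric, Time: time.Now(), Value: 42}
			return nil
		},
	}
	mockOutput := mockoutput.New()
	_, run, wait := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{
		VUs:        null.IntFrom(1),
		Iterations: null.IntFrom(1),
	}, registry)
	require.NoError(t, run())
	wait() // flush samples to the outputs and release the engine's goroutines
	assert.NotEmpty(t, mockOutput.Samples)
}

Note the order: run() drives the test to completion, and only after wait() returns is it safe to inspect mockOutput.Samples, because the wait closure cancels the contexts, waits for the engine, and stops the outputs.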


newTestEngineWithRegistry

Using AI Code Generation


A corrected sketch of what the auto-generated snippet was reaching for: create one registry, register a gauge on it, build an engine with newTestEngineWithRegistry, and push samples straight through the OutputManager, mirroring the TestEngine_processSamples pattern from the listing above. The my_gauge metric name is illustrative:

registry := metrics.NewRegistry()
metric, err := registry.NewMetric("my_gauge", metrics.Gauge) // illustrative name
require.NoError(t, err)
// A nil runner makes the helper fall back to an empty minirunner.MiniRunner.
e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{}, registry)
e.OutputManager.AddMetricSamples(
	[]metrics.SampleContainer{metrics.Sample{Metric: metric, Value: 1.25, Tags: metrics.IntoSampleTags(&map[string]string{"a": "1"})}},
)
e.Stop()
wait()
assert.IsType(t, &metrics.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_gauge"].Sink)
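One behavior worth knowing when writing such tests, visible in the helper's returned closure: wait() first cancels the run and global contexts, then blocks on the function returned by engine.Init, and finally stops the outputs. Always call it, even when a test aborts early; otherwise the engine's goroutines and outputs can leak into other parallel tests.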

