Best Testkube code snippet using client.NewJobOptionsFromExecutionOptions
job.go
Source: job.go
...
	execution.ExecutionResult = result
	c.metrics.IncExecuteTest(*execution)
	c.Emitter.Notify(testkube.NewEventEndTestSuccess(execution))
}

// NewJobOptionsFromExecutionOptions composes JobOptions based on ExecuteOptions
func NewJobOptionsFromExecutionOptions(options ExecuteOptions) JobOptions {
	return JobOptions{
		Image:          options.ExecutorSpec.Image,
		ImageOverride:  options.ImageOverride,
		HasSecrets:     options.HasSecrets,
		JobTemplate:    options.ExecutorSpec.JobTemplate,
		TestName:       options.TestName,
		Namespace:      options.Namespace,
		SecretEnvs:     options.Request.SecretEnvs,
		HTTPProxy:      options.Request.HttpProxy,
		HTTPSProxy:     options.Request.HttpsProxy,
		UsernameSecret: options.UsernameSecret,
		TokenSecret:    options.TokenSecret,
	}
}

// GetJobPods returns job pods
func (c *JobExecutor) GetJobPods(podsClient tcorev1.PodInterface, jobName string, retryNr, retryCount int) (*corev1.PodList, error) {
	pods, err := podsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "job-name=" + jobName})
	if err != nil {
		return nil, err
	}
	if retryNr == retryCount {
		return nil, fmt.Errorf("retry count exceeded, there are no active pods with given id=%s", jobName)
	}
	if len(pods.Items) == 0 {
		time.Sleep(time.Duration(retryNr * 500 * int(time.Millisecond))) // increase backoff timeout
		return c.GetJobPods(podsClient, jobName, retryNr+1, retryCount)
	}
	return pods, nil
}

// TailJobLogs locates logs for the job pod(s)
func (c *JobExecutor) TailJobLogs(id string, logs chan []byte) (err error) {
	podsClient := c.ClientSet.CoreV1().Pods(c.Namespace)
	ctx := context.Background()
	pods, err := c.GetJobPods(podsClient, id, 1, 10)
	if err != nil {
		close(logs)
		return err
	}
	for _, pod := range pods.Items {
		if pod.Labels["job-name"] == id {
			l := c.Log.With("podNamespace", pod.Namespace, "podName", pod.Name, "podStatus", pod.Status)
			switch pod.Status.Phase {
			case corev1.PodRunning:
				l.Debug("tailing pod logs: immediately")
				return c.TailPodLogs(ctx, pod, logs)
			case corev1.PodFailed:
				err := fmt.Errorf("can't get pod logs, pod failed: %s/%s", pod.Namespace, pod.Name)
				l.Errorw(err.Error())
				return c.GetLastLogLineError(ctx, pod)
			default:
				l.Debugw("tailing job logs: waiting for pod to be ready")
				if err = wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil {
					l.Errorw("poll immediate error when tailing logs", "error", err)
					return c.GetLastLogLineError(ctx, pod)
				}
				l.Debug("tailing pod logs")
				return c.TailPodLogs(ctx, pod, logs)
			}
		}
	}
	return
}

func (c *JobExecutor) TailPodLogs(ctx context.Context, pod corev1.Pod, logs chan []byte) (err error) {
	count := int64(1)
	var containers []string
	for _, container := range pod.Spec.InitContainers {
		containers = append(containers, container.Name)
	}
	for _, container := range pod.Spec.Containers {
		containers = append(containers, container.Name)
	}
	go func() {
		defer close(logs)
		for _, container := range containers {
			podLogOptions := corev1.PodLogOptions{
				Follow:    true,
				TailLines: &count,
				Container: container,
			}
			podLogRequest := c.ClientSet.CoreV1().
				Pods(c.Namespace).
				GetLogs(pod.Name, &podLogOptions)
			stream, err := podLogRequest.Stream(ctx)
			if err != nil {
				c.Log.Errorw("stream error", "error", err)
				continue
			}
			reader := bufio.NewReader(stream)
			for {
				b, err := reader.ReadBytes('\n')
				if err != nil {
					if err == io.EOF {
						err = nil
					}
					break
				}
				c.Log.Debug("TailPodLogs stream scan", "out", b, "pod", pod.Name)
				logs <- b
			}
			if err != nil {
				c.Log.Errorw("scanner error", "error", err)
			}
		}
	}()
	return
}

// GetPodLogError returns the last log line as an error
func (c *JobExecutor) GetPodLogError(ctx context.Context, pod corev1.Pod) (logsBytes []byte, err error) {
	// error line should be the last one
	return c.GetPodLogs(pod, 1)
}

// GetLastLogLineError returns an error if the last log line indicates a failure
func (c *JobExecutor) GetLastLogLineError(ctx context.Context, pod corev1.Pod) error {
	l := c.Log.With("pod", pod.Name, "namespace", pod.Namespace)
	log, err := c.GetPodLogError(ctx, pod)
	if err != nil {
		return fmt.Errorf("getPodLogs error: %w", err)
	}
	l.Debugw("log", "got last log bytes", string(log)) // in case of distorted log bytes
	entry, err := output.GetLogEntry(log)
	if err != nil {
		return fmt.Errorf("GetLogEntry error: %w", err)
	}
	c.Log.Errorw("got last log entry", "log", entry.String())
	return fmt.Errorf("error from last log entry: %s", entry.String())
}

// GetPodLogs returns pod logs bytes
func (c *JobExecutor) GetPodLogs(pod corev1.Pod, logLinesCount ...int64) (logs []byte, err error) {
	count := int64(100)
	if len(logLinesCount) > 0 {
		count = logLinesCount[0]
	}
	var containers []string
	for _, container := range pod.Spec.InitContainers {
		containers = append(containers, container.Name)
	}
	for _, container := range pod.Spec.Containers {
		containers = append(containers, container.Name)
	}
	for _, container := range containers {
		podLogOptions := corev1.PodLogOptions{
			Follow:    false,
			TailLines: &count,
			Container: container,
		}
		podLogRequest := c.ClientSet.CoreV1().
			Pods(c.Namespace).
			GetLogs(pod.Name, &podLogOptions)
		stream, err := podLogRequest.Stream(context.TODO())
		if err != nil {
			if len(logs) != 0 && strings.Contains(err.Error(), "PodInitializing") {
				return logs, nil
			}
			return logs, err
		}
		defer stream.Close()
		buf := new(bytes.Buffer)
		_, err = io.Copy(buf, stream)
		if err != nil {
			if len(logs) != 0 && strings.Contains(err.Error(), "PodInitializing") {
				return logs, nil
			}
			return logs, err
		}
		logs = append(logs, buf.Bytes()...)
	}
	return logs, nil
}

// Abort aborts the Kubernetes job by name
func (c *JobExecutor) Abort(jobName string) *testkube.ExecutionResult {
	var zero int64 = 0
	bg := metav1.DeletePropagationBackground
	jobs := c.ClientSet.BatchV1().Jobs(c.Namespace)
	err := jobs.Delete(context.TODO(), jobName, metav1.DeleteOptions{
		GracePeriodSeconds: &zero,
		PropagationPolicy:  &bg,
	})
	if err != nil {
		return &testkube.ExecutionResult{
			Status: testkube.ExecutionStatusFailed,
			Output: err.Error(),
		}
	}
	return &testkube.ExecutionResult{
		Status: testkube.ExecutionStatusCancelled,
	}
}

// NewJobSpec creates a new job spec from the given options
func NewJobSpec(log *zap.SugaredLogger, options JobOptions) (*batchv1.Job, error) {
	secretEnvVars := prepareSecretEnvs(options)
	tmpl, err := template.New("job").Parse(options.JobTemplate)
	if err != nil {
		return nil, fmt.Errorf("creating job spec from options.JobTemplate error: %w", err)
	}
	options.Jsn = strings.ReplaceAll(options.Jsn, "'", "''")
	var buffer bytes.Buffer
	if err = tmpl.ExecuteTemplate(&buffer, "job", options); err != nil {
		return nil, fmt.Errorf("executing job spec template: %w", err)
	}
	var job batchv1.Job
	jobSpec := buffer.String()
	log.Debug("Job specification", jobSpec)
	decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewBufferString(jobSpec), len(jobSpec))
	if err := decoder.Decode(&job); err != nil {
		return nil, fmt.Errorf("decoding job spec error: %w", err)
	}
	env := append(envVars, secretEnvVars...)
	if options.HTTPProxy != "" {
		env = append(env, corev1.EnvVar{Name: "HTTP_PROXY", Value: options.HTTPProxy})
	}
	if options.HTTPSProxy != "" {
		env = append(env, corev1.EnvVar{Name: "HTTPS_PROXY", Value: options.HTTPSProxy})
	}
	for i := range job.Spec.Template.Spec.InitContainers {
		job.Spec.Template.Spec.InitContainers[i].Env = append(job.Spec.Template.Spec.InitContainers[i].Env, env...)
	}
	for i := range job.Spec.Template.Spec.Containers {
		job.Spec.Template.Spec.Containers[i].Env = append(job.Spec.Template.Spec.Containers[i].Env, env...)
		// override container image if provided
		if options.ImageOverride != "" {
			job.Spec.Template.Spec.Containers[i].Image = options.ImageOverride
		}
	}
	return &job, nil
}

// IsPodReady defines if the pod is ready or failed for log scraping
func IsPodReady(c *kubernetes.Clientset, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case corev1.PodSucceeded:
			return true, nil
		case corev1.PodFailed:
			return true, fmt.Errorf("pod %s/%s failed", pod.Namespace, pod.Name)
		}
		return false, nil
	}
}

func NewJobOptions(initImage, jobTemplate string, execution testkube.Execution, options ExecuteOptions) (jobOptions JobOptions, err error) {
	jsn, err := json.Marshal(execution)
	if err != nil {
		return jobOptions, err
	}
	jobOptions = NewJobOptionsFromExecutionOptions(options)
	jobOptions.Name = execution.Id
	jobOptions.Namespace = execution.TestNamespace
	jobOptions.Jsn = string(jsn)
	jobOptions.InitImage = initImage
	jobOptions.TestName = execution.TestName
	if jobOptions.JobTemplate == "" {
		jobOptions.JobTemplate = jobTemplate
	}
	jobOptions.Variables = execution.Variables
	return
}

// prepareSecretEnvs generates secret envs from job options
func prepareSecretEnvs(options JobOptions) (secretEnvVars []corev1.EnvVar) {
	secretEnvVars = secretenv.NewEnvManager().Prepare(options.SecretEnvs, options.Variables)
...
NewJobOptionsFromExecutionOptions
Using AI Code Generation
package main

import (
	"context"
	"flag"
	"fmt"
	"path/filepath"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"

	"github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
)

func main() {
	// placeholder values for the example
	namespace := "default"
	jobName := "example-pipelinerun"

	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	clientset, err := versioned.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	job, err := clientset.TektonV1alpha1().PipelineRuns(namespace).Get(context.TODO(), jobName, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		fmt.Printf("PipelineRun %s in namespace %s not found\n", jobName, namespace)
	} else if statusError, isStatus := err.(*errors.StatusError); isStatus {
		fmt.Printf("Error getting PipelineRun %s in namespace %s: %v\n", jobName, namespace, statusError.ErrStatus.Message)
	} else if err != nil {
		panic(err.Error())
	} else {
		fmt.Printf("Found PipelineRun: %s\n", job.Name)
	}
}

NewJobOptionsFromExecutionOptions
Using AI Code Generation
import (
func main() {
	reporter, err := statsd.NewReporter(
		statsd.Configuration{
			Writer: writer.NewWriter(writer.Options{
			}),
		},
	)
	if err != nil {
		panic(err)
	}
	scope, closer := tally.NewRootScope(tally.ScopeOptions{
		Tags: map[string]string{"env": "production"},
		SanitizeOptions: &tally.SanitizeOptions{
			NameCharacters: tally.ValidCharacters{
				Ranges: []tally.CharacterRange{
					{Low: 0, High: 255},
				},
			},
			KeyCharacters: tally.ValidCharacters{
				Ranges: []tally.CharacterRange{
					{Low: 0, High: 255},
				},
			},
			ValueCharacters: tally.ValidCharacters{
				Ranges: []tally.CharacterRange{
					{Low: 0, High: 255},
				},
			},
		},
	}, 1*time.Second)
	defer closer.Close()
	scope.Counter("counter").Inc(1)
	scope.Gauge("gauge").Update(42)
	scope.Timer("timer").Record(42 * time.Millisecond)
	childScope := scope.Tagged(map[string]string

NewJobOptionsFromExecutionOptions
Using AI Code Generation
import (
func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "bigquery-public-data", option.WithoutAuthentication())
	if err != nil {
		log.Fatalf("bigquery.NewClient: %v", err)
	}
	q := client.Query("SELECT 17 AS my_col")
	q.DefaultDataset = client.Dataset("samples")
	job, err := q.NewJob(ctx)
	if err != nil {
		log.Fatalf("Query.NewJob: %v", err)
	}
	job, err = q.NewJob(ctx)
	if err != nil {
		log.Fatalf("Query.NewJob: %v", err)
	}
	job, err = q.NewJob(ctx)
	if err != nil {
		log.Fatalf("Query.NewJob: %v", err)
	}
	job, err = q.NewJob(ctx)
	if err != nil {
		log.Fatalf("Query.NewJob: %v", err)
	}
	job, err = q.NewJob(ctx)
	if err != nil {
		log.Fatalf("Query.NewJob: %v", err)
	}
	job, err = q.NewJob(ctx)
	if err != nil {
		log.Fatalf("Query.NewJob: %v", err)
	}

NewJobOptionsFromExecutionOptions
Using AI Code Generation
import (
	"fmt"

	"github.com/kubeshop/testkube/pkg/executor/client" // assumed import path
)

func main() {
	// NewJobOptionsFromExecutionOptions takes ExecuteOptions and returns JobOptions (see job.go above)
	executionOptions := client.ExecuteOptions{}
	jobOptions := client.NewJobOptionsFromExecutionOptions(executionOptions)
	fmt.Printf("%+v\n", jobOptions)
}

import (
func main() {
	jobOptions, _ := client.NewJobOptionsFromFlags()
	fmt.Printf("%+v\n", jobOptions)
}

{ContainerID: Timeout:}

import (
func main() {
	jobOptions, _ := client.NewJobOptionsFromFlags()
	executionOptions, _ := client.NewExecutionOptionsFromJobOptions(jobOptions)
	fmt.Printf("%+v\n", executionOptions)
}

import (
func main() {
	jobOptions, _ := client.NewJobOptionsFromFlags()
	executionOptions, _ := client.NewExecutionOptionsFromJobOptions(jobOptions)
