How to use the GetName method of the executor package

Best K6 code snippets using executor.GetName
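In every snippet below, GetName is a plain accessor: it returns the name a component (an executor, node, instruction, or port spec) was configured with, usually so the caller can sort, log, or look things up by that name. As a minimal sketch of the pattern — the executor package, Config type, and Name field here are illustrative stand-ins, not k6's actual API — a GetName accessor typically looks like this:

package executor

// Config is a hypothetical executor configuration carrying a name.
type Config struct {
    Name string
}

// GetName returns the configured name, or an empty string on a nil receiver,
// mirroring the nil-safe accessors generated for protobuf types.
func (c *Config) GetName() string {
    if c == nil {
        return ""
    }
    return c.Name
}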

thermos.go

Source: thermos.go (GitHub)


...47func (p portSpecByName) Swap(i, j int) {48 p[i], p[j] = p[j], p[i]49}50func (p portSpecByName) Less(i, j int) bool {51 return strings.Compare(p[i].GetName(), p[j].GetName()) < 052}53// ConvertForThermosExecutor takes JobSpec as an input, generates and attaches54// thermos executor data if conversion could happen, and returns a mutated55// version of JobSpec.56func ConvertForThermosExecutor(57 jobSpec *stateless.JobSpec,58 thermosConfig config.ThermosExecutorConfig,59) (*stateless.JobSpec, error) {60 defaultSpec := jobSpec.GetDefaultSpec()61 convert, err := requiresThermosConvert(defaultSpec)62 if err != nil {63 return nil, err64 }65 if convert {66 newSpec, err := convertPodSpec(67 defaultSpec,68 defaultSpec,69 jobSpec,70 thermosConfig,71 )72 if err != nil {73 return nil, err74 }75 jobSpec.DefaultSpec = newSpec76 }77 for instanceID, instanceSpec := range jobSpec.GetInstanceSpec() {78 mergedSpec := taskconfig.MergePodSpec(defaultSpec, instanceSpec)79 convert, err := requiresThermosConvert(mergedSpec)80 if err != nil {81 return nil, err82 }83 if convert {84 newSpec, err := convertPodSpec(85 instanceSpec,86 mergedSpec,87 jobSpec,88 thermosConfig,89 )90 if err != nil {91 return nil, err92 }93 jobSpec.InstanceSpec[instanceID] = newSpec94 }95 }96 return jobSpec, nil97}98// requiresThermosConvert checks if the Peloton PodSpec requires thermos99// executor conversion. Throws an error if the PodSpec requires conversion,100// but fails validation.101func requiresThermosConvert(podSpec *pod.PodSpec) (bool, error) {102 if podSpec == nil {103 return false, nil104 }105 // Requires thermos data conversion, if MesosSpec inside PodSpec is106 // custom executor and executor data is empty.107 mesosSpec := podSpec.GetMesosSpec()108 if mesosSpec.GetExecutorSpec().GetType() != apachemesos.PodSpec_ExecutorSpec_EXECUTOR_TYPE_CUSTOM ||109 len(mesosSpec.GetExecutorSpec().GetData()) > 0 {110 return false, nil111 }112 containers := podSpec.GetContainers()113 if len(containers) == 0 {114 return false, nil115 }116 // Container image must be defined for main container117 mainContainer := containers[0]118 if len(mainContainer.GetImage()) == 0 {119 return false, yarpcerrors.InvalidArgumentErrorf("container image must be defined")120 }121 // Check if name of the containers are defined, and does not have duplicates122 containerNames := map[string]struct{}{}123 for _, c := range append(124 podSpec.GetInitContainers(),125 podSpec.GetContainers()...,126 ) {127 n := c.GetName()128 if len(n) == 0 {129 return false, yarpcerrors.InvalidArgumentErrorf("container does not have name specified")130 }131 if _, ok := containerNames[n]; ok {132 return false, yarpcerrors.InvalidArgumentErrorf("duplicate name found in container names")133 }134 containerNames[n] = struct{}{}135 }136 // Verify volumes and build volumes map137 volumes := map[string]*volume.VolumeSpec{}138 for _, v := range podSpec.GetVolumes() {139 if len(v.GetName()) == 0 {140 return false, yarpcerrors.InvalidArgumentErrorf("volume does not have name specified")141 }142 if _, ok := volumes[v.GetName()]; ok {143 return false, yarpcerrors.InvalidArgumentErrorf("duplicate volume name found in pod")144 }145 switch v.GetType() {146 case volume.VolumeSpec_VOLUME_TYPE_EMPTY_DIR:147 return false, yarpcerrors.InvalidArgumentErrorf("empty dir volume type not supported for volume: %s", v.GetName())148 case volume.VolumeSpec_VOLUME_TYPE_HOST_PATH:149 if len(v.GetHostPath().GetPath()) == 0 {150 return false, yarpcerrors.InvalidArgumentErrorf("path is empty for host_path volume")151 
}152 case volume.VolumeSpec_VOLUME_TYPE_INVALID:153 return false, yarpcerrors.InvalidArgumentErrorf("invalid volume type for volume: %s", v.GetName())154 }155 volumes[v.GetName()] = v156 }157 // Verify all containers for volume mounts and environment variables158 envs := map[string]struct{}{}159 mounts := map[string]struct{}{}160 for _, c := range append(161 podSpec.GetInitContainers(),162 podSpec.GetContainers()...,163 ) {164 // Verify volume mounts165 for _, m := range c.GetVolumeMounts() {166 if len(m.GetName()) == 0 {167 return false, yarpcerrors.InvalidArgumentErrorf("volume mount does not specify volume name")168 }169 if len(m.GetMountPath()) == 0 {170 return false, yarpcerrors.InvalidArgumentErrorf("volume mount does not specify mount path")171 }172 if _, ok := volumes[m.GetName()]; !ok {173 return false, yarpcerrors.InvalidArgumentErrorf("volume not defined: %s", m.GetName())174 }175 if _, ok := mounts[m.GetName()]; ok {176 return false, yarpcerrors.InvalidArgumentErrorf("duplicate volume mount not allowed")177 }178 mounts[m.GetName()] = struct{}{}179 }180 // Verify environment variables181 for _, e := range c.GetEnvironment() {182 if len(e.GetName()) == 0 {183 return false, yarpcerrors.InvalidArgumentErrorf("environment variable name not defined")184 }185 if _, ok := envs[e.GetName()]; ok {186 return false, yarpcerrors.InvalidArgumentErrorf("duplicate environment variable not allowed")187 }188 envs[e.GetName()] = struct{}{}189 }190 }191 return true, nil192}193// collectResources collects resources (including ports) from all containers194// in the PodSpec.195func collectResources(podSpec *pod.PodSpec) (*pod.ResourceSpec, []*pod.PortSpec) {196 // Collect maximum resource and ports allocated (key'ed by port name)197 // by initial containers.198 maxInitRes := &pod.ResourceSpec{}199 initPorts := make(map[string]*pod.PortSpec)200 for _, initContainer := range podSpec.GetInitContainers() {201 res := initContainer.GetResource()202 if res.GetCpuLimit() > maxInitRes.GetCpuLimit() {203 maxInitRes.CpuLimit = res.CpuLimit204 }205 if res.GetMemLimitMb() > maxInitRes.GetMemLimitMb() {206 maxInitRes.MemLimitMb = res.MemLimitMb207 }208 if res.GetDiskLimitMb() > maxInitRes.GetDiskLimitMb() {209 maxInitRes.DiskLimitMb = res.DiskLimitMb210 }211 if res.GetFdLimit() > maxInitRes.GetFdLimit() {212 maxInitRes.FdLimit = res.FdLimit213 }214 if res.GetGpuLimit() > maxInitRes.GetGpuLimit() {215 maxInitRes.GpuLimit = res.GpuLimit216 }217 for _, port := range initContainer.GetPorts() {218 if _, ok := initPorts[port.GetName()]; !ok {219 initPorts[port.GetName()] = port220 }221 }222 }223 // Collect sum of resources and ports allocated (key'ed by port name)224 // by containers225 sumRes := &pod.ResourceSpec{}226 ports := make(map[string]*pod.PortSpec)227 for _, container := range podSpec.GetContainers() {228 res := container.GetResource()229 sumRes.CpuLimit = sumRes.GetCpuLimit() + res.GetCpuLimit()230 sumRes.MemLimitMb = sumRes.GetMemLimitMb() + res.GetMemLimitMb()231 sumRes.DiskLimitMb = sumRes.GetDiskLimitMb() + res.GetDiskLimitMb()232 sumRes.FdLimit = sumRes.GetFdLimit() + res.GetFdLimit()233 sumRes.GpuLimit = sumRes.GetGpuLimit() + res.GetGpuLimit()234 for _, port := range container.GetPorts() {235 if _, ok := ports[port.GetName()]; !ok {236 ports[port.GetName()] = port237 }238 }239 }240 // Returned resource would be max of (maxInitRes, sumRes)241 // Returned ports would be merged list of (initPorts, ports)242 resultRes := &pod.ResourceSpec{243 CpuLimit: math.Max(maxInitRes.GetCpuLimit(), 
sumRes.GetCpuLimit()),244 MemLimitMb: math.Max(maxInitRes.GetMemLimitMb(), sumRes.GetMemLimitMb()),245 DiskLimitMb: math.Max(maxInitRes.GetDiskLimitMb(), sumRes.GetDiskLimitMb()),246 GpuLimit: math.Max(maxInitRes.GetGpuLimit(), sumRes.GetGpuLimit()),247 // Using a function here since math.Max only supports float64248 FdLimit: func() uint32 {249 if maxInitRes.GetFdLimit() > sumRes.GetFdLimit() {250 return maxInitRes.GetFdLimit()251 }252 return sumRes.GetFdLimit()253 }(),254 }255 portsMap := make(map[string]*pod.PortSpec)256 for n, p := range initPorts {257 if _, ok := portsMap[n]; !ok {258 portsMap[n] = p259 }260 }261 for n, p := range ports {262 if _, ok := portsMap[n]; !ok {263 portsMap[n] = p264 }265 }266 resultPorts := make([]*pod.PortSpec, 0, len(portsMap))267 for _, p := range portsMap {268 resultPorts = append(resultPorts, p)269 }270 // Make sure the order of the ports are consistent to avoid271 // unnecessary job restarts.272 sort.Stable(portSpecByName(resultPorts))273 return resultRes, resultPorts274}275// createDockerInfo create mesos DockerInfo struct from ContainerSpec276func createDockerInfo(podSpec *pod.PodSpec) *mesos.ContainerInfo_DockerInfo {277 // TODO: Validate parameters are set in ContainerSpec278 // TODO: Right now we are overriding all the custom docker279 // parameters passed in, thus we need to figure out some way to support280 // thinsg like ulimit, cap-add, pids-limit etc.281 var params []*mesos.Parameter282 mainContainer := podSpec.GetContainers()[0]283 // Build volumes map284 volumes := map[string]*volume.VolumeSpec{}285 for _, v := range podSpec.GetVolumes() {286 volumes[v.GetName()] = v287 }288 for _, c := range append(289 podSpec.GetInitContainers(),290 podSpec.GetContainers()...,291 ) {292 // Generate docker environment parameters293 for _, env := range c.GetEnvironment() {294 params = append(params, &mesos.Parameter{295 Key: ptr.String("env"),296 Value: ptr.String(env.GetName() + "=" + env.GetValue()),297 })298 }299 // Generate docker volume parameters300 for _, mount := range c.GetVolumeMounts() {301 var param *mesos.Parameter302 v := volumes[mount.GetName()]303 switch v.GetType() {304 case volume.VolumeSpec_VOLUME_TYPE_HOST_PATH:305 value := v.GetHostPath().GetPath() + ":" + mount.GetMountPath() + ":"306 if mount.GetReadOnly() {307 value += "ro"308 } else {309 value += "rw"310 }311 param = &mesos.Parameter{312 Key: ptr.String("volume"),313 Value: ptr.String(value),314 }315 }316 if param != nil {317 params = append(params, param)318 }319 }320 }321 return &mesos.ContainerInfo_DockerInfo{322 Image: ptr.String(mainContainer.GetImage()),323 Parameters: params,324 }325}326// convert converts Peloton PodSpec to Aurora TaskConfig thrift structure.327func convert(328 jobSpec *stateless.JobSpec,329 podSpec *pod.PodSpec,330) (*aurora.TaskConfig, error) {331 collectedRes, collectedPorts := collectResources(podSpec)332 // tier333 tier := convertTier(jobSpec)334 // resources335 var resources []*aurora.Resource336 if collectedRes.GetCpuLimit() > 0 {337 resources = append(resources, &aurora.Resource{338 NumCpus: ptr.Float64(collectedRes.GetCpuLimit()),339 })340 }341 if collectedRes.GetMemLimitMb() > 0 {342 resources = append(resources, &aurora.Resource{343 RamMb: ptr.Int64(int64(collectedRes.GetMemLimitMb())),344 })345 }346 if collectedRes.GetDiskLimitMb() > 0 {347 resources = append(resources, &aurora.Resource{348 DiskMb: ptr.Int64(int64(collectedRes.GetDiskLimitMb())),349 })350 }351 if collectedRes.GetGpuLimit() > 0 {352 resources = append(resources, 
&aurora.Resource{353 NumGpus: ptr.Int64(int64(collectedRes.GetGpuLimit())),354 })355 }356 for _, port := range collectedPorts {357 resources = append(resources, &aurora.Resource{358 NamedPort: ptr.String(port.GetName()),359 })360 }361 // metadata362 metadata := make([]*aurora.Metadata, 0, len(podSpec.GetLabels()))363 for _, label := range podSpec.GetLabels() {364 key := label.GetKey()365 value := label.GetValue()366 // Aurora attaches "org.apache.aurora.metadata." prefix when367 // translating job metadata to mesos task label, reverting the368 // behavior here.369 if strings.HasPrefix(key, _auroraLabelPrefix) {370 key = strings.TrimPrefix(key, _auroraLabelPrefix)371 }372 metadata = append(metadata, &aurora.Metadata{373 Key: ptr.String(key),374 Value: ptr.String(value),375 })376 }377 // container378 docker := createDockerInfo(podSpec)379 dockerParams := make([]*aurora.DockerParameter, 0, len(docker.GetParameters()))380 for _, param := range docker.GetParameters() {381 dockerParams = append(dockerParams, &aurora.DockerParameter{382 Name: ptr.String(param.GetKey()),383 Value: ptr.String(param.GetValue()),384 })385 }386 container := &aurora.Container{387 Docker: &aurora.DockerContainer{388 Image: ptr.String(docker.GetImage()),389 Parameters: dockerParams,390 },391 }392 // executor_data393 executorData, err := convertExecutorData(jobSpec, podSpec)394 if err != nil {395 return nil, errors.Wrap(err, "failed to convert pod spec to executor data")396 }397 executorDataStr, err := json.Marshal(executorData)398 if err != nil {399 return nil, errors.Wrap(err, "failed to marshal executor data")400 }401 // task_config402 t := &aurora.TaskConfig{403 Job: &aurora.JobKey{404 Role: ptr.String(jobSpec.GetName()),405 Environment: ptr.String(_jobEnvironment),406 Name: ptr.String(jobSpec.GetName() + "." 
+ jobSpec.GetName()),407 },408 IsService: ptr.Bool(true),409 Priority: ptr.Int32(int32(jobSpec.GetSla().GetPriority())),410 Production: ptr.Bool(false),411 Tier: ptr.String(tier),412 Resources: resources,413 Metadata: metadata,414 Container: container,415 ExecutorConfig: &aurora.ExecutorConfig{416 Name: ptr.String("AuroraExecutor"),417 Data: ptr.String(string(executorDataStr)),418 },419 // TODO: Fill MaxTaskFailures420 }421 if len(jobSpec.GetOwner()) > 0 {422 t.Owner = &aurora.Identity{User: ptr.String(jobSpec.GetOwner())}423 }424 return t, nil425}426// convertHealthCheckConfig generates HealthCheckConfig struct based on427// mainContainer's livenessCheck config.428func convertHealthCheckConfig(429 envs []*pod.Environment,430 livenessCheck *pod.HealthCheckSpec,431) (*HealthCheckConfig, error) {432 if !livenessCheck.GetEnabled() {433 return nil, nil434 }435 healthCheckConfig := NewHealthCheckConfig()436 switch livenessCheck.GetType() {437 case pod.HealthCheckSpec_HEALTH_CHECK_TYPE_COMMAND:438 healthCheckConfig.HealthChecker = NewHealthCheckerConfig()439 healthCheckConfig.HealthChecker.Shell = NewShellHealthChecker()440 healthCheckConfig.HealthChecker.Shell.ShellCommand = ptr.String(convertCmdline(envs, livenessCheck.GetCommand()))441 case pod.HealthCheckSpec_HEALTH_CHECK_TYPE_HTTP:442 var schema string443 if len(livenessCheck.GetHttpGet().GetScheme()) > 0 {444 schema = livenessCheck.GetHttpGet().GetScheme()445 } else {446 schema = "http"447 }448 endpoint := schema + "://127.0.0.1"449 if livenessCheck.GetHttpGet().GetPort() > 0 {450 endpoint += ":" + strconv.Itoa(int(livenessCheck.GetHttpGet().GetPort()))451 }452 endpoint += livenessCheck.GetHttpGet().GetPath()453 healthCheckConfig.HealthChecker = NewHealthCheckerConfig()454 healthCheckConfig.HealthChecker.Http = NewHttpHealthChecker()455 healthCheckConfig.HealthChecker.Http.Endpoint = ptr.String(endpoint)456 default:457 return nil, yarpcerrors.InvalidArgumentErrorf("unsupported liveness check type: %s", livenessCheck.GetType())458 }459 if livenessCheck.GetInitialIntervalSecs() > 0 {460 healthCheckConfig.InitialIntervalSecs = ptr.Float64(float64(livenessCheck.GetInitialIntervalSecs()))461 }462 if livenessCheck.GetIntervalSecs() > 0 {463 healthCheckConfig.IntervalSecs = ptr.Float64(float64(livenessCheck.GetIntervalSecs()))464 }465 if livenessCheck.GetMaxConsecutiveFailures() > 0 {466 healthCheckConfig.MaxConsecutiveFailures = ptr.Int32(int32(livenessCheck.GetMaxConsecutiveFailures()))467 }468 if livenessCheck.GetSuccessThreshold() > 0 {469 healthCheckConfig.MinConsecutiveSuccesses = ptr.Int32(int32(livenessCheck.GetSuccessThreshold()))470 }471 if livenessCheck.GetTimeoutSecs() > 0 {472 healthCheckConfig.TimeoutSecs = ptr.Float64(float64(livenessCheck.GetTimeoutSecs()))473 }474 return healthCheckConfig, nil475}476// convertTask generates Task struct based on JobSpec, PodSpec and ResourceSpec.477func convertTask(jobSpec *stateless.JobSpec, podSpec *pod.PodSpec, res *pod.ResourceSpec) *Task {478 task := NewTask()479 task.Name = ptr.String(jobSpec.GetName())480 if podSpec.GetKillGracePeriodSeconds() > 0 {481 task.FinalizationWait = ptr.Int32(int32(podSpec.GetKillGracePeriodSeconds()))482 }483 task.Resources = NewResources()484 if res.GetCpuLimit() > 0 {485 task.Resources.Cpu = ptr.Float64(res.GetCpuLimit())486 }487 if res.GetMemLimitMb() > 0 {488 task.Resources.RamBytes = ptr.Int64(int64(res.GetMemLimitMb() * MbInBytes))489 }490 if res.GetDiskLimitMb() > 0 {491 task.Resources.DiskBytes = ptr.Int64(int64(res.GetDiskLimitMb() * MbInBytes))492 }493 
if res.GetGpuLimit() > 0 {494 task.Resources.Gpu = ptr.Int32(int32(res.GetGpuLimit()))495 }496 // Start init containers in order defined, and all init containers before497 // regular containers498 for _, c := range podSpec.GetContainers() {499 constraint := NewConstraint()500 for _, ic := range podSpec.GetInitContainers() {501 constraint.Order = append(constraint.Order, ptr.String(ic.GetName()))502 }503 constraint.Order = append(constraint.Order, ptr.String(c.GetName()))504 task.Constraints = append(task.Constraints, constraint)505 }506 // Convert ContainerSpecs to Processes507 for _, c := range append(508 podSpec.GetInitContainers(),509 podSpec.GetContainers()...,510 ) {511 process := NewProcess()512 process.Name = ptr.String(c.GetName())513 process.Cmdline = ptr.String(convertCmdline(c.GetEnvironment(), c.GetEntrypoint()))514 task.Processes = append(task.Processes, process)515 }516 return task517}518// convertExecutorData generates ExecutorData struct based on Peloton PodSpec.519func convertExecutorData(520 jobSpec *stateless.JobSpec,521 podSpec *pod.PodSpec,522) (*ExecutorData, error) {523 mainContainer := podSpec.GetContainers()[0]524 collectedRes, _ := collectResources(podSpec)525 // health_check_config526 healthCheckConfig, err := convertHealthCheckConfig(527 mainContainer.GetEnvironment(),528 mainContainer.GetLivenessCheck(),529 )530 if err != nil {531 return nil, err532 }533 // task534 task := convertTask(jobSpec, podSpec, collectedRes)535 executorData := NewExecutorData()536 executorData.Role = ptr.String(jobSpec.GetName())537 executorData.Environment = ptr.String(_jobEnvironment)538 executorData.Name = ptr.String(jobSpec.GetName() + "." + jobSpec.GetName())539 executorData.Priority = ptr.Int32(int32(jobSpec.GetSla().GetPriority()))540 executorData.Production = ptr.Bool(false)541 executorData.Tier = ptr.String(convertTier(jobSpec))542 executorData.CronCollisionPolicy = ptr.String("KILL_EXISTING")543 executorData.MaxTaskFailures = ptr.Int32(1)544 executorData.EnableHooks = ptr.Bool(false)545 executorData.HealthCheckConfig = healthCheckConfig546 executorData.Task = task547 // TODO: Fill Cluster548 return executorData, nil549}550// convertTier returns tier string based on input Peloton JobSpec.551func convertTier(jobSpec *stateless.JobSpec) string {552 if jobSpec.GetSla().GetPreemptible() && jobSpec.GetSla().GetRevocable() {553 return "revocable"554 }555 return "preemptible"556}557// convertCmdline returns the command line string based on input Peloton558// CommandSpec. It replaces variables inside command arguments with actual559// values which defined in container environments, and wrap it in a single560// quote.561func convertCmdline(562 envs []*pod.Environment,563 command *pod.CommandSpec,564) string {565 envMap := map[string]string{}566 for _, env := range envs {567 envMap[env.GetName()] = env.GetValue()568 }569 mapping := expansion.MappingFuncFor(envMap)570 var cmd []string571 for _, arg := range append(572 []string{command.GetValue()},573 command.GetArguments()...,574 ) {575 // Each cmdline arg is generated in following steps:576 // 1. Do kubernetes style environment variable expansion, i.e.577 // $(ENV) -> env-value, $$(ENV) -> $(ENV)578 // 2. Escape single quote characters by ending currently quoted579 // string append single quote, then start a newly quoted string,580 // i.e. ' -> '"'"'581 // 3. 
Wrap the whole argument in single quote582 cmd = append(cmd, "'"+strings.Replace(expansion.Expand(arg, mapping), "'", `'"'"'`, -1)+"'")583 }584 return strings.Join(cmd, " ")585}586// convertToData converts Peloton PodSpec to thermos executor data587// encoded in thrift binary protocol.588func convertToData(589 jobSpec *stateless.JobSpec,590 podSpec *pod.PodSpec,591) ([]byte, error) {592 taskConfig, err := convert(jobSpec, podSpec)593 if err != nil {594 return nil, err595 }596 taskConfigBytes, err := thermos.EncodeTaskConfig(taskConfig)597 if err != nil {598 return nil, err599 }600 return taskConfigBytes, nil601}602// mutatePodSpec mutates Peloton PodSpec to be usable by v1alpha API handler603// along with generated thermos executor data.604func mutatePodSpec(605 spec *pod.PodSpec,606 executorData []byte,607 thermosConfig config.ThermosExecutorConfig,608) error {609 collectedRes, collectedPorts := collectResources(spec)610 // Generate DockerInfo, CommandInfo and ExecutorInfo in Mesos v1 struct,611 // and convert them to the new fields.612 dockerInfo := createDockerInfo(spec)613 var dockerParameters []*apachemesos.PodSpec_DockerParameter614 for _, p := range dockerInfo.GetParameters() {615 dockerParameters = append(dockerParameters, &apachemesos.PodSpec_DockerParameter{616 Key: p.GetKey(),617 Value: p.GetValue(),618 })619 }620 commandInfo := thermosConfig.NewThermosCommandInfo()621 var uris []*apachemesos.PodSpec_URI622 for _, u := range commandInfo.GetUris() {623 uris = append(uris, &apachemesos.PodSpec_URI{624 Value: u.GetValue(),625 Executable: u.GetExecutable(),626 })627 }628 executorInfo := thermosConfig.NewThermosExecutorInfo(executorData)629 var executorType apachemesos.PodSpec_ExecutorSpec_ExecutorType630 executorResources := &apachemesos.PodSpec_ExecutorSpec_Resources{}631 switch executorInfo.GetType() {632 case mesos.ExecutorInfo_DEFAULT:633 executorType = apachemesos.PodSpec_ExecutorSpec_EXECUTOR_TYPE_DEFAULT634 case mesos.ExecutorInfo_CUSTOM:635 executorType = apachemesos.PodSpec_ExecutorSpec_EXECUTOR_TYPE_CUSTOM636 default:637 executorType = apachemesos.PodSpec_ExecutorSpec_EXECUTOR_TYPE_INVALID638 }639 for _, r := range executorInfo.GetResources() {640 if r.GetType() == mesos.Value_SCALAR {641 if r.GetName() == "cpus" {642 executorResources.Cpu = r.GetScalar().GetValue()643 }644 if r.GetName() == "mem" {645 executorResources.MemMb = r.GetScalar().GetValue()646 }647 }648 }649 // Attach fields to PodSpec650 if spec.MesosSpec == nil {651 spec.MesosSpec = &apachemesos.PodSpec{}652 }653 spec.MesosSpec.Type = apachemesos.PodSpec_CONTAINER_TYPE_DOCKER654 spec.MesosSpec.DockerParameters = dockerParameters655 spec.MesosSpec.Uris = uris656 spec.MesosSpec.Shell = commandInfo.GetShell()657 spec.MesosSpec.ExecutorSpec = &apachemesos.PodSpec_ExecutorSpec{658 Type: executorType,...
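The most direct GetName usage in this snippet is at the top: portSpecByName implements sort.Interface so that ports are ordered by name before being attached to the spec, keeping the resulting spec deterministic. Below is a self-contained sketch of that pattern; PortSpec is a simplified stand-in for the generated Peloton type, not the real protobuf message:

package main

import (
    "fmt"
    "sort"
    "strings"
)

// PortSpec is a stand-in for the generated spec type; only the name matters here.
type PortSpec struct {
    Name string
}

// GetName is nil-safe, like the generated protobuf accessors.
func (p *PortSpec) GetName() string {
    if p == nil {
        return ""
    }
    return p.Name
}

// portSpecByName sorts ports by their GetName value, mirroring the snippet above.
type portSpecByName []*PortSpec

func (p portSpecByName) Len() int      { return len(p) }
func (p portSpecByName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p portSpecByName) Less(i, j int) bool {
    return strings.Compare(p[i].GetName(), p[j].GetName()) < 0
}

func main() {
    ports := []*PortSpec{{Name: "http"}, {Name: "admin"}, {Name: "grpc"}}
    // A stable sort keeps the port order consistent across conversions,
    // which avoids unnecessary job restarts caused by reordered specs.
    sort.Stable(portSpecByName(ports))
    for _, p := range ports {
        fmt.Println(p.GetName())
    }
}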


deploy.go

Source: deploy.go (GitHub)


...26)27func (i *LxdCInstance) GetNodeHooks4Event(event string, proj *specs.LxdCProject, group *specs.LxdCGroup, node *specs.LxdCNode) []specs.LxdCHook {28 // Retrieve project hooks29 projHooks := proj.GetHooks4Nodes(event, []string{"*"})30 projHooks = specs.FilterHooks4Node(&projHooks, []string{node.GetName(), "host"})31 // Retrieve group hooks32 groupHooks := group.GetHooks4Nodes(event, []string{"*"})33 groupHooks = specs.FilterHooks4Node(&groupHooks, []string{node.GetName(), "host"})34 ans := projHooks35 ans = append(ans, groupHooks...)36 ans = append(ans, node.GetAllHooks(event)...)37 return ans38}39func (i *LxdCInstance) ApplyProject(projectName string) error {40 env := i.GetEnvByProjectName(projectName)41 if env == nil {42 return errors.New("No environment found for project " + projectName)43 }44 proj := env.GetProjectByName(projectName)45 if proj == nil {46 return errors.New("No project found with name " + projectName)47 }48 if i.NodesPrefix != "" {49 proj.SetNodesPrefix(i.NodesPrefix)50 }51 // Get only host hooks. All other hooks are handled by group and node.52 preProjHooks := proj.GetHooks4Nodes("pre-project", []string{"host"})53 postProjHooks := proj.GetHooks4Nodes("post-project", []string{"*", "host"})54 // Execute pre-project hooks55 i.Logger.Debug(fmt.Sprintf(56 "[%s] Running %d pre-project hooks... ", projectName, len(preProjHooks)))57 err := i.ProcessHooks(&preProjHooks, proj, nil, nil)58 if err != nil {59 return err60 }61 compiler, err := template.NewProjectTemplateCompiler(env, proj)62 if err != nil {63 return err64 }65 // Compiler project files66 err = template.CompileProjectFiles(proj, compiler, template.CompilerOpts{})67 if err != nil {68 return err69 }70 for _, grp := range proj.Groups {71 if !grp.ToProcess(i.GroupsEnabled, i.GroupsDisabled) {72 i.Logger.Debug("Skipped group ", grp.Name)73 continue74 }75 err := i.ApplyGroup(&grp, proj, env, compiler)76 if err != nil {77 return err78 }79 }80 // Execute post-project hooks81 i.Logger.Debug(fmt.Sprintf(82 "[%s] Running %d post-project hooks... 
", projectName, len(preProjHooks)))83 err = i.ProcessHooks(&postProjHooks, proj, nil, nil)84 if err != nil {85 return err86 }87 return nil88}89func (i *LxdCInstance) ProcessHooks(hooks *[]specs.LxdCHook, proj *specs.LxdCProject, group *specs.LxdCGroup, targetNode *specs.LxdCNode) error {90 var res int91 nodes := []specs.LxdCNode{}92 storeVar := false93 executorMap := make(map[string]*lxd_executor.LxdCExecutor, 0)94 if len(*hooks) > 0 {95 runSingleCmd := func(h *specs.LxdCHook, node, cmds string) error {96 var executor *lxd_executor.LxdCExecutor97 envs, err := proj.GetEnvsMap()98 if err != nil {99 return err100 }101 if _, ok := envs["HOME"]; !ok {102 envs["HOME"] = "/"103 }104 if node != "host" {105 var grp *specs.LxdCGroup = nil106 var nodeEntity *specs.LxdCNode = nil107 _, _, grp, nodeEntity = i.GetEntitiesByNodeName(node)108 if nodeEntity == nil && i.NodesPrefix != "" {109 // Trying to search node with prefix110 _, _, grp, nodeEntity = i.GetEntitiesByNodeName(111 fmt.Sprintf("%s-%s", i.NodesPrefix, node))112 if nodeEntity != nil {113 node = fmt.Sprintf("%s-%s", i.NodesPrefix, node)114 }115 }116 if nodeEntity != nil {117 json, err := nodeEntity.ToJson()118 if err != nil {119 return err120 }121 envs["node"] = json122 if nodeEntity.Labels != nil && len(nodeEntity.Labels) > 0 {123 for k, v := range nodeEntity.Labels {124 envs[k] = v125 }126 }127 if _, ok := executorMap[node]; !ok {128 // Initialize executor129 executor = lxd_executor.NewLxdCExecutor(grp.Connection,130 i.Config.GetGeneral().LxdConfDir, []string{}, grp.Ephemeral,131 i.Config.GetLogging().CmdsOutput,132 i.Config.GetLogging().RuntimeCmdsOutput)133 err := executor.Setup()134 if err != nil {135 return err136 }137 executor.SetP2PMode(i.Config.GetGeneral().P2PMode)138 executorMap[node] = executor139 } else {140 if group == nil {141 return errors.New(fmt.Sprintf(142 "Error on retrieve node information for %s and hook %v",143 node, h))144 }145 executor = lxd_executor.NewLxdCExecutor(group.Connection,146 i.Config.GetGeneral().LxdConfDir, []string{}, group.Ephemeral,147 i.Config.GetLogging().CmdsOutput,148 i.Config.GetLogging().RuntimeCmdsOutput)149 err := executor.Setup()150 if err != nil {151 return err152 }153 executor.SetP2PMode(i.Config.GetGeneral().P2PMode)154 }155 // Initialize entrypoint to ensure to set always the156 if nodeEntity.Entrypoint != nil && len(nodeEntity.Entrypoint) > 0 {157 executor.Entrypoint = nodeEntity.Entrypoint158 } else {159 executor.Entrypoint = []string{}160 }161 } else {162 executor = executorMap[node]163 }164 } else {165 connection := "local"166 ephemeral := true167 if group != nil {168 connection = group.Connection169 ephemeral = group.Ephemeral170 }171 // Initialize executor with local LXD connection172 executor = lxd_executor.NewLxdCExecutor(connection,173 i.Config.GetGeneral().LxdConfDir, []string{}, ephemeral,174 i.Config.GetLogging().CmdsOutput,175 i.Config.GetLogging().RuntimeCmdsOutput)176 executor.SetP2PMode(i.Config.GetGeneral().P2PMode)177 // NOTE: I don't need to run executor.Setup() for host node.178 }179 if h.Out2Var != "" || h.Err2Var != "" {180 storeVar = true181 } else {182 storeVar = false183 }184 if h.Node == "host" {185 if storeVar {186 res, err = executor.RunHostCommandWithOutput4Var(cmds, h.Out2Var, h.Err2Var, &envs, h.Entrypoint)187 } else {188 if i.Config.GetLogging().RuntimeCmdsOutput {189 emitter := executor.GetEmitter()190 res, err = executor.RunHostCommandWithOutput(191 cmds, envs,192 (emitter.(*lxd_executor.LxdCEmitter)).GetHostWriterStdout(),193 
(emitter.(*lxd_executor.LxdCEmitter)).GetHostWriterStderr(),194 h.Entrypoint,195 )196 } else {197 res, err = executor.RunHostCommand(cmds, envs, h.Entrypoint)198 }199 }200 } else {201 if storeVar {202 res, err = executor.RunCommandWithOutput4Var(node, cmds, h.Out2Var, h.Err2Var, &envs, h.Entrypoint)203 } else {204 if i.Config.GetLogging().RuntimeCmdsOutput {205 emitter := executor.GetEmitter()206 res, err = executor.RunCommandWithOutput(207 node, cmds, envs,208 (emitter.(*lxd_executor.LxdCEmitter)).GetLxdWriterStdout(),209 (emitter.(*lxd_executor.LxdCEmitter)).GetLxdWriterStderr(),210 h.Entrypoint)211 } else {212 res, err = executor.RunCommand(213 node, cmds, envs, h.Entrypoint,214 )215 }216 }217 }218 if err != nil {219 i.Logger.Error("Error " + err.Error())220 return err221 }222 if res != 0 {223 i.Logger.Error(fmt.Sprintf("Command result wrong (%d). Exiting.", res))224 return errors.New("Error on execute command: " + cmds)225 }226 if storeVar {227 if len(proj.Environments) == 0 {228 proj.AddEnvironment(&specs.LxdCEnvVars{EnvVars: make(map[string]interface{}, 0)})229 }230 if h.Out2Var != "" {231 proj.Environments[len(proj.Environments)-1].EnvVars[h.Out2Var] = envs[h.Out2Var]232 }233 if h.Err2Var != "" {234 proj.Environments[len(proj.Environments)-1].EnvVars[h.Err2Var] = envs[h.Err2Var]235 }236 }237 return nil238 }239 // Retrieve list of nodes240 if group != nil {241 nodes = group.Nodes242 } else {243 for _, g := range proj.Groups {244 nodes = append(nodes, g.Nodes...)245 }246 }247 for _, h := range *hooks {248 // Check if hooks must be processed249 if !h.ToProcess(i.FlagsEnabled, i.FlagsDisabled) {250 i.Logger.Debug("Skipped hooks ", h)251 continue252 }253 if h.Commands != nil && len(h.Commands) > 0 {254 for _, cmds := range h.Commands {255 switch h.Node {256 case "", "*":257 if targetNode != nil {258 err := runSingleCmd(&h, targetNode.GetName(), cmds)259 if err != nil {260 return err261 }262 } else {263 for _, node := range nodes {264 err := runSingleCmd(&h, node.GetName(), cmds)265 if err != nil {266 return err267 }268 }269 }270 default:271 err := runSingleCmd(&h, h.Node, cmds)272 if err != nil {273 return err274 }275 }276 }277 }278 }279 }280 return nil281}282func (i *LxdCInstance) ApplyGroup(group *specs.LxdCGroup, proj *specs.LxdCProject, env *specs.LxdCEnvironment, compiler template.LxdCTemplateCompiler) error {283 var syncSourceDir string284 envBaseAbs, err := filepath.Abs(filepath.Dir(env.File))285 if err != nil {286 return err287 }288 // Retrieve pre-group hooks from project289 preGroupHooks := proj.GetHooks4Nodes("pre-group", []string{"*", "host"})290 // Retrieve pre-group hooks from group291 preGroupHooks = append(preGroupHooks, group.GetHooks4Nodes("pre-group", []string{"*", "host"})...)292 // Run pre-group hooks293 i.Logger.Debug(fmt.Sprintf(294 "[%s - %s] Running %d pre-group hooks... 
", proj.Name, group.Name, len(preGroupHooks)))295 err = i.ProcessHooks(&preGroupHooks, proj, group, nil)296 if err != nil {297 return err298 }299 // We need reload variables updated from out2var/err2var hooks.300 compiler.InitVars()301 // Compile group templates302 err = template.CompileGroupFiles(group, compiler, template.CompilerOpts{})303 if err != nil {304 return err305 }306 // Initialize executor307 executor := lxd_executor.NewLxdCExecutor(group.Connection,308 i.Config.GetGeneral().LxdConfDir, []string{}, group.Ephemeral,309 i.Config.GetLogging().CmdsOutput,310 i.Config.GetLogging().RuntimeCmdsOutput)311 err = executor.Setup()312 if err != nil {313 return err314 }315 executor.SetP2PMode(i.Config.GetGeneral().P2PMode)316 // Retrieve the list of configured profiles317 instanceProfiles, err := executor.GetProfilesList()318 if err != nil {319 return errors.New(320 fmt.Sprintf("Error on retrieve the list of instance profile of the group %s: %s",321 group.Name, err.Error()))322 }323 // TODO: implement parallel creation324 for _, node := range group.Nodes {325 syncSourceDir = ""326 // Initialize entrypoint to ensure to set always the327 if node.Entrypoint != nil && len(node.Entrypoint) > 0 {328 executor.Entrypoint = node.Entrypoint329 } else {330 executor.Entrypoint = []string{}331 }332 isPresent, err := executor.IsPresentContainer(node.GetName())333 if err != nil {334 i.Logger.Error("Error on check if container " +335 node.GetName() + " is present: " + err.Error())336 return err337 }338 i.Logger.Debug(fmt.Sprintf(339 "[%s - %s] Node %s is present: %v.",340 proj.Name, group.Name, node.GetName(), isPresent))341 if !isPresent {342 // Retrieve pre-node-creation hooks343 preCreationHooks := i.GetNodeHooks4Event("pre-node-creation", proj, group, &node)344 // Run pre-node-creation hooks345 i.Logger.Debug(fmt.Sprintf(346 "[%s - %s] Running %d pre-node-creation hooks for node %s... ",347 proj.Name, group.Name, len(preCreationHooks), node.GetName()))348 err = i.ProcessHooks(&preCreationHooks, proj, group, &node)349 if err != nil {350 return err351 }352 profiles := []string{}353 profiles = append(profiles, group.CommonProfiles...)354 profiles = append(profiles, node.Profiles...)355 configMap := node.GetLxdConfig(group.GetLxdConfig())356 i.Logger.Debug(fmt.Sprintf("[%s] Using profiles %s",357 node.GetName(), profiles))358 i.Logger.Debug(fmt.Sprintf("[%s] Using config map %s",359 node.GetName(), configMap))360 err := i.validateProfiles(instanceProfiles, profiles)361 if err != nil {362 return err363 }364 err = executor.CreateContainerWithConfig(node.GetName(), node.ImageSource,365 node.ImageRemoteServer, profiles, configMap)366 if err != nil {367 i.Logger.Error("Error on create container " +368 node.GetName() + ":" + err.Error())369 return err370 }371 postCreationHooks := i.GetNodeHooks4Event("post-node-creation", proj, group, &node)372 // Run post-node-creation hooks373 i.Logger.Debug(fmt.Sprintf(374 "[%s - %s] Running %d post-node-creation hooks for node %s... ",375 proj.Name, group.Name, len(postCreationHooks), node.GetName()))376 err = i.ProcessHooks(&postCreationHooks, proj, group, &node)377 if err != nil {378 return err379 }380 } else {381 isRunning, err := executor.IsRunningContainer(node.GetName())382 if err != nil {383 i.Logger.Error(384 fmt.Sprintf("Error on check if container %s is running: %s",385 node.GetName(), err.Error()))386 return err387 }388 if !isRunning {389 // Run post-node-creation hooks390 i.Logger.Debug(fmt.Sprintf(391 "[%s - %s] Node %s is already present but not running. 
I'm starting it.",392 proj.Name, group.Name, node.GetName()))393 err = executor.StartContainer(node.GetName())394 if err != nil {395 i.Logger.Error(396 fmt.Sprintf("Error on start container %s: %s",397 node.GetName(), err.Error()))398 return err399 }400 }401 }402 // Retrieve pre-node-sync hooks of the node from project403 preSyncHooks := i.GetNodeHooks4Event("pre-node-sync", proj, group, &node)404 // Run pre-node-sync hooks405 err = i.ProcessHooks(&preSyncHooks, proj, group, &node)406 if err != nil {407 return err408 }409 // We need reload variables updated from out2var/err2var hooks.410 compiler.InitVars()411 // Compile node templates412 err = template.CompileNodeFiles(node, compiler, template.CompilerOpts{})413 if err != nil {414 return err415 }416 if len(node.SyncResources) > 0 && !i.SkipSync {417 if node.SourceDir != "" {418 if node.IsSourcePathRelative() {419 syncSourceDir = filepath.Join(envBaseAbs, node.SourceDir)420 } else {421 syncSourceDir = node.SourceDir422 }423 } else {424 // Use env file directory425 syncSourceDir = envBaseAbs426 }427 i.Logger.Debug(i.Logger.Aurora.Bold(428 i.Logger.Aurora.BrightCyan(429 ">>> [" + node.GetName() + "] Using sync source basedir " +430 syncSourceDir)))431 nResources := len(node.SyncResources)432 i.Logger.InfoC(433 i.Logger.Aurora.Bold(434 i.Logger.Aurora.BrightCyan(435 fmt.Sprintf(">>> [%s] Syncing %d resources... - :bus:",436 node.GetName(), nResources))))437 for idx, resource := range node.SyncResources {438 var sourcePath string439 if filepath.IsAbs(resource.Source) {440 sourcePath = resource.Source441 } else {442 sourcePath = filepath.Join(syncSourceDir, resource.Source)443 }444 i.Logger.DebugC(445 i.Logger.Aurora.Italic(446 i.Logger.Aurora.BrightCyan(447 fmt.Sprintf(">>> [%s] %s => %s",448 node.GetName(), resource.Source,449 resource.Destination))))450 err = executor.RecursivePushFile(node.GetName(),451 sourcePath, resource.Destination)452 if err != nil {453 i.Logger.Debug("Error on sync from sourcePath " + sourcePath +454 " to dest " + resource.Destination)455 i.Logger.Error("Error on sync " + resource.Source + ": " + err.Error())456 return err457 }458 i.Logger.InfoC(459 i.Logger.Aurora.BrightCyan(460 fmt.Sprintf(">>> [%s] - [%2d/%2d] %s - :check_mark:",461 node.GetName(), idx+1, nResources, resource.Destination)))462 }463 }464 // Retrieve post-node-sync hooks of the node from project465 postSyncHooks := i.GetNodeHooks4Event("post-node-sync", proj, group, &node)466 // Run post-node-sync hooks467 err = i.ProcessHooks(&postSyncHooks, proj, group, &node)468 if err != nil {469 return err470 }471 }472 // Retrieve post-group hooks from project473 postGroupHooks := proj.GetHooks4Nodes("post-group", []string{"*", "host"})474 postGroupHooks = append(postGroupHooks, group.GetHooks4Nodes("post-group", []string{"*", "host"})...)475 // Execute post-group hooks476 i.Logger.Debug(fmt.Sprintf(477 "[%s - %s] Running %d post-group hooks... 
", proj.Name, group.Name, len(postGroupHooks)))478 err = i.ProcessHooks(&postGroupHooks, proj, group, nil)479 return err480}481func (i *LxdCInstance) ApplyCommand(c *specs.LxdCCommand, proj *specs.LxdCProject, envs []string, varfiles []string) error {482 if c == nil {483 return errors.New("Invalid command")484 }485 if proj == nil {486 return errors.New("Invalid project")487 }488 env := i.GetEnvByProjectName(proj.GetName())489 if env == nil {490 return errors.New(fmt.Sprintf("No environment found for project " + proj.GetName()))491 }492 envBaseDir, err := filepath.Abs(filepath.Dir(env.File))493 if err != nil {494 return err495 }496 // Load envs from commands.497 if len(c.VarFiles) > 0 {498 for _, varFile := range c.VarFiles {499 envs, err := i.loadEnvFile(envBaseDir, varFile, proj)500 if err != nil {501 return errors.New(502 fmt.Sprintf(503 "Error on load additional envs var file %s: %s",504 varFile, err.Error()),...

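In deploy.go, GetName mostly serves as a lookup key: hooks are filtered against node.GetName() (plus the special "host" target), and containers are created, started, and synced under that same name. Here is a simplified, self-contained sketch of the hook-filtering part; Hook and Node are stand-ins for the specs.LxdCHook and specs.LxdCNode types:

package main

import "fmt"

// Hook and Node are simplified stand-ins for the real spec types.
type Hook struct {
    Node string // "", "*", "host", or a node name
    Cmd  string
}

type Node struct {
    Name string
}

func (n *Node) GetName() string { return n.Name }

// filterHooksForNode keeps only the hooks addressed to the given names,
// mirroring how the snippet matches hooks against node.GetName() and "host".
func filterHooksForNode(hooks []Hook, names ...string) []Hook {
    allowed := map[string]struct{}{}
    for _, n := range names {
        allowed[n] = struct{}{}
    }
    var out []Hook
    for _, h := range hooks {
        if _, ok := allowed[h.Node]; ok {
            out = append(out, h)
        }
    }
    return out
}

func main() {
    node := &Node{Name: "web01"}
    hooks := []Hook{
        {Node: "host", Cmd: "echo on host"},
        {Node: "web01", Cmd: "echo on web01"},
        {Node: "db01", Cmd: "echo on db01"},
    }
    for _, h := range filterHooksForNode(hooks, node.GetName(), "host") {
        fmt.Println(h.Cmd)
    }
}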

executor.go

Source: executor.go (GitHub)


...
    } else {
        for _, inputLocation := range i.GetInputShardLocations() {
            wg.Add(1)
            inChan := util.NewPiper()
            // println(i.GetName(), "connecting to", inputLocation.Address(), "to read", inputLocation.GetName())
            go func(inputLocation *pb.DatasetShardLocation) {
                err := netchan.DialReadChannel(wg, i.GetName(), inputLocation.Address(), inputLocation.GetName(), inputLocation.GetOnDisk(), inChan.Writer)
                if err != nil {
                    ioErrChan <- fmt.Errorf("Failed %s reading %s from %s: %v", i.GetName(), inputLocation.GetName(), inputLocation.Address(), err)
                }
            }(inputLocation)
            readers = append(readers, inChan.Reader)
        }
    }
    return
}

func setupWriters(wg *sync.WaitGroup, ioErrChan chan error,
    i *pb.Instruction, outPiper *util.Piper, isLast bool, readerCount int) (writers []io.Writer) {
    if !isLast {
        writers = append(writers, outPiper.Writer)
    } else {
        for _, outputLocation := range i.GetOutputShardLocations() {
            wg.Add(1)
            outChan := util.NewPiper()
            // println(i.GetName(), "connecting to", outputLocation.Address(), "to write", outputLocation.GetName(), "readerCount", readerCount)
            go func(outputLocation *pb.DatasetShardLocation) {
                err := netchan.DialWriteChannel(wg, i.GetName(), outputLocation.Address(), outputLocation.GetName(), outputLocation.GetOnDisk(), outChan.Reader, readerCount)
                if err != nil {
                    ioErrChan <- fmt.Errorf("Failed %s writing %s to %s: %v", i.GetName(), outputLocation.GetName(), outputLocation.Address(), err)
                }
            }(outputLocation)
            writers = append(writers, outChan.Writer)
        }
    }
    return
}

func (exe *Executor) ExecuteInstruction(wg *sync.WaitGroup, ioErrChan, exeErrChan chan error,
    inChan, outChan *util.Piper, prevIsPipe bool, i *pb.Instruction, isFirst, isLast bool, readerCount int) {
    defer wg.Done()
    readers := setupReaders(wg, ioErrChan, i, inChan, isFirst)
    writers := setupWriters(wg, ioErrChan, i, outChan, isLast, readerCount)
    defer func() {
        for _, writer := range writers {
            if c, ok := writer.(io.Closer); ok {
                c.Close()
            }
        }
    }()
    util.BufWrites(writers, func(writers []io.Writer) {
        if f := instruction.InstructionRunner.GetInstructionFunction(i); f != nil {
            // TODO: use the stats
            stats := &instruction.Stats{}
            err := f(readers, writers, stats)
            if err != nil {
                // println(i.GetName(), "running error", err.Error())
                exeErrChan <- fmt.Errorf("Failed executing %s: %v", i.GetName(), err)
            }
            return
        }
        // TODO: add errChan to scripts also?
        // println("starting", *i.Name, "inChan", inChan, "outChan", outChan)
        if i.GetScript() != nil {
            command := exec.Command(
                i.GetScript().GetPath(), i.GetScript().GetArgs()...,
            )
            wg.Add(1)
            util.Execute(wg, i.GetName(), command, readers[0], writers[0], prevIsPipe, i.GetScript().GetIsPipe(), false, os.Stderr)
        } else {
            panic("what is this? " + i.String())
        }
    })
}
...
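Here GetName supplies the instruction's name for channel wiring and, above all, for error messages, so a failed read or write can be traced back to the instruction that owned it. A stripped-down sketch of that idea follows; Instruction is a stand-in for the pb.Instruction message, and dialRead only simulates the real netchan.DialReadChannel call:

package main

import "fmt"

// Instruction is a stand-in for the pb.Instruction protobuf message;
// only the name accessor matters for this sketch.
type Instruction struct {
    Name string
}

func (i *Instruction) GetName() string {
    if i == nil {
        return ""
    }
    return i.Name
}

// dialRead simulates opening a read channel; the instruction name is used
// purely as a label for diagnostics, as in the snippet above.
func dialRead(i *Instruction, addr string) error {
    if addr == "" {
        return fmt.Errorf("Failed %s reading from %q: empty address", i.GetName(), addr)
    }
    return nil
}

func main() {
    i := &Instruction{Name: "MapShard-0"}
    if err := dialRead(i, ""); err != nil {
        fmt.Println(err)
    }
}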


GetName

Using AI Code Generation


package main

import (
    "fmt"

    "example.com/yourmodule/executor" // illustrative import path; point it at the package that defines Executor
)

func main() {
    e := executor.Executor{}
    fmt.Println(e.GetName())
}


GetName

Using AI Code Generation


package main

// The original used a bare `import "executor"`; that only resolves in GOPATH
// mode, so substitute your module's full import path when building with modules.
import "executor"

func main() {
    // The original never declared e; a zero-value Executor is assumed here.
    var e executor.Executor
    _ = e.GetName()
}


GetName

Using AI Code Generation


// main.go — constructs an executor via NewExecutor and reads its name back.
package main

import (
    "fmt"

    "example.com/yourmodule/executor" // illustrative import path
)

func main() {
    e := executor.NewExecutor("Rajat")
    fmt.Println(e.GetName())
}

// executor/executor.go — the package referenced above.
package executor

// Executor carries the name passed to NewExecutor.
type Executor struct {
    Name string
}

func NewExecutor(name string) *Executor {
    return &Executor{Name: name}
}

// GetName returns the name the executor was constructed with.
// (The original left this body empty.)
func (e *Executor) GetName() string {
    return e.Name
}


GetName

Using AI Code Generation


// main.go — calls the package-level GetName exported by the imported package.
package main

import (
    "fmt"

    "github.com/GoTraining/1" // assumed to declare `package executor`
)

func main() {
    fmt.Println(executor.GetName())
}

// executor.go — inside the imported package. From within this package the
// call is simply GetName(), with no qualifier.
package executor

// GetName returns a fixed name; the original left this body empty,
// so the return value here is illustrative.
func GetName() string {
    return "executor"
}

In the folder structure above, the main package imports the executor package and calls its exported GetName function.


GetName

Using AI Code Generation


package main

import "fmt"

// Executor is satisfied by any type that can report its name.
type Executor interface {
    GetName() string
}

type Executor1 struct{}

// The return values below are illustrative; the original left these bodies empty.
func (e *Executor1) GetName() string { return "Executor1" }

type Executor2 struct{}

func (e *Executor2) GetName() string { return "Executor2" }

type Executor3 struct{}

func (e *Executor3) GetName() string { return "Executor3" }

func main() {
    executors := []Executor{&Executor1{}, &Executor2{}, &Executor3{}}
    for _, e := range executors {
        fmt.Println(e.GetName())
    }
}


GetName

Using AI Code Generation


package main

import "fmt"

// Executor and its name field are inlined here so the example is self-contained;
// the original left both the struct and the method body empty.
type Executor struct {
    name string
}

// GetName uses a value receiver, as in the original.
func (exec Executor) GetName() string {
    return exec.name
}

func main() {
    exec := Executor{name: "my-executor"}
    fmt.Println(exec.GetName())
}


GetName

Using AI Code Generation


package main

import (
    "fmt"

    "example.com/yourmodule/executor" // illustrative import path
)

func main() {
    // Package-level function form (the first variant in the original):
    fmt.Println(executor.GetName())

    // Method form on an Executor value (the remaining variants). The original
    // named the variable "executor", which shadows the package name; a
    // different identifier avoids that.
    e := executor.Executor{}
    fmt.Println(e.GetName())
}
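The cleaned-up snippet above mixes two call forms that are easy to confuse: executor.GetName() calls a package-level function, while e.GetName() calls a method bound to an Executor value. Both can coexist in one package, as this small sketch shows (the names returned are purely illustrative):

package main

import "fmt"

type Executor struct {
    name string
}

// Method form: reads the name off a specific Executor value.
func (e Executor) GetName() string {
    return e.name
}

// Package-level form: no receiver, returns a fixed or globally configured name.
func GetName() string {
    return "default-executor"
}

func main() {
    e := Executor{name: "per-vu-iterations"}
    fmt.Println(e.GetName()) // method call on a value
    fmt.Println(GetName())   // plain function call
}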


GetName

Using AI Code Generation


package main

import "fmt"

// Executor is used unqualified in the original, so it is assumed to live in
// the same package; the name field and its value are illustrative.
type Executor struct {
    name string
}

func (e Executor) GetName() string {
    return e.name
}

func main() {
    executor := Executor{name: "my-executor"}
    fmt.Println(executor.GetName())
}


