How to use sanitizeName method of crds Package

Best Testkube code snippet using crds.sanitizeName

tiltfile_state.go

Source: tiltfile_state.go (GitHub)


package tiltfile

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/looplab/tarjan"
	"github.com/pkg/errors"
	"go.starlark.net/starlark"
	"go.starlark.net/syntax"
	"golang.org/x/mod/semver"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/tilt-dev/tilt/internal/controllers/apis/cmdimage"
	"github.com/tilt-dev/tilt/internal/controllers/apis/dockerimage"
	"github.com/tilt-dev/tilt/internal/controllers/apis/liveupdate"
	"github.com/tilt-dev/tilt/internal/controllers/apiset"
	"github.com/tilt-dev/tilt/internal/localexec"
	"github.com/tilt-dev/tilt/internal/tiltfile/hasher"
	"github.com/tilt-dev/tilt/internal/tiltfile/links"
	"github.com/tilt-dev/tilt/internal/tiltfile/print"
	"github.com/tilt-dev/tilt/internal/tiltfile/probe"
	"github.com/tilt-dev/tilt/internal/tiltfile/sys"
	"github.com/tilt-dev/tilt/internal/tiltfile/tiltextension"
	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/internal/container"
	"github.com/tilt-dev/tilt/internal/dockercompose"
	"github.com/tilt-dev/tilt/internal/feature"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/ospath"
	"github.com/tilt-dev/tilt/internal/sliceutils"
	"github.com/tilt-dev/tilt/internal/tiltfile/analytics"
	"github.com/tilt-dev/tilt/internal/tiltfile/config"
	"github.com/tilt-dev/tilt/internal/tiltfile/dockerprune"
	"github.com/tilt-dev/tilt/internal/tiltfile/encoding"
	"github.com/tilt-dev/tilt/internal/tiltfile/git"
	"github.com/tilt-dev/tilt/internal/tiltfile/include"
	"github.com/tilt-dev/tilt/internal/tiltfile/io"
	tiltfile_k8s "github.com/tilt-dev/tilt/internal/tiltfile/k8s"
	"github.com/tilt-dev/tilt/internal/tiltfile/k8scontext"
	"github.com/tilt-dev/tilt/internal/tiltfile/loaddynamic"
	"github.com/tilt-dev/tilt/internal/tiltfile/metrics"
	"github.com/tilt-dev/tilt/internal/tiltfile/os"
	"github.com/tilt-dev/tilt/internal/tiltfile/secretsettings"
	"github.com/tilt-dev/tilt/internal/tiltfile/shlex"
	"github.com/tilt-dev/tilt/internal/tiltfile/starkit"
	"github.com/tilt-dev/tilt/internal/tiltfile/starlarkstruct"
	"github.com/tilt-dev/tilt/internal/tiltfile/telemetry"
	"github.com/tilt-dev/tilt/internal/tiltfile/updatesettings"
	tfv1alpha1 "github.com/tilt-dev/tilt/internal/tiltfile/v1alpha1"
	"github.com/tilt-dev/tilt/internal/tiltfile/version"
	"github.com/tilt-dev/tilt/internal/tiltfile/watch"
	fwatch "github.com/tilt-dev/tilt/internal/watch"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
	"github.com/tilt-dev/tilt/pkg/model"
)

...

func (s *tiltfileState) k8sDeployTarget(targetName model.TargetName, r *k8sResource, imageTargets []model.ImageTarget, updateSettings model.UpdateSettings) (model.K8sTarget, error) {
	var kdTemplateSpec *v1alpha1.KubernetesDiscoveryTemplateSpec
	if len(r.extraPodSelectors) != 0 {
		kdTemplateSpec = &v1alpha1.KubernetesDiscoveryTemplateSpec{
			ExtraSelectors: k8s.SetsAsLabelSelectors(r.extraPodSelectors),
		}
	}
	sinceTime := apis.NewTime(pkgInitTime)
	applySpec := v1alpha1.KubernetesApplySpec{
		Cluster:                         v1alpha1.ClusterNameDefault,
		Timeout:                         metav1.Duration{Duration: updateSettings.K8sUpsertTimeout()},
		PortForwardTemplateSpec:         k8s.PortForwardTemplateSpec(s.defaultedPortForwards(r.portForwards)),
		DiscoveryStrategy:               r.discoveryStrategy,
		KubernetesDiscoveryTemplateSpec: kdTemplateSpec,
		PodLogStreamTemplateSpec: &v1alpha1.PodLogStreamTemplateSpec{
			SinceTime: &sinceTime,
			IgnoreContainers: []string{
				string(container.IstioInitContainerName),
				string(container.IstioSidecarContainerName),
			},
		},
	}
	var deps []string
	var ignores []v1alpha1.IgnoreDef
	if r.customDeploy != nil {
		deps = r.customDeploy.deps
		ignores = append(ignores, model.DockerignoresToIgnores(r.customDeploy.ignores)...)
		applySpec.ApplyCmd = toKubernetesApplyCmd(r.customDeploy.applyCmd)
		applySpec.DeleteCmd = toKubernetesApplyCmd(r.customDeploy.deleteCmd)
		applySpec.RestartOn = &v1alpha1.RestartOnSpec{
			FileWatches: []string{apis.SanitizeName(fmt.Sprintf("%s:apply", targetName.String()))},
		}
	} else {
		entities := k8s.SortedEntities(r.entities)
		var err error
		applySpec.YAML, err = k8s.SerializeSpecYAML(entities)
		if err != nil {
			return model.K8sTarget{}, err
		}
		for _, locator := range s.k8sImageLocatorsList() {
			if k8s.LocatorMatchesOne(locator, entities) {
				applySpec.ImageLocators = append(applySpec.ImageLocators, locator.ToSpec())
			}
		}
	}
	ignores = append(ignores, repoIgnoresForPaths(deps)...)
	t, err := k8s.NewTarget(targetName, applySpec, s.inferPodReadinessMode(r), r.links)
	if err != nil {
		return model.K8sTarget{}, err
	}
	t = t.WithImageDependencies(model.FilterLiveUpdateOnly(r.imageMapDeps, imageTargets)).
		WithRefInjectCounts(r.imageRefInjectCounts()).
		WithPathDependencies(deps).
		WithIgnores(ignores)
	return t, nil
}

...

func (s *tiltfileState) sanitizeDependencies(ms []model.Manifest) error {
	// warn + delete resource deps that don't exist
	// error if resource deps are not a DAG
	knownResources := make(map[model.ManifestName]bool)
	for _, m := range ms {
		knownResources[m.Name] = true
	}
	// construct the graph and make sure all edges are valid
	edges := make(map[interface{}][]interface{})
	for i, m := range ms {
		var sanitizedDeps []model.ManifestName
		for _, b := range m.ResourceDependencies {
			if m.Name == b {
				return fmt.Errorf("resource %s specified a dependency on itself", m.Name)
			}
			if _, ok := knownResources[b]; !ok {
				logger.Get(s.ctx).Warnf("resource %s specified a dependency on unknown resource %s - dependency ignored", m.Name, b)
				continue
			}
			edges[m.Name] = append(edges[m.Name], b)
			sanitizedDeps = append(sanitizedDeps, b)
		}
		m.ResourceDependencies = sanitizedDeps
		ms[i] = m
	}
	// check for cycles
	connections := tarjan.Connections(edges)
	for _, g := range connections {
		if len(g) > 1 {
			var nodes []string
			for i := range g {
				nodes = append(nodes, string(g[len(g)-i-1].(model.ManifestName)))
			}
			nodes = append(nodes, string(g[len(g)-1].(model.ManifestName)))
			return fmt.Errorf("cycle detected in resource dependency graph: %s", strings.Join(nodes, " -> "))
		}
	}
	return nil
}

...
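
The custom-deploy branch above feeds the target name through apis.SanitizeName so that a string like "frontend:apply" becomes a legal Kubernetes object name for the generated FileWatch. Tilt's actual implementation lives in its pkg/apis package and is not shown on this page; the helper below is a hypothetical sketch of the kind of rewriting such a sanitizer has to perform, not Tilt's code.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// sanitizeObjectName is an illustrative stand-in for apis.SanitizeName.
// Kubernetes object names must consist of lowercase alphanumerics, '-',
// and '.', so characters such as ':' have to be replaced.
func sanitizeObjectName(name string) string {
	name = strings.ToLower(name)
	re := regexp.MustCompile(`[^a-z0-9.-]+`)
	return re.ReplaceAllString(name, "-")
}

func main() {
	fmt.Println(sanitizeObjectName("frontend:apply")) // frontend-apply
}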


tests_crds.go

Source: tests_crds.go (GitHub)


...
		return nil, ErrTypeNotDetected
	}

	name := filepath.Base(path)

	test := &client.UpsertTestOptions{
		Name:      sanitizeName(name),
		Namespace: namespace,
		Content: &testkube.TestContent{
			Type_: string(testkube.TestContentTypeString),
			Data:  fmt.Sprintf("%q", strings.TrimSpace(string(content))),
		},
		Type_: testType,
	}

	return test, nil
}

// sanitizeName sanitizes test name
func sanitizeName(path string) string {
	path = strings.TrimSuffix(path, filepath.Ext(path))

	reg := regexp.MustCompile("[^a-zA-Z0-9-]+")
	path = reg.ReplaceAllString(path, "-")
	path = strings.TrimLeft(path, "-")
	path = strings.TrimRight(path, "-")
	path = strings.ToLower(path)

	if len(path) > 63 {
		return path[:63]
	}

	return path
}

// addEnvToTests adds env files to tests
func addEnvToTests(tests map[string]map[string]client.UpsertTestOptions,
	testEnvs, testSecretEnvs map[string]map[string]map[string]string) (envTests []client.UpsertTestOptions) {
	d := detector.NewDefaultDetector()
	for testType, values := range tests {
		for testName, test := range values {
			testMap := map[string]client.UpsertTestOptions{}
			for envName := range testEnvs[testType] {
				if filename, ok := testEnvs[testType][envName][testName]; ok {
					data, err := os.ReadFile(filename)
					if err != nil {
						ui.UseStderr()
						ui.Warn(fmt.Sprintf("read variables file %s got an error: %v", filename, err))
						continue
					}
					envTest := test
					envTest.Name = sanitizeName(envTest.Name + "-" + envName)
					envTest.ExecutionRequest = &testkube.ExecutionRequest{
						VariablesFile: fmt.Sprintf("%q", strings.TrimSpace(string(data))),
					}
					testMap[envTest.Name] = envTest
				}
			}
			for secretEnvName := range testSecretEnvs[testType] {
				if filename, ok := testSecretEnvs[testType][secretEnvName][testName]; ok {
					data, err := os.ReadFile(filename)
					if err != nil {
						ui.UseStderr()
						ui.Warn(fmt.Sprintf("read secret variables file %s got an error: %v", filename, err))
						continue
					}
					if adapter := d.GetAdapter(testType); adapter != nil {
						variables, err := adapter.GetSecretVariables(string(data))
						if err != nil {
							ui.UseStderr()
							ui.Warn(fmt.Sprintf("parse secret file %s got an error: %v", filename, err))
							continue
						}
						secretEnvTest := test
						secretEnvTest.Name = sanitizeName(secretEnvTest.Name + "-" + secretEnvName)
						if envTest, ok := testMap[secretEnvTest.Name]; ok {
							secretEnvTest = envTest
						}
						if secretEnvTest.ExecutionRequest == nil {
							secretEnvTest.ExecutionRequest = &testkube.ExecutionRequest{}
						}
						secretEnvTest.ExecutionRequest.Variables = variables
						testMap[secretEnvTest.Name] = secretEnvTest
					}
				}
			}
			if len(testMap) == 0 {
				testMap[test.Name] = test
			}
...
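
The sanitizeName implementation above is short enough that its behavior is easiest to see on concrete inputs: strip the file extension, collapse every run of characters outside [a-zA-Z0-9-] into a single dash, trim leading and trailing dashes, lowercase, and cap the result at 63 characters (the DNS-label length limit Kubernetes applies to many names). The standalone demo below copies the function body verbatim from the snippet above; the package main wrapper and sample inputs are ours.

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

// sanitizeName is copied from tests_crds.go above.
func sanitizeName(path string) string {
	path = strings.TrimSuffix(path, filepath.Ext(path))
	reg := regexp.MustCompile("[^a-zA-Z0-9-]+")
	path = reg.ReplaceAllString(path, "-")
	path = strings.TrimLeft(path, "-")
	path = strings.TrimRight(path, "-")
	path = strings.ToLower(path)
	if len(path) > 63 {
		return path[:63]
	}
	return path
}

func main() {
	fmt.Println(sanitizeName("My Test_42.json")) // my-test-42
	fmt.Println(sanitizeName("--weird__name--")) // weird-name
}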


sanitizeName

Using AI Code Generation


1import "fmt"2func main() {3 fmt.Println(crds.sanitizeName("Gopher"))4}5import "fmt"6func main() {7 fmt.Println(crds.sanitizeName("Gopher"))8}9import "fmt"10func main() {11 fmt.Println(crds.sanitizeName("Gopher"))12}13import "fmt"14func main() {15 fmt.Println(crds.sanitizeName("Gopher"))16}17import "fmt"18func main() {19 fmt.Println(crds.sanitizeName("Gopher"))20}21import "fmt"22func main() {23 fmt.Println(crds.sanitizeName("Gopher"))24}25import "fmt"26func main() {27 fmt.Println(crds.sanitizeName("Gopher"))28}29import "fmt"30func main() {31 fmt.Println(crds.sanitizeName("Gopher"))32}33import "fmt"34func main() {35 fmt.Println(crds.sanitizeName("Gopher"))36}37import "fmt"38func main() {39 fmt.Println(crds.sanitizeName("Gopher"))40}41import "fmt"42func main() {43 fmt.Println(crds.sanitizeName("Gopher"))44}45import "fmt"46func main() {47 fmt.Println(crds.sanitizeName("Gopher"))48}


sanitizeName

Using AI Code Generation


1import "fmt"2func main() {3 crds := Crds{}4 crds.sanitizeName()5}6import "fmt"7func main() {8 crds := Crds{}9 crds.sanitizeName()10}11import "fmt"12func main() {13 crds := Crds{}14 crds.sanitizeName()15}16import "fmt"17func main() {18 crds := Crds{}19 crds.sanitizeName()20}21import "fmt"22func main() {23 crds := Crds{}24 crds.sanitizeName()25}26import "fmt"27func main() {28 crds := Crds{}29 crds.sanitizeName()30}31import "fmt"32func main() {33 crds := Crds{}34 crds.sanitizeName()35}36import "fmt"37func main() {38 crds := Crds{}39 crds.sanitizeName()40}41import "fmt"42func main() {43 crds := Crds{}44 crds.sanitizeName()45}46import "fmt"47func main() {48 crds := Crds{}49 crds.sanitizeName()50}51import "fmt"52func main() {53 crds := Crds{}54 crds.sanitizeName()55}


sanitizeName

Using AI Code Generation


Another run of verbatim duplicates, this time with empty import blocks. A name that already matches the allowed lowercase-alphanumeric-and-dash shape passes through unchanged:

package crds

import "fmt"

func printMyName() {
	// "myname" already satisfies the sanitized shape, so it is returned as-is.
	fmt.Println(sanitizeName("myname")) // prints: myname
}


sanitizeName

Using AI Code Generation


This block at least tries to be self-contained by defining its own crds struct, but several fragments are spliced together and the method never sanitizes anything. Untangled into one runnable program (a stand-in type for demonstration, not Testkube's real package):

package main

import "fmt"

// crds is a stand-in demonstration type, not Testkube's crds package.
type crds struct{}

// sanitizeName here is only a stub: it echoes the name rather than
// sanitizing it, which is all the generated snippet ever did.
func (c crds) sanitizeName(name string) {
	fmt.Println(name)
}

func main() {
	crd := crds{}
	crd.sanitizeName("test-name")
}


sanitizeName

Using AI Code Generation


This block sketches an exported wrapper, a unit test, and an init() demonstration, but the wrapper's body was left empty (a compile error in Go, since it must return a string) and two separate programs are run together. Untangled, with SanitizeName as a hypothetical exported stand-in:

package main

import "fmt"

// SanitizeName is a hypothetical exported stand-in; for an already-clean
// name like "test" it is simply the identity.
func SanitizeName(name string) string {
	return name
}

// init runs before main, so this line prints first.
func init() {
	fmt.Println(SanitizeName("test"))
}

func main() {
	fmt.Println(SanitizeName("test"))
}

And the accompanying test, in the same package:

package main

import "testing"

func TestSanitizeName(t *testing.T) {
	if SanitizeName("test") != "test" {
		t.Error("SanitizeName should return test")
	}
}

The init() function is called before the main() function, so it can be used to set up the environment for main() — though doing real work in init is generally discouraged in Go.
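To exercise the real rules rather than a stub, a table-driven test can live inside the crds package itself — a sketch, assuming a _test.go file alongside tests_crds.go:

package crds

import (
	"strings"
	"testing"
)

func TestSanitizeNameRules(t *testing.T) {
	cases := []struct{ in, want string }{
		{"My Test.json", "my-test"},        // extension dropped, space -> dash, lowercased
		{"--weird--name--", "weird--name"}, // leading/trailing dashes trimmed, inner ones kept
		{strings.Repeat("a", 80), strings.Repeat("a", 63)}, // truncated to 63 characters
	}
	for _, c := range cases {
		if got := sanitizeName(c.in); got != c.want {
			t.Errorf("sanitizeName(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}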


sanitizeName

Using AI Code Generation


The interactive variant, repeated six times, forgets to declare the name variable it reads into. Corrected, and again placed inside the crds package so the unexported function is reachable (a hypothetical helper):

package crds

import "fmt"

// promptAndSanitize is a hypothetical interactive helper.
func promptAndSanitize() {
	var name string
	fmt.Println("Enter a name:")
	fmt.Scanln(&name)
	fmt.Println(sanitizeName(name))
}


sanitizeName

Using AI Code Generation


The final pair of duplicates capitalizes the call as crds.SanitizeName, but the package exports no such function; only the unexported sanitizeName exists. For a plain mixed-case name, sanitizing amounts to lowercasing:

package crds

import "fmt"

func printSachin() {
	fmt.Println(sanitizeName("Sachin")) // prints: sachin
}
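The reason for the lowercase-alphanumeric-and-dash shape and the 63-character cap is that these names become Kubernetes object names, which must be valid DNS-1123 labels. A sketch that double-checks a sanitized name with the apimachinery validator (assuming k8s.io/apimachinery is available as a dependency):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	name := "my-test" // e.g. the output of sanitizeName("My Test.json")
	// IsDNS1123Label returns a list of violations; empty means valid.
	if errs := validation.IsDNS1123Label(name); len(errs) == 0 {
		fmt.Printf("%q is a valid Kubernetes name\n", name)
	} else {
		fmt.Printf("%q is invalid: %v\n", name, errs)
	}
}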

