How to use the daemonHost method of the testcontainers package

Best Testcontainers-go code snippet using testcontainers.daemonHost

docker.go

Source:docker.go Github

copy

Full Screen

...92// Host gets host (ip or name) of the docker daemon where the container port is exposed93// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel94// You can use the "TC_HOST" env variable to set this yourself95func (c *DockerContainer) Host(ctx context.Context) (string, error) {96 host, err := c.provider.daemonHost(ctx)97 if err != nil {98 return "", err99 }100 return host, nil101}102// MappedPort gets externally mapped port for a container port103func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) {104 inspect, err := c.inspectContainer(ctx)105 if err != nil {106 return "", err107 }108 if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" {109 return port, nil110 }111 ports, err := c.Ports(ctx)112 if err != nil {113 return "", err114 }115 for k, p := range ports {116 if k.Port() != port.Port() {117 continue118 }119 if port.Proto() != "" && k.Proto() != port.Proto() {120 continue121 }122 if len(p) == 0 {123 continue124 }125 return nat.NewPort(k.Proto(), p[0].HostPort)126 }127 return "", errors.New("port not found")128}129// Ports gets the exposed ports for the container.130func (c *DockerContainer) Ports(ctx context.Context) (nat.PortMap, error) {131 inspect, err := c.inspectContainer(ctx)132 if err != nil {133 return nil, err134 }135 return inspect.NetworkSettings.Ports, nil136}137// SessionID gets the current session id138func (c *DockerContainer) SessionID() string {139 return c.sessionID.String()140}141// Start will start an already created container142func (c *DockerContainer) Start(ctx context.Context) error {143 shortID := c.ID[:12]144 c.logger.Printf("Starting container id: %s image: %s", shortID, c.Image)145 if err := c.provider.client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil {146 return err147 }148 // if a Wait Strategy has been specified, wait before returning149 if c.WaitingFor != nil {150 c.logger.Printf("Waiting for container id %s image: 
%s", shortID, c.Image)151 if err := c.WaitingFor.WaitUntilReady(ctx, c); err != nil {152 return err153 }154 }155 c.logger.Printf("Container is ready id: %s image: %s", shortID, c.Image)156 return nil157}158// Stop will stop an already started container159//160// In case the container fails to stop161// gracefully within a time frame specified by the timeout argument,162// it is forcefully terminated (killed).163//164// If the timeout is nil, the container's StopTimeout value is used, if set,165// otherwise the engine default. A negative timeout value can be specified,166// meaning no timeout, i.e. no forceful termination is performed.167func (c *DockerContainer) Stop(ctx context.Context, timeout *time.Duration) error {168 shortID := c.ID[:12]169 c.logger.Printf("Stopping container id: %s image: %s", shortID, c.Image)170 if err := c.provider.client.ContainerStop(ctx, c.ID, timeout); err != nil {171 return err172 }173 c.logger.Printf("Container is stopped id: %s image: %s", shortID, c.Image)174 return nil175}176// Terminate is used to kill the container. 
It is usually triggered by as defer function.177func (c *DockerContainer) Terminate(ctx context.Context) error {178 select {179 // close reaper if it was created180 case c.terminationSignal <- true:181 default:182 }183 err := c.provider.client.ContainerRemove(ctx, c.GetContainerID(), types.ContainerRemoveOptions{184 RemoveVolumes: true,185 Force: true,186 })187 if err != nil {188 return err189 }190 if c.imageWasBuilt {191 _, err := c.provider.client.ImageRemove(ctx, c.Image, types.ImageRemoveOptions{192 Force: true,193 PruneChildren: true,194 })195 if err != nil {196 return err197 }198 }199 if err := c.provider.client.Close(); err != nil {200 return err201 }202 c.sessionID = uuid.UUID{}203 return nil204}205// update container raw info206func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*types.ContainerJSON, error) {207 inspect, err := c.provider.client.ContainerInspect(ctx, c.ID)208 if err != nil {209 return nil, err210 }211 c.raw = &inspect212 return c.raw, nil213}214func (c *DockerContainer) inspectContainer(ctx context.Context) (*types.ContainerJSON, error) {215 inspect, err := c.provider.client.ContainerInspect(ctx, c.ID)216 if err != nil {217 return nil, err218 }219 return &inspect, nil220}221// Logs will fetch both STDOUT and STDERR from the current container. 
Returns a222// ReadCloser and leaves it up to the caller to extract what it wants.223func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) {224 options := types.ContainerLogsOptions{225 ShowStdout: true,226 ShowStderr: true,227 }228 return c.provider.client.ContainerLogs(ctx, c.ID, options)229}230// FollowOutput adds a LogConsumer to be sent logs from the container's231// STDOUT and STDERR232func (c *DockerContainer) FollowOutput(consumer LogConsumer) {233 if c.consumers == nil {234 c.consumers = []LogConsumer{235 consumer,236 }237 } else {238 c.consumers = append(c.consumers, consumer)239 }240}241// Name gets the name of the container.242func (c *DockerContainer) Name(ctx context.Context) (string, error) {243 inspect, err := c.inspectContainer(ctx)244 if err != nil {245 return "", err246 }247 return inspect.Name, nil248}249// State returns container's running state250func (c *DockerContainer) State(ctx context.Context) (*types.ContainerState, error) {251 inspect, err := c.inspectRawContainer(ctx)252 if err != nil {253 return c.raw.State, err254 }255 return inspect.State, nil256}257// Networks gets the names of the networks the container is attached to.258func (c *DockerContainer) Networks(ctx context.Context) ([]string, error) {259 inspect, err := c.inspectContainer(ctx)260 if err != nil {261 return []string{}, err262 }263 networks := inspect.NetworkSettings.Networks264 n := []string{}265 for k := range networks {266 n = append(n, k)267 }268 return n, nil269}270// ContainerIP gets the IP address of the primary network within the container.271func (c *DockerContainer) ContainerIP(ctx context.Context) (string, error) {272 inspect, err := c.inspectContainer(ctx)273 if err != nil {274 return "", err275 }276 return inspect.NetworkSettings.IPAddress, nil277}278// NetworkAliases gets the aliases of the container for the networks it is attached to.279func (c *DockerContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) {280 
inspect, err := c.inspectContainer(ctx)281 if err != nil {282 return map[string][]string{}, err283 }284 networks := inspect.NetworkSettings.Networks285 a := map[string][]string{}286 for k := range networks {287 a[k] = networks[k].Aliases288 }289 return a, nil290}291func (c *DockerContainer) Exec(ctx context.Context, cmd []string) (int, io.Reader, error) {292 cli := c.provider.client293 response, err := cli.ContainerExecCreate(ctx, c.ID, types.ExecConfig{294 Cmd: cmd,295 Detach: false,296 AttachStdout: true,297 AttachStderr: true,298 })299 if err != nil {300 return 0, nil, err301 }302 hijack, err := cli.ContainerExecAttach(ctx, response.ID, types.ExecStartCheck{})303 if err != nil {304 return 0, nil, err305 }306 err = cli.ContainerExecStart(ctx, response.ID, types.ExecStartCheck{307 Detach: false,308 })309 if err != nil {310 return 0, nil, err311 }312 var exitCode int313 for {314 execResp, err := cli.ContainerExecInspect(ctx, response.ID)315 if err != nil {316 return 0, nil, err317 }318 if !execResp.Running {319 exitCode = execResp.ExitCode320 break321 }322 time.Sleep(100 * time.Millisecond)323 }324 return exitCode, hijack.Reader, nil325}326type FileFromContainer struct {327 underlying *io.ReadCloser328 tarreader *tar.Reader329}330func (fc *FileFromContainer) Read(b []byte) (int, error) {331 return (*fc.tarreader).Read(b)332}333func (fc *FileFromContainer) Close() error {334 return (*fc.underlying).Close()335}336func (c *DockerContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) {337 r, _, err := c.provider.client.CopyFromContainer(ctx, c.ID, filePath)338 if err != nil {339 return nil, err340 }341 tarReader := tar.NewReader(r)342 // if we got here we have exactly one file in the TAR-stream343 // so we advance the index by one so the next call to Read will start reading it344 _, err = tarReader.Next()345 if err != nil {346 return nil, err347 }348 ret := &FileFromContainer{349 underlying: &r,350 tarreader: tarReader,351 }352 
return ret, nil353}354func (c *DockerContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error {355 fileContent, err := ioutil.ReadFile(hostFilePath)356 if err != nil {357 return err358 }359 return c.CopyToContainer(ctx, fileContent, containerFilePath, fileMode)360}361// CopyToContainer copies fileContent data to a file in container362func (c *DockerContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error {363 buffer := &bytes.Buffer{}364 tw := tar.NewWriter(buffer)365 defer tw.Close()366 hdr := &tar.Header{367 Name: filepath.Base(containerFilePath),368 Mode: fileMode,369 Size: int64(len(fileContent)),370 }371 if err := tw.WriteHeader(hdr); err != nil {372 return err373 }374 if _, err := tw.Write(fileContent); err != nil {375 return err376 }377 return c.provider.client.CopyToContainer(ctx, c.ID, filepath.Dir(containerFilePath), buffer, types.CopyToContainerOptions{})378}379// StartLogProducer will start a concurrent process that will continuously read logs380// from the container and will send them to each added LogConsumer381func (c *DockerContainer) StartLogProducer(ctx context.Context) error {382 go func() {383 since := ""384 // if the socket is closed we will make additional logs request with updated Since timestamp385 BEGIN:386 options := types.ContainerLogsOptions{387 ShowStdout: true,388 ShowStderr: true,389 Follow: true,390 Since: since,391 }392 ctx, cancel := context.WithTimeout(ctx, time.Second*5)393 defer cancel()394 r, err := c.provider.client.ContainerLogs(ctx, c.GetContainerID(), options)395 if err != nil {396 // if we can't get the logs, panic, we can't return an error to anything397 // from within this goroutine398 panic(err)399 }400 for {401 select {402 case <-c.stopProducer:403 err := r.Close()404 if err != nil {405 // we can't close the read closer, this should never happen406 panic(err)407 }408 return409 default:410 h := 
make([]byte, 8)411 _, err := r.Read(h)412 if err != nil {413 // proper type matching requires https://go-review.googlesource.com/c/go/+/250357/ (go 1.16)414 if strings.Contains(err.Error(), "use of closed network connection") {415 now := time.Now()416 since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond()))417 goto BEGIN418 }419 // this explicitly ignores errors420 // because we want to keep procesing even if one of our reads fails421 continue422 }423 count := binary.BigEndian.Uint32(h[4:])424 if count == 0 {425 continue426 }427 logType := h[0]428 if logType > 2 {429 _, _ = fmt.Fprintf(os.Stderr, "received invalid log type: %d", logType)430 // sometimes docker returns logType = 3 which is an undocumented log type, so treat it as stdout431 logType = 1432 }433 // a map of the log type --> int representation in the header, notice the first is blank, this is stdin, but the go docker client doesn't allow following that in logs434 logTypes := []string{"", StdoutLog, StderrLog}435 b := make([]byte, count)436 _, err = r.Read(b)437 if err != nil {438 // TODO: add-logger: use logger to log out this error439 _, _ = fmt.Fprintf(os.Stderr, "error occurred reading log with known length %s", err.Error())440 continue441 }442 for _, c := range c.consumers {443 c.Accept(Log{444 LogType: logTypes[logType],445 Content: b,446 })447 }448 }449 }450 }()451 return nil452}453// StopLogProducer will stop the concurrent process that is reading logs454// and sending them to each added LogConsumer455func (c *DockerContainer) StopLogProducer() error {456 c.stopProducer <- true457 return nil458}459// DockerNetwork represents a network started using Docker460type DockerNetwork struct {461 ID string // Network ID from Docker462 Driver string463 Name string464 provider *DockerProvider465 terminationSignal chan bool466}467// Remove is used to remove the network. 
It is usually triggered by as defer function.468func (n *DockerNetwork) Remove(ctx context.Context) error {469 select {470 // close reaper if it was created471 case n.terminationSignal <- true:472 default:473 }474 return n.provider.client.NetworkRemove(ctx, n.ID)475}476// DockerProvider implements the ContainerProvider interface477type DockerProvider struct {478 *DockerProviderOptions479 client *client.Client480 hostCache string481 defaultNetwork string // default container network482}483var _ ContainerProvider = (*DockerProvider)(nil)484// or through Decode485type TestContainersConfig struct {486 Host string `properties:"docker.host,default="`487 TLSVerify int `properties:"docker.tls.verify,default=0"`488 CertPath string `properties:"docker.cert.path,default="`489}490type (491 // DockerProviderOptions defines options applicable to DockerProvider492 DockerProviderOptions struct {493 *GenericProviderOptions494 }495 // DockerProviderOption defines a common interface to modify DockerProviderOptions496 // These can be passed to NewDockerProvider in a variadic way to customize the returned DockerProvider instance497 DockerProviderOption interface {498 ApplyDockerTo(opts *DockerProviderOptions)499 }500 // DockerProviderOptionFunc is a shorthand to implement the DockerProviderOption interface501 DockerProviderOptionFunc func(opts *DockerProviderOptions)502)503func (f DockerProviderOptionFunc) ApplyDockerTo(opts *DockerProviderOptions) {504 f(opts)505}506func Generic2DockerOptions(opts ...GenericProviderOption) []DockerProviderOption {507 converted := make([]DockerProviderOption, 0, len(opts))508 for _, o := range opts {509 switch c := o.(type) {510 case DockerProviderOption:511 converted = append(converted, c)512 default:513 converted = append(converted, DockerProviderOptionFunc(func(opts *DockerProviderOptions) {514 o.ApplyGenericTo(opts.GenericProviderOptions)515 }))516 }517 }518 return converted519}520// NewDockerProvider creates a Docker provider with the 
EnvClient521func NewDockerProvider(provOpts ...DockerProviderOption) (*DockerProvider, error) {522 tcConfig := readTCPropsFile()523 host := tcConfig.Host524 o := &DockerProviderOptions{525 GenericProviderOptions: &GenericProviderOptions{526 Logger: Logger,527 },528 }529 for idx := range provOpts {530 provOpts[idx].ApplyDockerTo(o)531 }532 opts := []client.Opt{client.FromEnv}533 if host != "" {534 opts = append(opts, client.WithHost(host))535 // for further informacion, read https://docs.docker.com/engine/security/protect-access/536 if tcConfig.TLSVerify == 1 {537 cacertPath := filepath.Join(tcConfig.CertPath, "ca.pem")538 certPath := filepath.Join(tcConfig.CertPath, "cert.pem")539 keyPath := filepath.Join(tcConfig.CertPath, "key.pem")540 opts = append(opts, client.WithTLSClientConfig(cacertPath, certPath, keyPath))541 }542 }543 c, err := client.NewClientWithOpts(opts...)544 if err != nil {545 return nil, err546 }547 _, err = c.Ping(context.TODO())548 if err != nil {549 // fallback to environment550 c, err = client.NewClientWithOpts(client.FromEnv)551 if err != nil {552 return nil, err553 }554 }555 c.NegotiateAPIVersion(context.Background())556 p := &DockerProvider{557 DockerProviderOptions: o,558 client: c,559 }560 return p, nil561}562// readTCPropsFile reads from testcontainers properties file, if it exists563func readTCPropsFile() TestContainersConfig {564 home, err := os.UserHomeDir()565 if err != nil {566 return TestContainersConfig{}567 }568 tcProp := filepath.Join(home, ".testcontainers.properties")569 // init from a file570 properties, err := properties.LoadFile(tcProp, properties.UTF8)571 if err != nil {572 return TestContainersConfig{}573 }574 var cfg TestContainersConfig575 if err := properties.Decode(&cfg); err != nil {576 fmt.Printf("invalid testcontainers properties file, returning an empty Testcontainers configuration: %v\n", err)577 return TestContainersConfig{}578 }579 return cfg580}581// BuildImage will build and image from context and Dockerfile, 
then return the tag582func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (string, error) {583 repo := uuid.New()584 tag := uuid.New()585 repoTag := fmt.Sprintf("%s:%s", repo, tag)586 buildContext, err := img.GetContext()587 if err != nil {588 return "", err589 }590 buildOptions := types.ImageBuildOptions{591 BuildArgs: img.GetBuildArgs(),592 Dockerfile: img.GetDockerfile(),593 Context: buildContext,594 Tags: []string{repoTag},595 Remove: true,596 ForceRemove: true,597 }598 resp, err := p.client.ImageBuild(ctx, buildContext, buildOptions)599 if err != nil {600 return "", err601 }602 if img.ShouldPrintBuildLog() {603 termFd, isTerm := term.GetFdInfo(os.Stderr)604 err = jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stderr, termFd, isTerm, nil)605 if err != nil {606 return "", err607 }608 }609 // need to read the response from Docker, I think otherwise the image610 // might not finish building before continuing to execute here611 buf := new(bytes.Buffer)612 _, err = buf.ReadFrom(resp.Body)613 if err != nil {614 return "", err615 }616 _ = resp.Body.Close()617 return repoTag, nil618}619// CreateContainer fulfills a request for a container without starting it620func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (Container, error) {621 var err error622 // Make sure that bridge network exists623 // In case it is disabled we will create reaper_default network624 p.defaultNetwork, err = getDefaultNetwork(ctx, p.client)625 if err != nil {626 return nil, err627 }628 // If default network is not bridge make sure it is attached to the request629 // as container won't be attached to it automatically630 if p.defaultNetwork != Bridge {631 isAttached := false632 for _, net := range req.Networks {633 if net == p.defaultNetwork {634 isAttached = true635 break636 }637 }638 if !isAttached {639 req.Networks = append(req.Networks, p.defaultNetwork)640 }641 }642 exposedPortSet, exposedPortMap, err := 
nat.ParsePortSpecs(req.ExposedPorts)643 if err != nil {644 return nil, err645 }646 env := []string{}647 for envKey, envVar := range req.Env {648 env = append(env, envKey+"="+envVar)649 }650 if req.Labels == nil {651 req.Labels = make(map[string]string)652 }653 sessionID := uuid.New()654 var termSignal chan bool655 if !req.SkipReaper {656 r, err := NewReaper(ctx, sessionID.String(), p, req.ReaperImage)657 if err != nil {658 return nil, fmt.Errorf("%w: creating reaper failed", err)659 }660 termSignal, err = r.Connect()661 if err != nil {662 return nil, fmt.Errorf("%w: connecting to reaper failed", err)663 }664 for k, v := range r.Labels() {665 if _, ok := req.Labels[k]; !ok {666 req.Labels[k] = v667 }668 }669 }670 if err = req.Validate(); err != nil {671 return nil, err672 }673 var tag string674 var platform *specs.Platform675 if req.ShouldBuildImage() {676 tag, err = p.BuildImage(ctx, &req)677 if err != nil {678 return nil, err679 }680 } else {681 tag = req.Image682 if req.ImagePlatform != "" {683 p, err := platforms.Parse(req.ImagePlatform)684 if err != nil {685 return nil, fmt.Errorf("invalid platform %s: %w", req.ImagePlatform, err)686 }687 platform = &p688 }689 var shouldPullImage bool690 if req.AlwaysPullImage {691 shouldPullImage = true // If requested always attempt to pull image692 } else {693 image, _, err := p.client.ImageInspectWithRaw(ctx, tag)694 if err != nil {695 if client.IsErrNotFound(err) {696 shouldPullImage = true697 } else {698 return nil, err699 }700 }701 if platform != nil && (image.Architecture != platform.Architecture || image.Os != platform.OS) {702 shouldPullImage = true703 }704 }705 if shouldPullImage {706 pullOpt := types.ImagePullOptions{707 Platform: req.ImagePlatform, // may be empty708 }709 if req.RegistryCred != "" {710 pullOpt.RegistryAuth = req.RegistryCred711 }712 if err := p.attemptToPullImage(ctx, tag, pullOpt); err != nil {713 return nil, err714 }715 }716 }717 dockerInput := &container.Config{718 Entrypoint: req.Entrypoint,719 
Image: tag,720 Env: env,721 ExposedPorts: exposedPortSet,722 Labels: req.Labels,723 Cmd: req.Cmd,724 Hostname: req.Hostname,725 User: req.User,726 }727 // prepare mounts728 mounts := mapToDockerMounts(req.Mounts)729 hostConfig := &container.HostConfig{730 ExtraHosts: req.ExtraHosts,731 PortBindings: exposedPortMap,732 Mounts: mounts,733 Tmpfs: req.Tmpfs,734 AutoRemove: req.AutoRemove,735 Privileged: req.Privileged,736 NetworkMode: req.NetworkMode,737 Resources: req.Resources,738 }739 endpointConfigs := map[string]*network.EndpointSettings{}740 // #248: Docker allows only one network to be specified during container creation741 // If there is more than one network specified in the request container should be attached to them742 // once it is created. We will take a first network if any specified in the request and use it to create container743 if len(req.Networks) > 0 {744 attachContainerTo := req.Networks[0]745 nw, err := p.GetNetwork(ctx, NetworkRequest{746 Name: attachContainerTo,747 })748 if err == nil {749 endpointSetting := network.EndpointSettings{750 Aliases: req.NetworkAliases[attachContainerTo],751 NetworkID: nw.ID,752 }753 endpointConfigs[attachContainerTo] = &endpointSetting754 }755 }756 networkingConfig := network.NetworkingConfig{757 EndpointsConfig: endpointConfigs,758 }759 resp, err := p.client.ContainerCreate(ctx, dockerInput, hostConfig, &networkingConfig, platform, req.Name)760 if err != nil {761 return nil, err762 }763 // #248: If there is more than one network specified in the request attach newly created container to them one by one764 if len(req.Networks) > 1 {765 for _, n := range req.Networks[1:] {766 nw, err := p.GetNetwork(ctx, NetworkRequest{767 Name: n,768 })769 if err == nil {770 endpointSetting := network.EndpointSettings{771 Aliases: req.NetworkAliases[n],772 }773 err = p.client.NetworkConnect(ctx, nw.ID, resp.ID, &endpointSetting)774 if err != nil {775 return nil, err776 }777 }778 }779 }780 c := &DockerContainer{781 ID: resp.ID,782 
WaitingFor: req.WaitingFor,783 Image: tag,784 imageWasBuilt: req.ShouldBuildImage(),785 sessionID: sessionID,786 provider: p,787 terminationSignal: termSignal,788 skipReaper: req.SkipReaper,789 stopProducer: make(chan bool),790 logger: p.Logger,791 }792 return c, nil793}794// attemptToPullImage tries to pull the image while respecting the ctx cancellations.795// Besides, if the image cannot be pulled due to ErrorNotFound then no need to retry but terminate immediately.796func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pullOpt types.ImagePullOptions) error {797 var (798 err error799 pull io.ReadCloser800 )801 err = backoff.Retry(func() error {802 pull, err = p.client.ImagePull(ctx, tag, pullOpt)803 if err != nil {804 if _, ok := err.(errdefs.ErrNotFound); ok {805 return backoff.Permanent(err)806 }807 Logger.Printf("Failed to pull image: %s, will retry", err)808 return err809 }810 return nil811 }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))812 if err != nil {813 return err814 }815 defer pull.Close()816 // download of docker image finishes at EOF of the pull request817 _, err = ioutil.ReadAll(pull)818 return err819}820// Health measure the healthiness of the provider. Right now we leverage the821// docker-client ping endpoint to see if the daemon is reachable.822func (p *DockerProvider) Health(ctx context.Context) (err error) {823 _, err = p.client.Ping(ctx)824 return825}826// RunContainer takes a RequestContainer as input and it runs a container via the docker sdk827func (p *DockerProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) {828 c, err := p.CreateContainer(ctx, req)829 if err != nil {830 return nil, err831 }832 if err := c.Start(ctx); err != nil {833 return c, fmt.Errorf("%w: could not start container", err)834 }835 return c, nil836}837// daemonHost gets the host or ip of the Docker daemon where ports are exposed on838// Warning: this is based on your Docker host setting. 
Will fail if using an SSH tunnel839// You can use the "TC_HOST" env variable to set this yourself840func (p *DockerProvider) daemonHost(ctx context.Context) (string, error) {841 if p.hostCache != "" {842 return p.hostCache, nil843 }844 host, exists := os.LookupEnv("TC_HOST")845 if exists {846 p.hostCache = host847 return p.hostCache, nil848 }849 // infer from Docker host850 url, err := url.Parse(p.client.DaemonHost())851 if err != nil {852 return "", err853 }854 switch url.Scheme {...

Full Screen

Full Screen

compose_provider.go

Source:compose_provider.go Github

copy

Full Screen

...108// Host gets host (ip or name) of the docker daemon where the container port is exposed109// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel110// You can use the "TC_HOST" env variable to set this yourself111func (c *DockerContainer) Host(ctx context.Context) (string, error) {112 host, err := c.provider.daemonHost(ctx)113 if err != nil {114 return "", err115 }116 return host, nil117}118// MappedPort gets externally mapped port for a container port119func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) {120 inspect, err := c.inspectContainer(ctx)121 if err != nil {122 return "", err123 }124 if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" {125 return port, nil126 }127 ports, err := c.Ports(ctx)128 if err != nil {129 return "", err130 }131 for k, p := range ports {132 if k.Port() != port.Port() {133 continue134 }135 if port.Proto() != "" && k.Proto() != port.Proto() {136 continue137 }138 if len(p) == 0 {139 continue140 }141 return nat.NewPort(k.Proto(), p[0].HostPort)142 }143 return "", errors.New("port not found")144}145// Ports gets the exposed ports for the container.146func (c *DockerContainer) Ports(ctx context.Context) (nat.PortMap, error) {147 inspect, err := c.inspectContainer(ctx)148 if err != nil {149 return nil, err150 }151 return inspect.NetworkSettings.Ports, nil152}153func (c *DockerContainer) inspectContainer(ctx context.Context) (*types.ContainerJSON, error) {154 inspect, err := c.provider.client.ContainerInspect(ctx, c.ID)155 if err != nil {156 return nil, err157 }158 return &inspect, nil159}160// Logs will fetch both STDOUT and STDERR from the current container. 
Returns a161// ReadCloser and leaves it up to the caller to extract what it wants.162func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) {163 options := types.ContainerLogsOptions{164 ShowStdout: true,165 ShowStderr: true,166 }167 return c.provider.client.ContainerLogs(ctx, c.ID, options)168}169// FollowOutput adds a LogConsumer to be sent logs from the container's170// STDOUT and STDERR171func (c *DockerContainer) FollowOutput(consumer LogConsumer) {172 if c.consumers == nil {173 c.consumers = []LogConsumer{174 consumer,175 }176 } else {177 c.consumers = append(c.consumers, consumer)178 }179}180// Name gets the name of the container.181func (c *DockerContainer) Name(ctx context.Context) (string, error) {182 inspect, err := c.inspectContainer(ctx)183 if err != nil {184 return "", err185 }186 return inspect.Name, nil187}188// Networks gets the names of the networks the container is attached to.189func (c *DockerContainer) Networks(ctx context.Context) ([]string, error) {190 inspect, err := c.inspectContainer(ctx)191 if err != nil {192 return []string{}, err193 }194 networks := inspect.NetworkSettings.Networks195 n := []string{}196 for k := range networks {197 n = append(n, k)198 }199 return n, nil200}201// ContainerIP gets the IP address of the primary network within the container.202func (c *DockerContainer) ContainerIP(ctx context.Context) (string, error) {203 inspect, err := c.inspectContainer(ctx)204 if err != nil {205 return "", err206 }207 return inspect.NetworkSettings.IPAddress, nil208}209// NetworkAliases gets the aliases of the container for the networks it is attached to.210func (c *DockerContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) {211 inspect, err := c.inspectContainer(ctx)212 if err != nil {213 return map[string][]string{}, err214 }215 networks := inspect.NetworkSettings.Networks216 a := map[string][]string{}217 for k := range networks {218 a[k] = networks[k].Aliases219 }220 return a, 
nil221}222func (c *DockerContainer) Exec(ctx context.Context, cmd []string) (int, error) {223 cli := c.provider.client224 response, err := cli.ContainerExecCreate(ctx, c.ID, types.ExecConfig{225 Cmd: cmd,226 Detach: false,227 })228 if err != nil {229 return 0, err230 }231 err = cli.ContainerExecStart(ctx, response.ID, types.ExecStartCheck{232 Detach: false,233 })234 if err != nil {235 return 0, err236 }237 var exitCode int238 for {239 execResp, err := cli.ContainerExecInspect(ctx, response.ID)240 if err != nil {241 return 0, err242 }243 if !execResp.Running {244 exitCode = execResp.ExitCode245 break246 }247 time.Sleep(100 * time.Millisecond)248 }249 return exitCode, nil250}251// DockerNetwork represents a network started using Docker252type DockerNetwork struct {253 ID string // Network ID from Docker254 Driver string255 Name string256}257// DockerProvider implements the ContainerProvider interface258type DockerProvider struct {259 client *client.Client260 hostCache string261 defaultNetwork string // default container network262}263// daemonHost gets the host or ip of the Docker daemon where ports are exposed on264// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel265// You can use the "TC_HOST" env variable to set this yourself266func (p *DockerProvider) daemonHost(ctx context.Context) (string, error) {267 if p.hostCache != "" {268 return p.hostCache, nil269 }270 host, exists := os.LookupEnv("TC_HOST")271 if exists {272 p.hostCache = host273 return p.hostCache, nil274 }275 // infer from Docker host276 parsedURL, err := url.Parse(p.client.DaemonHost())277 if err != nil {278 return "", err279 }280 switch parsedURL.Scheme {...

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"80/tcp"},6 WaitingFor: wait.ForListeningPort("80/tcp"),7 }8 alpine, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 panic(err)12 }13 ip, err := alpine.Host(ctx)14 if err != nil {15 panic(err)16 }17 port, err := alpine.MappedPort(ctx, "80")18 if err != nil {19 panic(err)20 }21 fmt.Printf("Container IP: %s, Port: %s", ip, port.Port())22 defer alpine.Terminate(ctx)23}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"80/tcp"},6 WaitingFor: wait.ForHTTP("/"),7 }8 c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatal(err)12 }13 defer c.Terminate(ctx)14 ip, err := c.Host(ctx)15 if err != nil {16 log.Fatal(err)17 }18 fmt.Println(ip)19}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"6379/tcp"},6 WaitingFor: wait.ForListeningPort("6379/tcp"),7 }8 redisContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatal(err)12 }13 defer redisContainer.Terminate(ctx)14 redisHost, err := redisContainer.Host(ctx)15 if err != nil {16 log.Fatal(err)17 }18 redisPort, err := redisContainer.MappedPort(ctx, "6379")19 if err != nil {20 log.Fatal(err)21 }22 fmt.Println(redisHost)23 fmt.Println(redisPort.Int())24}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"80/tcp"},6 WaitingFor: wait.ForHTTP("/"),7 }8 nginxContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatal(err)12 }13 defer nginxContainer.Terminate(ctx)14 ip, err := nginxContainer.Host(ctx)15 if err != nil {16 log.Fatal(err)17 }18 port, err := nginxContainer.MappedPort(ctx, "80")19 if err != nil {20 log.Fatal(err)21 }22}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"5432/tcp"},6 WaitingFor: wait.ForListeningPort("5432/tcp"),7 }8 postgres, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatal(err)12 }13 defer postgres.Terminate(ctx)14 postgresHost, err := postgres.Host(ctx)15 if err != nil {16 log.Fatal(err)17 }18 postgresPort, err := postgres.MappedPort(ctx, "5432/tcp")19 if err != nil {20 log.Fatal(err)21 }22 fmt.Println("postgresHost = ", postgresHost)23 fmt.Println("postgresPort = ", postgresPort.Int())24}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"5432/tcp"},6 WaitingFor: wait.ForLog("database system is ready to accept connections"),7 }8 postgresContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatal(err)12 }13 port, err := postgresContainer.MappedPort(ctx, "5432")14 if err != nil {15 log.Fatal(err)16 }17 ip, err := postgresContainer.Host(ctx)18 if err != nil {19 log.Fatal(err)20 }21 fmt.Printf("postgres is available on %s:%s22", ip, port.Port())23 err = postgresContainer.Terminate(ctx)24 if err != nil {25 log.Fatal(err)26 }27 _, err = postgresContainer.WaitUntilStopped(ctx, 10*time.Second)28 if err != nil {29 log.Fatal(err)30 }31}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"5432/tcp"},6 WaitingFor: wait.ForListeningPort("5432/tcp"),7 }8 postgresContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatal(err)12 }13 defer postgresContainer.Terminate(ctx)14 ip, err := postgresContainer.Host(ctx)15 if err != nil {16 log.Fatal(err)17 }18 port, err := postgresContainer.MappedPort(ctx, "5432")19 if err != nil {20 log.Fatal(err)21 }22 fmt.Printf("host is %s and port is %s", ip, port.Port())23}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1func main() {2 ctx := context.Background()3 req := testcontainers.ContainerRequest{4 ExposedPorts: []string{"6379/tcp"},5 WaitingFor: wait.ForLog("Ready to accept connections"),6 }7 redis, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{8 })9 if err != nil {10 panic(err)11 }12 defer redis.Terminate(ctx)13 ip, err := redis.Host(ctx)14 if err != nil {15 panic(err)16 }17 port, err := redis.MappedPort(ctx, "6379")18 if err != nil {19 panic(err)20 }21 fmt.Println(ip)22 fmt.Println(port.Int())23}24func main() {25 ctx := context.Background()26 req := testcontainers.ContainerRequest{27 ExposedPorts: []string{"6379/tcp"},28 WaitingFor: wait.ForLog("Ready to accept connections"),29 }30 redis, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{31 })32 if err != nil {33 panic(err)34 }35 ip, err := redis.Host(ctx)36 if err != nil {37 panic(err)38 }39 port, err := redis.MappedPort(ctx, "6379")40 if err != nil {41 panic(err)42 }43 fmt.Println(ip)44 fmt.Println(port.Int())45 redis.Terminate(ctx)46 redis.Start(ctx)47 ip, err = redis.Host(ctx)48 if err != nil {49 panic(err)50 }51 port, err = redis.MappedPort(ctx, "6379")52 if err != nil {53 panic(err)54 }55 fmt.Println(ip

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 ctx := context.Background()4 req := testcontainers.ContainerRequest{5 ExposedPorts: []string{"9200/tcp"},6 WaitingFor: wait.ForLog("started"),7 }8 elasticContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{9 })10 if err != nil {11 log.Fatalf("Could not start container: %v", err)12 }13 defer elasticContainer.Terminate(ctx)14 ip, err := elasticContainer.Host(ctx)15 if err != nil {16 log.Fatalf("Could not get container IP: %v", err)17 }18 port, err := elasticContainer.MappedPort(ctx, "9200")19 if err != nil {20 log.Fatalf("Could not get container port: %v", err)21 }22 fmt.Printf("ElasticSearch is available at %s:%s23", ip, port.Port())24 time.Sleep(60 * time.Second)25}

Full Screen

Full Screen

daemonHost

Using AI Code Generation

copy

Full Screen

1func main() {2 ctx := context.Background()3 req := testcontainers.ContainerRequest{4 ExposedPorts: []string{"80/tcp"},5 Cmd: []string{"top"},6 }7 container, _ := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{8 })9 defer container.Terminate(ctx)10 ip, _ := container.Host(ctx)11 fmt.Println(ip)12}13func main() {14 ctx := context.Background()15 req := testcontainers.ContainerRequest{16 ExposedPorts: []string{"80/tcp"},17 Cmd: []string{"top"},18 }19 container, _ := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{20 })21 defer container.Terminate(ctx)22 ip, _ := container.Host(ctx)23 fmt.Println(ip)24}25func main() {26 ctx := context.Background()27 req := testcontainers.ContainerRequest{28 ExposedPorts: []string{"80/tcp"},29 Cmd: []string{"top"},30 }31 container, _ := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{32 })33 defer container.Terminate(ctx)34 ip, _ := container.Host(ctx)35 fmt.Println(ip)36}37func main() {38 ctx := context.Background()39 req := testcontainers.ContainerRequest{40 ExposedPorts: []string{"80/tcp"},41 Cmd: []string{"top"},42 }43 container, _ := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{44 })45 defer container.Terminate(ctx)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Testcontainers-go automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Most used method in

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

NotHelpful