How to use the Copy method of the vmm package

Best Syzkaller code snippets using vmm.Copy

vmm.go

Source: vmm.go (GitHub)


...
func (v *VMM) VMLoadFile(containerName string, srcPath string) error {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return err
	}
	return v.containerCopyFile(srcPath, containerName, HomeDir)
}

// VMUnzipImage unzips a zip file at the imageFile path of the container.
func (v *VMM) VMUnzipImage(containerName string, imageFile string) error {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return err
	}
	match, _ := regexp.MatchString("^[a-zA-z0-9-_]+\\.zip$", imageFile)
	if !match {
		return errors.New("Failed to unzip due to invalid zip filename \"" + imageFile + "\"")
	}
	log.Printf("Unzip %s in container %s at %s", imageFile, containerName, HomeDir)
	_, err := v.containerExec(containerName, "unzip "+path.Join(HomeDir, imageFile), "vsoc-01")
	return errors.Wrap(err, "containerExec")
}

// VMRemove force removes a container, regardless of whether the VM is running.
func (v *VMM) VMRemove(containerName string) error {
	if _, err := v.isManagedContainer(containerName); err != nil {
		return err
	}
	containerID, err := v.getContainerIDByName(containerName)
	if err != nil {
		return errors.Wrap(err, "no containerID")
	}
	err = v.Client.ContainerRemove(context.Background(), containerID, types.ContainerRemoveOptions{
		Force: true,
		// required since /home/vsoc-01 is mounted as an anonymous volume
		RemoveVolumes: true,
	})
	if err != nil {
		return errors.Wrap(err, "docker: ContainerRemove")
	}
	err = v.KVStore.RemoveContainerConfigs(containerName)
	if err != nil {
		return errors.Wrap(err, "kvstore: ContainerRemove")
	}
	err = os.RemoveAll(path.Join(v.DevicesDir, containerName))
	if err != nil {
		return err
	}
	return nil
}

// VMPrune removes all managed containers of the VMM instance. If more than one VMM is running
// on the same host, VMPrune only removes containers with the VMM instance's CFPrefix.
func (v *VMM) VMPrune() {
	cfList, _ := v.listCuttlefishContainers()
	for _, c := range cfList {
		err := v.VMRemove(c.Names[0][1:])
		if err != nil {
			log.Printf("VMPrune (%s): failed. reason:%v\n", c.ID[:10], err)
			continue
		}
		log.Printf("VMPrune (%s): success\n", c.ID[:10])
	}
}

// VMList lists all managed containers of the VMM instance.
func (v *VMM) VMList() ([]VMItem, error) {
	cfList, err := v.listCuttlefishContainers()
	if err != nil {
		return nil, errors.Wrap(err, "listCuttlefishContainers")
	}
	resp := []VMItem{}
	for _, c := range cfList {
		status, err := v.getVMStatus(c)
		containerName := c.Names[0][1:]
		if err != nil {
			return nil, errors.Wrap(err, "getVMStatus")
		}
		cpuStr := v.KVStore.GetContainerValueOrEmpty(containerName, CONFIG_KEY_CPU)
		cpu, _ := strconv.Atoi(cpuStr)
		ramStr := v.KVStore.GetContainerValueOrEmpty(containerName, CONFIG_KEY_RAM)
		ram, _ := strconv.Atoi(ramStr)
		tagsStr := v.KVStore.GetContainerValueOrEmpty(containerName, CONFIG_KEY_TAGS)
		tags := strings.Split(tagsStr, ",")
		resp = append(resp, VMItem{
			ID:         c.ID,
			Name:       v.KVStore.GetContainerValueOrEmpty(containerName, CONFIG_KEY_DEVICE_NAME),
			Created:    strconv.FormatInt(c.Created, 10),
			IP:         c.NetworkSettings.Networks[DefaultNetwork].IPAddress,
			Status:     status,
			CFInstance: c.Labels["cf_instance"],
			OSVersion:  v.KVStore.GetContainerValueOrEmpty(containerName, CONFIG_KEY_AOSP_VERSION),
			CPU:        cpu,
			RAM:        ram,
			Tags:       tags,
			Cmdline:    v.KVStore.GetContainerValueOrEmpty(containerName, CONFIG_KEY_CMDLINE),
		})
	}
	return resp, nil
}

// VMGetAOSPVersion reads the "aosp_version" key of a container config.
func (v *VMM) VMGetAOSPVersion(containerName string) (string, error) {
	return v.KVStore.GetContainerValue(containerName, CONFIG_KEY_AOSP_VERSION)
}

// VMInstallAPK attempts to start an ADB daemon in the container and installs an apkFile on the VM.
// The apkFile should have been placed in the VM's deviceFolder. In the event that an ADB daemon
// is already running, calling startADBDaemon has no effect.
func (v *VMM) VMInstallAPK(containerName string, apkFile string) error {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return err
	}
	f := path.Join(v.DevicesDir, containerName, apkFile)
	if _, err := os.Stat(f); os.IsNotExist(err) {
		log.Printf("VMInstallAPK (%s): abort installAPK because %s does not exist", containerName, f)
		return fmt.Errorf("apk file %s does not exist", apkFile)
	}
	// ADB daemon may have been terminated at this point so let's bring it up
	err := v.startADBDaemon(containerName)
	if err != nil {
		return errors.Wrap(err, "startADBDaemon")
	}
	resp, err := v.containerExec(containerName, "adb install \"/data/"+apkFile+"\"", "vsoc-01")
	if err != nil {
		return errors.Wrap(err, "adb install failed")
	}
	if resp.ExitCode != 0 {
		return errors.New("non-zero exit in installAPK: " + resp.errBuffer.String())
	}
	return nil
}

// ContainerAttachToTerminal starts a bash shell in the container and returns a bi-directional stream for the frontend to interact with.
// It's up to the caller to close the hijacked connection by calling types.HijackedResponse.Close.
// It's up to the caller to call KillTerminal() to kill the long running process at exit.
func (v *VMM) ContainerAttachToTerminal(containerName string) (ir types.IDResponse, hr types.HijackedResponse, err error) {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return types.IDResponse{}, types.HijackedResponse{}, err
	}
	log.Printf("ExecAttachToTerminal %s\n", containerName)
	// TODO to do it properly, we might need to get terminal dimensions from the front end
	// and dynamically adjust docker's tty dimensions
	// reference: https://github.com/xtermjs/xterm.js/issues/1359
	cmd := []string{"/bin/bash"}
	env := []string{"COLUMNS=`tput cols`", "LINES=`tput lines`"}
	return v.ContainerAttachToProcess(containerName, cmd, env)
}

// ContainerAttachToProcess starts a long running process with TTY and returns a bi-directional stream for the frontend to interact with.
// Notice:
//   - It's up to the caller to close the hijacked connection by calling types.HijackedResponse.Close.
//   - It's up to the caller to call KillTerminal() to kill the long running process at exit. (see reason below)
//
// Explanation: types.HijackedResponse.Close only calls HijackedResponse.Conn.Close(), which leaves the process in the
// container to run forever. Moby's implementation of ContainerExecStart only terminates the process when either
// the context is Done or the attached stream returns EOF/error. In our use cases (e.g. bash/tail -f), the only possible
// way to terminate such long running processes by API is through the context. However, if we trace ContainerExecAttach,
// eventually we end up at...
//
//	// github.com/moby/moby/api/server/router/container/exec.go#L132
//	// Now run the user process in container.
//	// Maybe we should we pass ctx here if we're not detaching?
//	s.backend.ContainerExecStart(context.Background(), ...)
//
// ... which always creates a new context.Background(). Apparently the Moby team didn't implement the `maybe` part that allows
// context passing.
func (v *VMM) ContainerAttachToProcess(containerName string, cmd []string, env []string) (ID types.IDResponse, hr types.HijackedResponse, err error) {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return types.IDResponse{}, types.HijackedResponse{}, err
	}
	ctx := context.Background()
	ir, err := v.Client.ContainerExecCreate(ctx, containerName, types.ExecConfig{
		User:         "vsoc-01",
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Cmd:          cmd,
		Tty:          true,
		Env:          env,
	})
	if err != nil {
		return types.IDResponse{}, types.HijackedResponse{}, errors.Wrap(err, "docker: failed to create an exec config")
	}
	hijackedResp, err := v.Client.ContainerExecAttach(ctx, ir.ID, types.ExecStartCheck{Detach: false, Tty: true})
	if err != nil {
		return types.IDResponse{}, hijackedResp, errors.Wrap(err, "docker: failed to execute/attach to process")
	}
	return ir, hijackedResp, nil
}

// ContainerKillTerminal kills the bash process after use. To be called when done with the process created by ExecAttachToTerminal().
func (v *VMM) ContainerKillTerminal(containerName string) error {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return err
	}
	return v.ContainerKillProcess(containerName, "/bin/bash")
}

// ContainerKillProcess kills all processes in the given container with the given cmd. To be called when done with the process created by ExecAttachToTTYProcess().
//
// This is an ugly workaround since Moby's exec kill is long overdue (since 2014 https://github.com/moby/moby/pull/41548).
// Unfortunately we have to kill all pids of the same cmd since we can't get the specific terminal's pid in the container's
// pid namespace. This is because, when creating a terminal in AttachToTerminal(), ContainerExecCreate only returns
// an execID that links to the spawned process's pid in the HOST pid namespace. We can't directly kill a host process unless
// we run the API server as root, which is undesirable.
func (v *VMM) ContainerKillProcess(containerName string, cmd string) error {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return err
	}
	process := strings.Split(cmd, " ")[0]
	resp, err := v.containerExec(containerName, fmt.Sprintf("ps -ef | awk '$8==\"%s\" {print $2}'", process), "vsoc-01")
	if err != nil {
		return errors.Wrap(err, "containerExec list process")
	}
	pids := strings.Split(resp.outBuffer.String(), "\n")
	if len(pids) == 0 {
		log.Printf("ContainerKillProcess (%s): 0 process found %s\n", containerName, process)
	}
	for _, pid := range pids {
		if pid != "" {
			_, err := v.containerExec(containerName, fmt.Sprintf("kill %s", pid), "root")
			if err != nil {
				// kill with best effort so just do logging
				log.Printf("ContainerKillProcess (%s): failed to kill %s;%s due to %v\n", containerName, pid, process, err)
				continue
			}
			log.Printf("ContainerKillProcess (%s): killed %s:%s", containerName, pid, process)
		}
	}
	return nil
}

// ContainerTerminalResize resizes the TTY size of a given execID.
func (v *VMM) ContainerTerminalResize(execID string, lines uint, cols uint) error {
	return v.Client.ContainerExecResize(context.Background(), execID, types.ResizeOptions{Height: lines, Width: cols})
}

// ContainerListFiles gets a list of files in the given container's path.
// Results are of the following format, where each line represents a file/folder:
//
//	-rw-r--r--|vsoc-01|vsoc-01|65536|1645183964.5579601750|vbmeta.img
func (v *VMM) ContainerListFiles(containerName string, folder string) ([]string, error) {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return []string{}, err
	}
	cid, _ := v.getContainerIDByName(containerName)
	folder = path.Clean(folder)
	_, err := v.Client.ContainerStatPath(context.Background(), cid, folder)
	if err != nil {
		return []string{}, err
	}
	resp, err := v.containerExec(containerName, "find "+folder+" -maxdepth 1 -printf \"%M|%u|%g|%s|%A@|%P\n\" | sort -t '|' -k6", "vsoc-01")
	if err != nil || resp.ExitCode != 0 {
		return []string{}, errors.Wrap(err, "containerExec find")
	}
	lines := strings.Split(resp.outBuffer.String(), "\n")
	// remove the last empty line due to split
	return lines[:len(lines)-1], nil
}

// ContainaerFileExists checks if a given file/folder exists in the container.
func (v *VMM) ContainaerFileExists(containerName string, filePath string) error {
	cid, _ := v.getContainerIDByName(containerName)
	_, err := v.Client.ContainerStatPath(context.Background(), cid, filePath)
	return err
}

// ContainerReadFile gets a reader of a file in the container. As per the Moby API's design, the file will be in TAR format, so
// the caller should use tar.NewReader(reader) to obtain a corresponding tar reader.
// It is up to the caller to close the reader.
func (v *VMM) ContainerReadFile(containerName string, filePath string) (io.ReadCloser, error) {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return nil, err
	}
	id, err := v.getContainerIDByName(containerName)
	if err != nil {
		return nil, err
	}
	log.Printf("ContainerReadFile (%s): Copying file %s", containerName, filePath)
	// notice the API returns a reader for a TAR archive
	rc, _, err := v.Client.CopyFromContainer(context.TODO(), id, filePath)
	if err != nil {
		return nil, err
	}
	return rc, nil
}

// ContainerUpdateConfig updates a container's config in the local KVStore.
func (v *VMM) ContainerUpdateConfig(containerName string, key string, value string) error {
	if err := v.isManagedRunningContainer(containerName); err != nil {
		return err
	}
	return v.KVStore.PutContainterValue(containerName, []KeyValue{{key, value}})
}

// getNextCFInstanceNumber returns the next smallest cf_instance number that has not been assigned.
func (v *VMM) getNextCFInstanceNumber() (int, error) {
	// Here we get all cuttlefish containers from the host's view, regardless of which VMM instance they belong to.
	//
	// listCuttlefishContainers is not used because it filters containers based on v.CFPrefix. In the case that
	// two VMMs are running on the same host (i.e. 1 for dev, 1 for go test), using listCuttlefishContainers would
	// create overlapping cf_instance numbers, which could lead to port conflicts.
	containerList, err := v.Client.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		return -1, err
	}
	indexes := []int{}
	cfList := []types.Container{}
	for _, c := range containerList {
		if value, ok := c.Labels["cf_instance"]; ok {
			cfList = append(cfList, c)
			cf_idx, err := strconv.Atoi(value)
			if err != nil {
				return -1, err
			}
			indexes = append(indexes, cf_idx)
		}
	}
	sort.Ints(indexes)
	log.Printf("getNextCFInstanceNumber: num of existing cuttlefish containers: %d - %v\n", len(indexes), indexes)
	if len(indexes) == 0 {
		return 1, nil
	} else if indexes[len(indexes)-1] == len(cfList) {
		// all assigned cf_instance numbers are contiguous so far
		return len(cfList) + 1, nil
	} else {
		// find the smallest available cf_instance number
		i := 1
		for {
			if indexes[i-1] != i {
				return i, nil
			}
			i = i + 1
		}
	}
}

// getContainerCFInstanceNumber reads the cf_instance label of a container.
func (v *VMM) getContainerCFInstanceNumber(containerName string) (int, error) {
	containerJSON, err := v.getContainerJSON(containerName)
	if err != nil {
		return -1, err
	}
	num, err := strconv.Atoi(containerJSON.Config.Labels["cf_instance"])
	if err != nil {
		return -1, err
	}
	return num, nil
}

func (v *VMM) getContainerIP(containerName string) (string, error) {
	containerJSON, err := v.getContainerJSON(containerName)
	if err != nil {
		return "", err
	}
	return containerJSON.NetworkSettings.IPAddress, nil
}

func (v *VMM) getContainerJSON(containerName string) (types.ContainerJSON, error) {
	cid, err := v.getContainerIDByName(containerName)
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return v.Client.ContainerInspect(context.Background(), cid)
}

// startVNCProxy starts a websockify daemon in the container and listens for websocket-based VNC connections on the container port wsPort.
// startVNCProxy assumes the websockify binary exists in the container.
//
// When a cuttlefish VM is created with the --start-vnc-server flag, /home/vsoc-01/bin/vnc_server starts to listen
// on 6444 of the `lo` interface. This vnc_server only supports RFB 3.x, which isn't compatible with the websocket-based
// protocol of novnc.js. To allow the frontend to access the VNC stream inside of the container, we need to both
// translate RFB to websocket and listen on a port on the container's `eth0` interface. websockify can do both.
func (v *VMM) startVNCProxy(containerName string) error {
	cfIndex, err := v.getContainerCFInstanceNumber(containerName)
	if err != nil {
		return errors.Wrap(err, "getContainerCFInstanceNumber")
	}
	vncPort := 6444 + cfIndex - 1
	wsPort := 6080 + cfIndex - 1
	resp, err := v.containerExec(containerName, fmt.Sprintf("websockify -D %d 127.0.0.1:%d --log-file websockify.log", wsPort, vncPort), "vsoc-01")
	if err != nil {
		return err
	}
	if resp.ExitCode != 0 {
		return errors.New("non-zero exit code in websockify. output:" + resp.errBuffer.String())
	}
	log.Printf("startVNCProxy (%s): websockify daemon started\n", containerName)
	return nil
}

// startADBDaemon starts an ADB daemon in the container and tries to connect to the VM.
// The function should be called when the VM has booted up and started listening on the adb port.
// The function is safe to call repeatedly as adb will ignore duplicated connect commands and return "already connected".
func (v *VMM) startADBDaemon(containerName string) error {
	cfIndex, err := v.getContainerCFInstanceNumber(containerName)
	if err != nil {
		return err
	}
	adbPort := 6520 + cfIndex - 1
	ip, err := v.getContainerIP(containerName)
	if err != nil {
		return err
	}
	resp, err := v.containerExec(containerName, fmt.Sprintf("adb connect %s:%d", ip, adbPort), "root")
	if err != nil {
		return err
	}
	if resp.ExitCode != 0 {
		return errors.New("non-zero exit code in adb daemon. stderr:" + resp.errBuffer.String())
	}
	log.Printf("startADBDaemon (%s): connected to %s:%d\n", containerName, ip, adbPort)
	log.Printf("startADBDaemon (%s): stdout:%s\n", containerName, resp.outBuffer.String())
	log.Printf("startADBDaemon (%s): stderr:%s\n", containerName, resp.errBuffer.String())
	return nil
}

func (v *VMM) installTools(containerName string) error {
	resp, err := v.containerExec(containerName, "apt update", "root")
	if err != nil {
		return errors.Wrap(err, "failed to apt update")
	}
	if resp.ExitCode != 0 {
		return errors.New("Failed to apt update. reason:" + resp.errBuffer.String())
	}
	resp, err = v.containerExec(containerName, "apt install -y -qq adb git htop python3-pip iputils-ping less websockify", "root")
	if err != nil {
		return errors.Wrap(err, "failed to execute apt install")
	}
	if resp.ExitCode != 0 {
		return errors.New("Failed to apt install additional tools, reason:" + resp.errBuffer.String())
	}
	resp, err = v.containerExec(containerName, "pip3 install frida-tools", "root")
	if err != nil {
		return err
	}
	if resp.ExitCode != 0 {
		return errors.New("non-zero return when installing python packages. reason:" + resp.errBuffer.String())
	}
	return nil
}

func (v *VMM) getContainerIDByName(target string) (containerID string, err error) {
	cfList, err := v.listCuttlefishContainers()
	if err != nil {
		return "", err
	}
	for _, c := range cfList {
		for _, name := range c.Names {
			// docker container names all start with "/"
			prefix := "/" + v.CFPrefix
			if strings.HasPrefix(name, prefix) && strings.Contains(name, target) {
				return c.ID, nil
			}
		}
	}
	return "", errors.New("container not found")
}

// containerCopyFile copies a single file into the container.
// If srcPath isn't a .tar / .tar.gz, it will be tar-ed in a temporary folder first.
func (v *VMM) containerCopyFile(srcPath string, containerName string, dstPath string) error {
	start := time.Now()
	if strings.HasSuffix(srcPath, ".tar") || strings.HasSuffix(srcPath, ".tar.gz") {
		if err := v.containerCopyTarFile(srcPath, containerName, dstPath); err != nil {
			return errors.Wrap(err, "containerCopyTarFile")
		}
		return nil
	}
	tmpdir, err := ioutil.TempDir("", "matrisea")
	if err != nil {
		return errors.Wrap(err, "cannot create tmp dir")
	}
	defer os.RemoveAll(tmpdir)
	srcFolder, srcFile := filepath.Split(srcPath)
	cmdStr := fmt.Sprintf("cd %s && tar -cvzf \"%s/%s.tar\" \"%s\"", srcFolder, tmpdir, srcFile, srcFile)
	log.Println(cmdStr)
	// TODO read stderr and always print to console
	cmd := exec.Command("sh", "-c", cmdStr)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return errors.Wrap(err, "error during tar")
	}
	archive := tmpdir + "/" + srcFile + ".tar"
	if err = v.containerCopyTarFile(archive, containerName, dstPath); err != nil {
		return errors.Wrap(err, "containerCopyTarFile")
	}
	elapsed := time.Since(start)
	log.Printf("containerCopyFile (%s): src:%s dst:%s cost:%s\n", containerName, srcPath, dstPath, elapsed)
	return nil
}

// containerCopyTarFile is a wrapper function of docker's CopyToContainer API where the srcPath must be a tar file.
// The API will fail silently if srcPath isn't a tar.
func (v *VMM) containerCopyTarFile(srcPath string, containerName string, dstPath string) error {
	containerID, err := v.getContainerIDByName(containerName)
	if err != nil {
		return err
	}
	archive, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer archive.Close()
	err = v.Client.CopyToContainer(context.Background(), containerID, dstPath, bufio.NewReader(archive), types.CopyToContainerOptions{})
	if err != nil {
		return errors.Wrap(err, "docker: CopyToContainer")
	}
	return nil
}

func (v *VMM) containerExec(containerName string, cmd string, user string) (ExecResult, error) {
	return v.containerExecWithContext(context.Background(), containerName, cmd, user)
}

// containerExecWithContext executes a command in a container and returns the result
// containing stdout, stderr, and the exit code. Note:
//   - The function is synchronous
//   - stdin is closed
//
// Adapted from moby's exec implementation
// https://github.com/moby/moby/blob/master/integration/internal/container/exec.go
func (v *VMM) containerExecWithContext(ctx context.Context, containerName string, cmd string, user string) (ExecResult, error) {
	execConfig := types.ExecConfig{
		User:         user,
		AttachStdout: true,
		AttachStderr: true,
		Cmd:          []string{"/bin/sh", "-c", cmd},
	}
	cresp, err := v.Client.ContainerExecCreate(ctx, containerName, execConfig)
	if err != nil {
		return ExecResult{}, errors.Wrap(err, "docker: failed to create an exec config")
	}
	execID := cresp.ID
	aresp, err := v.Client.ContainerExecAttach(ctx, execID, types.ExecStartCheck{})
	if err != nil {
		return ExecResult{}, errors.Wrap(err, "docker: failed to execute/attach to "+cmd)
	}
	defer aresp.Close()
	var outBuf, errBuf bytes.Buffer
	outputDone := make(chan error, 1)
	go func() {
		// StdCopy demultiplexes the stream into two buffers
		_, err = stdcopy.StdCopy(&outBuf, &errBuf, aresp.Reader)
		outputDone <- err
	}()
	select {
	case err := <-outputDone:
		if err != nil {
			return ExecResult{}, err
		}
	case <-ctx.Done():
		return ExecResult{}, errors.Wrap(ctx.Err(), "context done")
	}
	iresp, err := v.Client.ContainerExecInspect(ctx, execID)
	if err != nil {
		return ExecResult{}, errors.Wrap(err, "docker: ContainerExecInspect")
	}
	// Let the caller handle a non-zero exit code.
	return ExecResult{ExitCode: iresp.ExitCode, outBuffer: &outBuf, errBuffer: &errBuf}, nil
}

// listCuttlefishContainers gets a list of managed containers of the VMM instance.
func (v *VMM) listCuttlefishContainers() ([]types.Container, error) {
	containers, err := v.Client.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		return nil, err
	}
	cflist := []types.Container{}
	for _, c := range containers {
		if strings.HasPrefix(c.Names[0], "/"+v.CFPrefix) {
			cflist = append(cflist, c)
		}
	}
	return cflist, nil
}

type ExecChannelResult struct {
	resp ExecResult
	err  error
}

// getVMStatus derives a VMStatus, with best effort, from the container's status and whether launch_cvd is running in the container.
// Due to the concurrency design of the underlying Moby API, if a given container is locked (busy with other requests),
// the returned VMStatus might not accurately reflect the actual status of launch_cvd.
func (v *VMM) getVMStatus(c types.Container) (VMStatus, error) {
	// Create a context that will be canceled in 500ms.
	//
	// Many Moby APIs acquire a per-container lock during execution. For example, in Daemon.containerCopy (used by VMM.containerCopyFile):
	// https://github.com/moby/moby/blob/eb9e42a09ee123af1d95bf7d46dd738258fa2109/daemon/archive.go#L390
	// If one of such APIs runs for a long time (e.g. copying a large file from a container), the container's lock
	// can be held long enough to block subsequent API calls. Unfortunately, getVMStatus is the one that gets
	// blocked the most because it's used by VMList, one of the hottest code paths, which gets called by every client
	// every 5 seconds. Hence, to avoid waiting for a container's lock indefinitely, we only query a container's
	// process list (`ps aux`) for a limited amount of time.
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	containerName := c.Names[0][1:]
	// When a container is up, c.Status looks like "Up 2 days"
	if strings.HasPrefix(c.Status, "Up") {
		ch := make(chan ExecChannelResult, 1)
		go func() {
			// use the grep "[x]xxx" technique to prevent grep itself from showing up in the ps result
...


cmd.go

Source: cmd.go (GitHub)


...
		spanRootfsMetadata.Finish()
		return 1
	}
	spanRootfsMetadata.Finish()
	spanRootfsCopy := tracer.StartSpan("run-rootfs-copy", opentracing.ChildOf(spanRootfsMetadata.Context()))
	// we do need to copy the rootfs file to a temp directory
	// because the jailer directory indeed links to the target rootfs
	// and changes are persisted
	runRootfs := filepath.Join(cacheDirectory, naming.RootfsFileName)
	if err := utils.CopyFile(resolvedRootfs.HostPath(), runRootfs, utils.RootFSCopyBufferSize); err != nil {
		rootLogger.Error("failed copying requested rootfs to temp build location",
			"source", resolvedRootfs.HostPath(),
			"target", runRootfs,
			"reason", err)
		spanRootfsCopy.SetBaggageItem("error", err.Error())
		spanRootfsCopy.Finish()
		return 1
	}
	spanRootfsCopy.Finish()
	// get the veth interface name and also write it to a file:
	vethIfaceName := naming.GetRandomVethName()
	spanRun.SetTag("ifname", vethIfaceName)
	// don't use resolvedRootfs.HostPath() below this point:
	machineConfig.
		WithDaemonize(commandConfig.Daemonize).
		WithKernelOverride(resolvedKernel.HostPath()).
		WithRootfsOverride(runRootfs)
	vmmLogger := rootLogger.With("vmm-id", jailingFcConfig.VMMID(), "veth-name", vethIfaceName)
	vmmLogger.Info("running VMM",
		"from", commandConfig.From,
		"source-rootfs", machineConfig.RootfsOverride(),
		"jail", jailingFcConfig.JailerChrootDirectory())
	// gather the running vmm metadata:
	runMetadata := &metadata.MDRun{
		Configs: metadata.MDRunConfigs{
			CNI:       cniConfig,
			Jailer:    jailingFcConfig,
			Machine:   machineConfig,
			RunConfig: commandConfig,
		},
		Rootfs:   mdRootfs,
		RunCache: cacheDirectory,
		Type:     metadata.MetadataTypeRun,
	}
	vmmStrategy := configs.DefaultFirectackerStrategy(machineConfig).
		AddRequirements(func() *arbitrary.HandlerPlacement {
			// add this one after the previous one so, by that logic,
			// this one will be placed and executed before the first one
			return arbitrary.NewHandlerPlacement(strategy.
				NewMetadataExtractorHandler(rootLogger, runMetadata), firecracker.CreateBootSourceHandlerName)
		})
	spanVMMCreate := tracer.StartSpan("run-vmm-create", opentracing.ChildOf(spanRootfsCopy.Context()))
	vmmProvider := vmm.NewDefaultProvider(cniConfig, jailingFcConfig, machineConfig).
		WithHandlersAdapter(vmmStrategy).
		WithVethIfaceName(vethIfaceName)
	vmmCtx, vmmCancel := context.WithCancel(context.Background())
	cleanup.Add(func() {
		vmmCancel()
	})
	spanVMMCreate.Finish()
	cleanup.Add(func() {
		span := tracer.StartSpan("run-cleanup-jail", opentracing.ChildOf(spanVMMCreate.Context()))
		vmmLogger.Info("cleaning up jail directory")
		if err := os.RemoveAll(jailingFcConfig.JailerChrootDirectory()); err != nil {
			vmmLogger.Error("jail directory removal status", "error", err)
			span.SetBaggageItem("error", err.Error())
...
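
utils.CopyFile above belongs to the surrounding project and isn't shown here; from the call site alone it copies the resolved rootfs into the run cache using a fixed-size buffer. A rough, hypothetical equivalent, with the signature inferred from the call rather than taken from the real utils package, could look like this:

// Package fileutil sketches a stand-in for the utils.CopyFile helper used above.
package fileutil

import (
	"io"
	"os"
)

// CopyFile streams src into dst through a caller-sized buffer and flushes the result to disk.
func CopyFile(src, dst string, bufferSize int) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer out.Close()

	// io.CopyBuffer reuses the provided buffer instead of allocating a new one per call.
	if _, err := io.CopyBuffer(out, in, make([]byte, bufferSize)); err != nil {
		return err
	}
	return out.Sync()
}

Copying to a scratch path first matters here because, as the comment in the snippet notes, the jailer directory links to the target rootfs, so running the VM against the original file would persist guest changes into it.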


vmm_webhook_test.go

Source: vmm_webhook_test.go (GitHub)


...
		vmm: validVMM,
		vm:  validVM,
	}, {
		vmm: func() *virtv1alpha1.VirtualMachineMigration {
			vmm := validVMM.DeepCopy()
			vmm.Spec.VMName = ""
			return vmm
		}(),
		vm: validVM,
	}, {
		vmm: validVMM,
		vm: func() *virtv1alpha1.VirtualMachine {
			vm := validVM.DeepCopy()
			vm.Status.Conditions[0].Status = metav1.ConditionFalse
			vm.Status.Conditions[0].Message = "VM with containerDisk is not migratable"
			return vm
		}(),
		invalidDetail: "VM with containerDisk is not migratable",
	}, {
		vmm: validVMM,
		vm: func() *virtv1alpha1.VirtualMachine {
			vm := validVM.DeepCopy()
			vm.Status.Conditions = []metav1.Condition{}
			return vm
		}(),
		invalidDetail: "VM migratable condition status is unknown",
	}}
	for _, tc := range tests {
		c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.vm).Build()
		errs := ValidateVMM(context.Background(), c, tc.vmm, nil)
		for _, err := range errs {
			assert.Contains(t, err.Detail, tc.invalidDetail)
		}
	}
}
...
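
Note how each invalid case derives its VirtualMachine or VirtualMachineMigration from the shared valid fixture via the generated DeepCopy method and mutates only the copy; editing validVM or validVMM in place would let one table entry's changes leak into the next and make the assertions order-dependent.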


Copy

Using AI Code Generation


// main.go: a urfave/cli v1 command that calls Copy; the vmm import path below is a
// placeholder for wherever the vmm package defined underneath lives in your module.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli"

	"example.com/yourapp/vmm" // placeholder module path
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:    "copy",
			Aliases: []string{"c"},
			Action: func(c *cli.Context) error {
				fmt.Println("copying")
				vmm.NewVmm().Copy()
				return nil
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

// vmm/vmm.go
package vmm

import "fmt"

type Vmm struct {
}

// Copy prints a marker; the generated example gives it no real copying behaviour.
func (v Vmm) Copy() {
	fmt.Println("copying")
}

func NewVmm() *Vmm {
	return &Vmm{}
}

// go.mod fragment (truncated)
require (
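
Assuming the placeholder import path is replaced with the module's real path and the vmm package sits in its own directory, running `go run . copy` (or the `c` alias) prints "copying" twice: once from the command's Action and once from the Copy method itself.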


Copy

Using AI Code Generation


import (
func main() {
	vmm1.Init()
	vmm1.Set(0, 0, 1)
	vmm1.Set(1, 1, 1)
	vmm1.Set(2, 2, 1)
	vmm1.Set(3, 3, 1)
	vmm1.Set(4, 4, 1)
	vmm1.Set(5, 5, 1)
	vmm1.Set(6, 6, 1)
	vmm1.Set(7, 7, 1)
	vmm1.Set(8, 8, 1)
	vmm1.Set(9, 9, 1)
	vmm1.Set(10, 10, 1)
	vmm1.Set(11, 11, 1)
	vmm1.Set(12, 12, 1)
	vmm1.Set(13, 13, 1)
	vmm1.Set(14, 14, 1)
	vmm1.Set(15, 15, 1)
	vmm1.Set(16, 16, 1)
	vmm1.Set(17, 17, 1)
	vmm1.Set(18, 18, 1)
	vmm1.Set(19, 19, 1)
	vmm1.Set(20, 20, 1)
	vmm1.Set(21, 21, 1)
	vmm1.Set(22, 22, 1)
	vmm1.Set(23, 23, 1)
	vmm1.Set(24, 24, 1)
	vmm1.Set(25, 25, 1)
	vmm1.Set(26, 26, 1)
	vmm1.Set(27, 27, 1)
	vmm1.Set(28, 28, 1)
	vmm1.Set(29, 29, 1)
	vmm1.Set(30, 30, 1)
	vmm1.Set(31, 31, 1)
	vmm1.Set(32, 32, 1)
	vmm1.Set(33, 33, 1)
	vmm1.Set(34, 34, 1)
	vmm1.Set(35,


Copy

Using AI Code Generation


package main

import (
	"fmt"

	"example.com/yourapp/vmm" // placeholder module path; the vmm package isn't shown in the snippet
)

func main() {
	vmm1 := vmm.NewVmm(5)
	vmm1.Set(0, 1)
	vmm1.Set(1, 2)
	vmm1.Set(2, 3)
	vmm1.Set(3, 4)
	vmm1.Set(4, 5)
	vmm2 := vmm.NewVmm(5)
	vmm2.Copy(vmm1)
	fmt.Println("vmm1 = ", vmm1)
	fmt.Println("vmm2 = ", vmm2)
}
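
The vmm package in this generated example isn't shown, so NewVmm(5), Set(i, v) and Copy(src) are assumptions inferred from the call sites rather than a documented API. One plausible shape for such a type, sketched purely for illustration, is a fixed-size slice-backed container whose Copy replaces the receiver's values with a copy of the argument's:

// Package vmm is a hypothetical implementation matching the calls above.
package vmm

import "fmt"

type Vmm struct {
	values []int
}

// NewVmm allocates a container with the given number of slots.
func NewVmm(size int) *Vmm {
	return &Vmm{values: make([]int, size)}
}

// Set stores v at index i, ignoring out-of-range indexes.
func (m *Vmm) Set(i, v int) {
	if i >= 0 && i < len(m.values) {
		m.values[i] = v
	}
}

// Copy overwrites the receiver's values with a copy of src's values.
func (m *Vmm) Copy(src *Vmm) {
	m.values = make([]int, len(src.values))
	copy(m.values, src.values)
}

// String makes fmt.Println render the stored values.
func (m *Vmm) String() string {
	return fmt.Sprint(m.values)
}

With a definition along those lines, the program above prints the same five values for vmm1 and vmm2, since Copy duplicates rather than aliases the underlying slice.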


Copy

Using AI Code Generation


package main

import (
	"flag"

	"github.com/golang/glog"

	"example.com/yourapp/vmm" // placeholder module path; the vmm package isn't shown in the snippet
)

func main() {
	// glog registers its flags (-logtostderr and friends) and expects flag.Parse to run first.
	flag.Parse()
	defer glog.Flush()

	glog.Info("Starting the program")
	vmmobj := vmm.Vmm{}
	vmmobj.Copy()
	glog.Info("Ending the program")
}

