Best Gauge code snippet using result.AddItems
Source: restore_test.go
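Before the full file, here is a minimal sketch of the addItems pattern the tests below rely on: a tar writer assembles an in-memory backup archive (objects land under paths like resources/pods/namespaces/ns-1/pod-1.json, as the raw add(...) call later in the file shows), the test harness registers fake API resources, and the restorer is run against that archive. The function name TestAddItemsPattern and the condensed flow are illustrative only; the sketch assumes the package-internal helpers defined elsewhere in restore_test.go (newTarWriter, defaultRestore, defaultBackup, newHarness, assertEmptyResults, assertAPIContents) and the imports shown in the file.

// Illustrative sketch of the addItems usage pattern from this file.
func TestAddItemsPattern(t *testing.T) {
	// addItems writes each object into an in-memory backup tarball;
	// done() returns the archive as an io.Reader.
	tarball := newTarWriter(t).
		addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).
		done()

	// Register the Pod resource with the fake discovery client so the
	// restorer can resolve it, then refresh the discovery helper.
	h := newHarness(t)
	h.DiscoveryClient.WithAPIResource(test.Pods())
	require.NoError(t, h.restorer.discoveryHelper.Refresh())

	// Run the restore against the tarball and verify what was created in the API.
	data := Request{
		Log:          h.log,
		Restore:      defaultRestore().Result(),
		Backup:       defaultBackup().Result(),
		BackupReader: tarball,
	}
	warnings, errs := h.restorer.Restore(
		data,
		nil, // actions
		nil, // snapshot location lister
		nil, // volume snapshotter getter
	)
	assertEmptyResults(t, warnings, errs)
	assertAPIContents(t, h, map[*test.APIResource][]string{
		test.Pods(): {"ns-1/pod-1"},
	})
}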
/*
Copyright 2019 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package restore

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	ctx "context"
	"encoding/json"
	"fmt"
	"io"
	"sort"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	kubetesting "k8s.io/client-go/testing"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/archive"
	"github.com/vmware-tanzu/velero/pkg/builder"
	"github.com/vmware-tanzu/velero/pkg/client"
	"github.com/vmware-tanzu/velero/pkg/discovery"
	velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions"
	"github.com/vmware-tanzu/velero/pkg/kuberesource"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
	"github.com/vmware-tanzu/velero/pkg/restic"
	resticmocks "github.com/vmware-tanzu/velero/pkg/restic/mocks"
	"github.com/vmware-tanzu/velero/pkg/test"
	testutil "github.com/vmware-tanzu/velero/pkg/test"
	"github.com/vmware-tanzu/velero/pkg/util/encode"
	kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
	"github.com/vmware-tanzu/velero/pkg/volume"
)

// TestRestoreResourceFiltering runs restores with different combinations
// of resource filters (included/excluded resources, included/excluded
// namespaces, label selectors, "include cluster resources" flag), and
// verifies that the set of items created in the API are correct.
// Validation is done by looking at the namespaces/names of the items in
// the API; contents are not checked.
func TestRestoreResourceFiltering(t *testing.T) {
	tests := []struct {
		name         string
		restore      *velerov1api.Restore
		backup       *velerov1api.Backup
		apiResources []*test.APIResource
		tarball      io.Reader
		want         map[*test.APIResource][]string
	}{
		{
			name:    "no filters restores everything",
			restore: defaultRestore().Result(),
			backup:  defaultBackup().Result(),
			tarball: newTarWriter(t).
				addItems("pods",
					builder.ForPod("ns-1", "pod-1").Result(),
					builder.ForPod("ns-2", "pod-2").Result(),
				).
				addItems("persistentvolumes",
					builder.ForPersistentVolume("pv-1").Result(),
					builder.ForPersistentVolume("pv-2").Result(),
				).
				done(),
			apiResources: []*test.APIResource{
				test.Pods(),
				test.PVs(),
			},
			want: map[*test.APIResource][]string{
				test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},
				test.PVs():  {"/pv-1", "/pv-2"},
			},
		},
		{
			name:    "included resources filter only restores resources of those types",
			restore: defaultRestore().IncludedResources("pods").Result(),
			backup:  defaultBackup().Result(),
			tarball: newTarWriter(t).
				addItems("pods",
					builder.ForPod("ns-1", "pod-1").Result(),
builder.ForPod("ns-2", "pod-2").Result(),100 ).101 addItems("persistentvolumes",102 builder.ForPersistentVolume("pv-1").Result(),103 builder.ForPersistentVolume("pv-2").Result(),104 ).105 done(),106 apiResources: []*test.APIResource{107 test.Pods(),108 test.PVs(),109 },110 want: map[*test.APIResource][]string{111 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},112 },113 },114 {115 name: "excluded resources filter only restores resources not of those types",116 restore: defaultRestore().ExcludedResources("pvs").Result(),117 backup: defaultBackup().Result(),118 tarball: newTarWriter(t).119 addItems("pods",120 builder.ForPod("ns-1", "pod-1").Result(),121 builder.ForPod("ns-2", "pod-2").Result(),122 ).123 addItems("persistentvolumes",124 builder.ForPersistentVolume("pv-1").Result(),125 builder.ForPersistentVolume("pv-2").Result(),126 ).127 done(),128 apiResources: []*test.APIResource{129 test.Pods(),130 test.PVs(),131 },132 want: map[*test.APIResource][]string{133 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},134 },135 },136 {137 name: "included namespaces filter only restores resources in those namespaces",138 restore: defaultRestore().IncludedNamespaces("ns-1").Result(),139 backup: defaultBackup().Result(),140 tarball: newTarWriter(t).141 addItems("pods",142 builder.ForPod("ns-1", "pod-1").Result(),143 builder.ForPod("ns-2", "pod-2").Result(),144 ).145 addItems("deployments.apps",146 builder.ForDeployment("ns-1", "deploy-1").Result(),147 builder.ForDeployment("ns-2", "deploy-2").Result(),148 ).149 addItems("persistentvolumes",150 builder.ForPersistentVolume("pv-1").Result(),151 builder.ForPersistentVolume("pv-2").Result(),152 ).153 done(),154 apiResources: []*test.APIResource{155 test.Pods(),156 test.Deployments(),157 test.PVs(),158 },159 want: map[*test.APIResource][]string{160 test.Pods(): {"ns-1/pod-1"},161 test.Deployments(): {"ns-1/deploy-1"},162 },163 },164 {165 name: "excluded namespaces filter only restores resources not in those namespaces",166 restore: defaultRestore().ExcludedNamespaces("ns-2").Result(),167 backup: defaultBackup().Result(),168 tarball: newTarWriter(t).169 addItems("pods",170 builder.ForPod("ns-1", "pod-1").Result(),171 builder.ForPod("ns-2", "pod-2").Result(),172 ).173 addItems("deployments.apps",174 builder.ForDeployment("ns-1", "deploy-1").Result(),175 builder.ForDeployment("ns-2", "deploy-2").Result(),176 ).177 addItems("persistentvolumes",178 builder.ForPersistentVolume("pv-1").Result(),179 builder.ForPersistentVolume("pv-2").Result(),180 ).181 done(),182 apiResources: []*test.APIResource{183 test.Pods(),184 test.Deployments(),185 test.PVs(),186 },187 want: map[*test.APIResource][]string{188 test.Pods(): {"ns-1/pod-1"},189 test.Deployments(): {"ns-1/deploy-1"},190 },191 },192 {193 name: "IncludeClusterResources=false only restores namespaced resources",194 restore: defaultRestore().IncludeClusterResources(false).Result(),195 backup: defaultBackup().Result(),196 tarball: newTarWriter(t).197 addItems("pods",198 builder.ForPod("ns-1", "pod-1").Result(),199 builder.ForPod("ns-2", "pod-2").Result(),200 ).201 addItems("deployments.apps",202 builder.ForDeployment("ns-1", "deploy-1").Result(),203 builder.ForDeployment("ns-2", "deploy-2").Result(),204 ).205 addItems("persistentvolumes",206 builder.ForPersistentVolume("pv-1").Result(),207 builder.ForPersistentVolume("pv-2").Result(),208 ).209 done(),210 apiResources: []*test.APIResource{211 test.Pods(),212 test.Deployments(),213 test.PVs(),214 },215 want: map[*test.APIResource][]string{216 test.Pods(): {"ns-1/pod-1", 
"ns-2/pod-2"},217 test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"},218 },219 },220 {221 name: "label selector only restores matching resources",222 restore: defaultRestore().LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).Result(),223 backup: defaultBackup().Result(),224 tarball: newTarWriter(t).225 addItems("pods",226 builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("a", "b")).Result(),227 builder.ForPod("ns-2", "pod-2").Result(),228 ).229 addItems("deployments.apps",230 builder.ForDeployment("ns-1", "deploy-1").Result(),231 builder.ForDeployment("ns-2", "deploy-2").ObjectMeta(builder.WithLabels("a", "b")).Result(),232 ).233 addItems("persistentvolumes",234 builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("a", "b")).Result(),235 builder.ForPersistentVolume("pv-2").ObjectMeta(builder.WithLabels("a", "c")).Result(),236 ).237 done(),238 apiResources: []*test.APIResource{239 test.Pods(),240 test.Deployments(),241 test.PVs(),242 },243 want: map[*test.APIResource][]string{244 test.Pods(): {"ns-1/pod-1"},245 test.Deployments(): {"ns-2/deploy-2"},246 test.PVs(): {"/pv-1"},247 },248 },249 {250 name: "should include cluster-scoped resources if restoring subset of namespaces and IncludeClusterResources=true",251 restore: defaultRestore().IncludedNamespaces("ns-1").IncludeClusterResources(true).Result(),252 backup: defaultBackup().Result(),253 tarball: newTarWriter(t).254 addItems("pods",255 builder.ForPod("ns-1", "pod-1").Result(),256 builder.ForPod("ns-2", "pod-2").Result(),257 ).258 addItems("deployments.apps",259 builder.ForDeployment("ns-1", "deploy-1").Result(),260 builder.ForDeployment("ns-2", "deploy-2").Result(),261 ).262 addItems("persistentvolumes",263 builder.ForPersistentVolume("pv-1").Result(),264 builder.ForPersistentVolume("pv-2").Result(),265 ).266 done(),267 apiResources: []*test.APIResource{268 test.Pods(),269 test.Deployments(),270 test.PVs(),271 },272 want: map[*test.APIResource][]string{273 test.Pods(): {"ns-1/pod-1"},274 test.Deployments(): {"ns-1/deploy-1"},275 test.PVs(): {"/pv-1", "/pv-2"},276 },277 },278 {279 name: "should not include cluster-scoped resources if restoring subset of namespaces and IncludeClusterResources=false",280 restore: defaultRestore().IncludedNamespaces("ns-1").IncludeClusterResources(false).Result(),281 backup: defaultBackup().Result(),282 tarball: newTarWriter(t).283 addItems("pods",284 builder.ForPod("ns-1", "pod-1").Result(),285 builder.ForPod("ns-2", "pod-2").Result(),286 ).287 addItems("deployments.apps",288 builder.ForDeployment("ns-1", "deploy-1").Result(),289 builder.ForDeployment("ns-2", "deploy-2").Result(),290 ).291 addItems("persistentvolumes",292 builder.ForPersistentVolume("pv-1").Result(),293 builder.ForPersistentVolume("pv-2").Result(),294 ).295 done(),296 apiResources: []*test.APIResource{297 test.Pods(),298 test.Deployments(),299 test.PVs(),300 },301 want: map[*test.APIResource][]string{302 test.Pods(): {"ns-1/pod-1"},303 test.Deployments(): {"ns-1/deploy-1"},304 test.PVs(): {},305 },306 },307 {308 name: "should not include cluster-scoped resources if restoring subset of namespaces and IncludeClusterResources=nil",309 restore: defaultRestore().IncludedNamespaces("ns-1").Result(),310 backup: defaultBackup().Result(),311 tarball: newTarWriter(t).312 addItems("pods",313 builder.ForPod("ns-1", "pod-1").Result(),314 builder.ForPod("ns-2", "pod-2").Result(),315 ).316 addItems("deployments.apps",317 builder.ForDeployment("ns-1", "deploy-1").Result(),318 
builder.ForDeployment("ns-2", "deploy-2").Result(),319 ).320 addItems("persistentvolumes",321 builder.ForPersistentVolume("pv-1").Result(),322 builder.ForPersistentVolume("pv-2").Result(),323 ).324 done(),325 apiResources: []*test.APIResource{326 test.Pods(),327 test.Deployments(),328 test.PVs(),329 },330 want: map[*test.APIResource][]string{331 test.Pods(): {"ns-1/pod-1"},332 test.Deployments(): {"ns-1/deploy-1"},333 test.PVs(): {},334 },335 },336 {337 name: "should include cluster-scoped resources if restoring all namespaces and IncludeClusterResources=true",338 restore: defaultRestore().IncludeClusterResources(true).Result(),339 backup: defaultBackup().Result(),340 tarball: newTarWriter(t).341 addItems("pods",342 builder.ForPod("ns-1", "pod-1").Result(),343 builder.ForPod("ns-2", "pod-2").Result(),344 ).345 addItems("deployments.apps",346 builder.ForDeployment("ns-1", "deploy-1").Result(),347 builder.ForDeployment("ns-2", "deploy-2").Result(),348 ).349 addItems("persistentvolumes",350 builder.ForPersistentVolume("pv-1").Result(),351 builder.ForPersistentVolume("pv-2").Result(),352 ).353 done(),354 apiResources: []*test.APIResource{355 test.Pods(),356 test.Deployments(),357 test.PVs(),358 },359 want: map[*test.APIResource][]string{360 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},361 test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"},362 test.PVs(): {"/pv-1", "/pv-2"},363 },364 },365 {366 name: "should not include cluster-scoped resources if restoring all namespaces and IncludeClusterResources=false",367 restore: defaultRestore().IncludeClusterResources(false).Result(),368 backup: defaultBackup().Result(),369 tarball: newTarWriter(t).370 addItems("pods",371 builder.ForPod("ns-1", "pod-1").Result(),372 builder.ForPod("ns-2", "pod-2").Result(),373 ).374 addItems("deployments.apps",375 builder.ForDeployment("ns-1", "deploy-1").Result(),376 builder.ForDeployment("ns-2", "deploy-2").Result(),377 ).378 addItems("persistentvolumes",379 builder.ForPersistentVolume("pv-1").Result(),380 builder.ForPersistentVolume("pv-2").Result(),381 ).382 done(),383 apiResources: []*test.APIResource{384 test.Pods(),385 test.Deployments(),386 test.PVs(),387 },388 want: map[*test.APIResource][]string{389 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},390 test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"},391 },392 },393 {394 name: "when a wildcard and a specific resource are included, the wildcard takes precedence",395 restore: defaultRestore().IncludedResources("*", "pods").Result(),396 backup: defaultBackup().Result(),397 tarball: newTarWriter(t).398 addItems("pods",399 builder.ForPod("ns-1", "pod-1").Result(),400 builder.ForPod("ns-2", "pod-2").Result(),401 ).402 addItems("deployments.apps",403 builder.ForDeployment("ns-1", "deploy-1").Result(),404 builder.ForDeployment("ns-2", "deploy-2").Result(),405 ).406 addItems("persistentvolumes",407 builder.ForPersistentVolume("pv-1").Result(),408 builder.ForPersistentVolume("pv-2").Result(),409 ).410 done(),411 apiResources: []*test.APIResource{412 test.Pods(),413 test.Deployments(),414 test.PVs(),415 },416 want: map[*test.APIResource][]string{417 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},418 test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"},419 test.PVs(): {"/pv-1", "/pv-2"},420 },421 },422 {423 name: "wildcard excludes are ignored",424 restore: defaultRestore().ExcludedResources("*").Result(),425 backup: defaultBackup().Result(),426 tarball: newTarWriter(t).427 addItems("pods",428 builder.ForPod("ns-1", "pod-1").Result(),429 builder.ForPod("ns-2", 
"pod-2").Result(),430 ).431 addItems("deployments.apps",432 builder.ForDeployment("ns-1", "deploy-1").Result(),433 builder.ForDeployment("ns-2", "deploy-2").Result(),434 ).435 addItems("persistentvolumes",436 builder.ForPersistentVolume("pv-1").Result(),437 builder.ForPersistentVolume("pv-2").Result(),438 ).439 done(),440 apiResources: []*test.APIResource{441 test.Pods(),442 test.Deployments(),443 test.PVs(),444 },445 want: map[*test.APIResource][]string{446 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},447 test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"},448 test.PVs(): {"/pv-1", "/pv-2"},449 },450 },451 {452 name: "unresolvable included resources are ignored",453 restore: defaultRestore().IncludedResources("pods", "unresolvable").Result(),454 backup: defaultBackup().Result(),455 tarball: newTarWriter(t).456 addItems("pods",457 builder.ForPod("ns-1", "pod-1").Result(),458 builder.ForPod("ns-2", "pod-2").Result(),459 ).460 addItems("deployments.apps",461 builder.ForDeployment("ns-1", "deploy-1").Result(),462 builder.ForDeployment("ns-2", "deploy-2").Result(),463 ).464 addItems("persistentvolumes",465 builder.ForPersistentVolume("pv-1").Result(),466 builder.ForPersistentVolume("pv-2").Result(),467 ).468 done(),469 apiResources: []*test.APIResource{470 test.Pods(),471 test.Deployments(),472 test.PVs(),473 },474 want: map[*test.APIResource][]string{475 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},476 },477 },478 {479 name: "unresolvable excluded resources are ignored",480 restore: defaultRestore().ExcludedResources("deployments", "unresolvable").Result(),481 backup: defaultBackup().Result(),482 tarball: newTarWriter(t).483 addItems("pods",484 builder.ForPod("ns-1", "pod-1").Result(),485 builder.ForPod("ns-2", "pod-2").Result(),486 ).487 addItems("deployments.apps",488 builder.ForDeployment("ns-1", "deploy-1").Result(),489 builder.ForDeployment("ns-2", "deploy-2").Result(),490 ).491 addItems("persistentvolumes",492 builder.ForPersistentVolume("pv-1").Result(),493 builder.ForPersistentVolume("pv-2").Result(),494 ).495 done(),496 apiResources: []*test.APIResource{497 test.Pods(),498 test.Deployments(),499 test.PVs(),500 },501 want: map[*test.APIResource][]string{502 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},503 test.PVs(): {"/pv-1", "/pv-2"},504 },505 },506 {507 name: "mirror pods are not restored",508 restore: defaultRestore().Result(),509 backup: defaultBackup().Result(),510 tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithAnnotations(corev1api.MirrorPodAnnotationKey, "foo")).Result()).done(),511 apiResources: []*test.APIResource{test.Pods()},512 want: map[*test.APIResource][]string{test.Pods(): {}},513 },514 {515 name: "service accounts are restored",516 restore: defaultRestore().Result(),517 backup: defaultBackup().Result(),518 tarball: newTarWriter(t).addItems("serviceaccounts", builder.ForServiceAccount("ns-1", "sa-1").Result()).done(),519 apiResources: []*test.APIResource{test.ServiceAccounts()},520 want: map[*test.APIResource][]string{test.ServiceAccounts(): {"ns-1/sa-1"}},521 },522 }523 for _, tc := range tests {524 t.Run(tc.name, func(t *testing.T) {525 h := newHarness(t)526 for _, r := range tc.apiResources {527 h.DiscoveryClient.WithAPIResource(r)528 }529 require.NoError(t, h.restorer.discoveryHelper.Refresh())530 data := Request{531 Log: h.log,532 Restore: tc.restore,533 Backup: tc.backup,534 PodVolumeBackups: nil,535 VolumeSnapshots: nil,536 BackupReader: tc.tarball,537 }538 warnings, errs := h.restorer.Restore(539 data,540 nil, // 
actions541 nil, // snapshot location lister542 nil, // volume snapshotter getter543 )544 assertEmptyResults(t, warnings, errs)545 assertAPIContents(t, h, tc.want)546 })547 }548}549// TestRestoreNamespaceMapping runs restores with namespace mappings specified,550// and verifies that the set of items created in the API are in the correct551// namespaces. Validation is done by looking at the namespaces/names of the items552// in the API; contents are not checked.553func TestRestoreNamespaceMapping(t *testing.T) {554 tests := []struct {555 name string556 restore *velerov1api.Restore557 backup *velerov1api.Backup558 apiResources []*test.APIResource559 tarball io.Reader560 want map[*test.APIResource][]string561 }{562 {563 name: "namespace mappings are applied",564 restore: defaultRestore().NamespaceMappings("ns-1", "mapped-ns-1", "ns-2", "mapped-ns-2").Result(),565 backup: defaultBackup().Result(),566 apiResources: []*test.APIResource{567 test.Pods(),568 },569 tarball: newTarWriter(t).570 addItems("pods",571 builder.ForPod("ns-1", "pod-1").Result(),572 builder.ForPod("ns-2", "pod-2").Result(),573 builder.ForPod("ns-3", "pod-3").Result(),574 ).575 done(),576 want: map[*test.APIResource][]string{577 test.Pods(): {"mapped-ns-1/pod-1", "mapped-ns-2/pod-2", "ns-3/pod-3"},578 },579 },580 {581 name: "namespace mappings are applied when IncludedNamespaces are specified",582 restore: defaultRestore().IncludedNamespaces("ns-1", "ns-2").NamespaceMappings("ns-1", "mapped-ns-1", "ns-2", "mapped-ns-2").Result(),583 backup: defaultBackup().Result(),584 apiResources: []*test.APIResource{585 test.Pods(),586 },587 tarball: newTarWriter(t).588 addItems("pods",589 builder.ForPod("ns-1", "pod-1").Result(),590 builder.ForPod("ns-2", "pod-2").Result(),591 builder.ForPod("ns-3", "pod-3").Result(),592 ).593 done(),594 want: map[*test.APIResource][]string{595 test.Pods(): {"mapped-ns-1/pod-1", "mapped-ns-2/pod-2"},596 },597 },598 }599 for _, tc := range tests {600 t.Run(tc.name, func(t *testing.T) {601 h := newHarness(t)602 for _, r := range tc.apiResources {603 h.DiscoveryClient.WithAPIResource(r)604 }605 require.NoError(t, h.restorer.discoveryHelper.Refresh())606 data := Request{607 Log: h.log,608 Restore: tc.restore,609 Backup: tc.backup,610 PodVolumeBackups: nil,611 VolumeSnapshots: nil,612 BackupReader: tc.tarball,613 }614 warnings, errs := h.restorer.Restore(615 data,616 nil, // actions617 nil, // snapshot location lister618 nil, // volume snapshotter getter619 )620 assertEmptyResults(t, warnings, errs)621 assertAPIContents(t, h, tc.want)622 })623 }624}625// TestRestoreResourcePriorities runs restores with resource priorities specified,626// and verifies that the set of items created in the API are created in the expected627// order. 
Validation is done by adding a Reactor to the fake dynamic client that records628// resource identifiers as they're created, and comparing that to the expected order.629func TestRestoreResourcePriorities(t *testing.T) {630 tests := []struct {631 name string632 restore *velerov1api.Restore633 backup *velerov1api.Backup634 apiResources []*test.APIResource635 tarball io.Reader636 resourcePriorities []string637 }{638 {639 name: "resources are restored according to the specified resource priorities",640 restore: defaultRestore().Result(),641 backup: defaultBackup().Result(),642 tarball: newTarWriter(t).643 addItems("pods",644 builder.ForPod("ns-1", "pod-1").Result(),645 builder.ForPod("ns-2", "pod-2").Result(),646 ).647 addItems("persistentvolumes",648 builder.ForPersistentVolume("pv-1").Result(),649 builder.ForPersistentVolume("pv-2").Result(),650 ).651 addItems("deployments.apps",652 builder.ForDeployment("ns-1", "deploy-1").Result(),653 builder.ForDeployment("ns-2", "deploy-2").Result(),654 ).655 addItems("serviceaccounts",656 builder.ForServiceAccount("ns-1", "sa-1").Result(),657 builder.ForServiceAccount("ns-2", "sa-2").Result(),658 ).659 addItems("persistentvolumeclaims",660 builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(),661 builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(),662 ).663 done(),664 apiResources: []*test.APIResource{665 test.Pods(),666 test.PVs(),667 test.Deployments(),668 test.ServiceAccounts(),669 },670 resourcePriorities: []string{"persistentvolumes", "serviceaccounts", "pods", "deployments.apps"},671 },672 }673 for _, tc := range tests {674 h := newHarness(t)675 h.restorer.resourcePriorities = tc.resourcePriorities676 recorder := &createRecorder{t: t}677 h.DynamicClient.PrependReactor("create", "*", recorder.reactor())678 for _, r := range tc.apiResources {679 h.DiscoveryClient.WithAPIResource(r)680 }681 require.NoError(t, h.restorer.discoveryHelper.Refresh())682 data := Request{683 Log: h.log,684 Restore: tc.restore,685 Backup: tc.backup,686 PodVolumeBackups: nil,687 VolumeSnapshots: nil,688 BackupReader: tc.tarball,689 }690 warnings, errs := h.restorer.Restore(691 data,692 nil, // actions693 nil, // snapshot location lister694 nil, // volume snapshotter getter695 )696 assertEmptyResults(t, warnings, errs)697 assertResourceCreationOrder(t, tc.resourcePriorities, recorder.resources)698 }699}700// TestInvalidTarballContents runs restores for tarballs that are invalid in some way, and701// verifies that the set of items created in the API and the errors returned are correct.702// Validation is done by looking at the namespaces/names of the items in the API and the703// Result objects returned from the restorer.704func TestInvalidTarballContents(t *testing.T) {705 tests := []struct {706 name string707 restore *velerov1api.Restore708 backup *velerov1api.Backup709 apiResources []*test.APIResource710 tarball io.Reader711 want map[*test.APIResource][]string712 wantErrs Result713 }{714 {715 name: "empty tarball returns an error",716 restore: defaultRestore().Result(),717 backup: defaultBackup().Result(),718 tarball: newTarWriter(t).719 done(),720 wantErrs: Result{721 Velero: []string{"error parsing backup contents: directory \"resources\" does not exist"},722 },723 },724 {725 name: "invalid JSON is reported as an error and restore continues",726 restore: defaultRestore().Result(),727 backup: defaultBackup().Result(),728 tarball: newTarWriter(t).729 add("resources/pods/namespaces/ns-1/pod-1.json", []byte("invalid JSON")).730 addItems("pods",731 
builder.ForPod("ns-1", "pod-2").Result(),732 ).733 done(),734 apiResources: []*test.APIResource{735 test.Pods(),736 },737 want: map[*test.APIResource][]string{738 test.Pods(): {"ns-1/pod-2"},739 },740 wantErrs: Result{741 Namespaces: map[string][]string{742 "ns-1": {"error decoding \"resources/pods/namespaces/ns-1/pod-1.json\": invalid character 'i' looking for beginning of value"},743 },744 },745 },746 }747 for _, tc := range tests {748 t.Run(tc.name, func(t *testing.T) {749 h := newHarness(t)750 for _, r := range tc.apiResources {751 h.DiscoveryClient.WithAPIResource(r)752 }753 require.NoError(t, h.restorer.discoveryHelper.Refresh())754 data := Request{755 Log: h.log,756 Restore: tc.restore,757 Backup: tc.backup,758 PodVolumeBackups: nil,759 VolumeSnapshots: nil,760 BackupReader: tc.tarball,761 }762 warnings, errs := h.restorer.Restore(763 data,764 nil, // actions765 nil, // snapshot location lister766 nil, // volume snapshotter getter767 )768 assertEmptyResults(t, warnings)769 assert.Equal(t, tc.wantErrs, errs)770 assertAPIContents(t, h, tc.want)771 })772 }773}774// TestRestoreItems runs restores of specific items and validates that they are created775// with the expected metadata/spec/status in the API.776func TestRestoreItems(t *testing.T) {777 tests := []struct {778 name string779 restore *velerov1api.Restore780 backup *velerov1api.Backup781 apiResources []*test.APIResource782 tarball io.Reader783 want []*test.APIResource784 }{785 {786 name: "metadata other than namespace/name/labels/annotations gets removed",787 restore: defaultRestore().Result(),788 backup: defaultBackup().Result(),789 tarball: newTarWriter(t).790 addItems("pods",791 builder.ForPod("ns-1", "pod-1").792 ObjectMeta(793 builder.WithLabels("key-1", "val-1"),794 builder.WithAnnotations("key-1", "val-1"),795 builder.WithClusterName("cluster-1"),796 builder.WithFinalizers("finalizer-1"),797 ).798 Result(),799 ).800 done(),801 apiResources: []*test.APIResource{802 test.Pods(),803 },804 want: []*test.APIResource{805 test.Pods(806 builder.ForPod("ns-1", "pod-1").807 ObjectMeta(808 builder.WithLabels("key-1", "val-1", "velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),809 builder.WithAnnotations("key-1", "val-1"),810 ).811 Result(),812 ),813 },814 },815 {816 name: "status gets removed",817 restore: defaultRestore().Result(),818 backup: defaultBackup().Result(),819 tarball: newTarWriter(t).820 addItems("pods",821 &corev1api.Pod{822 TypeMeta: metav1.TypeMeta{823 APIVersion: "v1",824 Kind: "Pod",825 },826 ObjectMeta: metav1.ObjectMeta{827 Namespace: "ns-1",828 Name: "pod-1",829 },830 Status: corev1api.PodStatus{831 Message: "a non-empty status",832 },833 },834 ).835 done(),836 apiResources: []*test.APIResource{837 test.Pods(),838 },839 want: []*test.APIResource{840 test.Pods(841 builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Result(),842 ),843 },844 },845 {846 name: "object gets labeled with full backup and restore names when they're both shorter than 63 characters",847 restore: defaultRestore().Result(),848 backup: defaultBackup().Result(),849 tarball: newTarWriter(t).850 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).851 done(),852 apiResources: []*test.APIResource{853 test.Pods(),854 },855 want: []*test.APIResource{856 test.Pods(builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Result()),857 },858 },859 {860 
name: "object gets labeled with full backup and restore names when they're both equal to 63 characters",861 restore: builder.ForRestore(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters").862 Backup("the-really-long-kube-service-name-that-is-exactly-63-characters").863 Result(),864 backup: builder.ForBackup(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters").Result(),865 tarball: newTarWriter(t).866 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).867 done(),868 apiResources: []*test.APIResource{869 test.Pods(),870 },871 want: []*test.APIResource{872 test.Pods(873 builder.ForPod("ns-1", "pod-1").874 ObjectMeta(875 builder.WithLabels(876 "velero.io/backup-name", "the-really-long-kube-service-name-that-is-exactly-63-characters",877 "velero.io/restore-name", "the-really-long-kube-service-name-that-is-exactly-63-characters",878 ),879 ).Result(),880 ),881 },882 },883 {884 name: "object gets labeled with shortened backup and restore names when they're both longer than 63 characters",885 restore: builder.ForRestore(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters").886 Backup("the-really-long-kube-service-name-that-is-much-greater-than-63-characters").887 Result(),888 backup: builder.ForBackup(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters").Result(),889 tarball: newTarWriter(t).890 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).891 done(),892 apiResources: []*test.APIResource{893 test.Pods(),894 },895 want: []*test.APIResource{896 test.Pods(builder.ForPod("ns-1", "pod-1").897 ObjectMeta(898 builder.WithLabels(899 "velero.io/backup-name", "the-really-long-kube-service-name-that-is-much-greater-th8a11b3",900 "velero.io/restore-name", "the-really-long-kube-service-name-that-is-much-greater-th8a11b3",901 ),902 ).903 Result(),904 ),905 },906 },907 {908 name: "no error when service account already exists in cluster and is identical to the backed up one",909 restore: defaultRestore().Result(),910 backup: defaultBackup().Result(),911 tarball: newTarWriter(t).912 addItems("serviceaccounts", builder.ForServiceAccount("ns-1", "sa-1").Result()).913 done(),914 apiResources: []*test.APIResource{915 test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()),916 },917 want: []*test.APIResource{918 test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()),919 },920 },921 {922 name: "service account secrets and image pull secrets are restored when service account already exists in cluster",923 restore: defaultRestore().Result(),924 backup: defaultBackup().Result(),925 tarball: newTarWriter(t).926 addItems("serviceaccounts", &corev1api.ServiceAccount{927 TypeMeta: metav1.TypeMeta{928 APIVersion: "v1",929 Kind: "ServiceAccount",930 },931 ObjectMeta: metav1.ObjectMeta{932 Namespace: "ns-1",933 Name: "sa-1",934 },935 Secrets: []corev1api.ObjectReference{{Name: "secret-1"}},936 ImagePullSecrets: []corev1api.LocalObjectReference{{Name: "pull-secret-1"}},937 }).938 done(),939 apiResources: []*test.APIResource{940 test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()),941 },942 want: []*test.APIResource{943 test.ServiceAccounts(&corev1api.ServiceAccount{944 TypeMeta: metav1.TypeMeta{945 APIVersion: "v1",946 Kind: "ServiceAccount",947 },948 ObjectMeta: metav1.ObjectMeta{949 Namespace: "ns-1",950 Name: "sa-1",951 },952 Secrets: 
[]corev1api.ObjectReference{{Name: "secret-1"}},953 ImagePullSecrets: []corev1api.LocalObjectReference{{Name: "pull-secret-1"}},954 }),955 },956 },957 }958 for _, tc := range tests {959 t.Run(tc.name, func(t *testing.T) {960 h := newHarness(t)961 for _, r := range tc.apiResources {962 h.addItems(t, r)963 }964 data := Request{965 Log: h.log,966 Restore: tc.restore,967 Backup: tc.backup,968 PodVolumeBackups: nil,969 VolumeSnapshots: nil,970 BackupReader: tc.tarball,971 }972 warnings, errs := h.restorer.Restore(973 data,974 nil, // actions975 nil, // snapshot location lister976 nil, // volume snapshotter getter977 )978 assertEmptyResults(t, warnings, errs)979 assertRestoredItems(t, h, tc.want)980 })981 }982}983// recordResourcesAction is a restore item action that can be configured984// to run for specific resources/namespaces and simply records the items985// that it is executed for.986type recordResourcesAction struct {987 selector velero.ResourceSelector988 ids []string989 additionalItems []velero.ResourceIdentifier990}991func (a *recordResourcesAction) AppliesTo() (velero.ResourceSelector, error) {992 return a.selector, nil993}994func (a *recordResourcesAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {995 metadata, err := meta.Accessor(input.Item)996 if err != nil {997 return &velero.RestoreItemActionExecuteOutput{998 UpdatedItem: input.Item,999 AdditionalItems: a.additionalItems,1000 }, err1001 }1002 a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata))1003 return &velero.RestoreItemActionExecuteOutput{1004 UpdatedItem: input.Item,1005 AdditionalItems: a.additionalItems,1006 }, nil1007}1008func (a *recordResourcesAction) ForResource(resource string) *recordResourcesAction {1009 a.selector.IncludedResources = append(a.selector.IncludedResources, resource)1010 return a1011}1012func (a *recordResourcesAction) ForNamespace(namespace string) *recordResourcesAction {1013 a.selector.IncludedNamespaces = append(a.selector.IncludedNamespaces, namespace)1014 return a1015}1016func (a *recordResourcesAction) ForLabelSelector(selector string) *recordResourcesAction {1017 a.selector.LabelSelector = selector1018 return a1019}1020func (a *recordResourcesAction) WithAdditionalItems(items []velero.ResourceIdentifier) *recordResourcesAction {1021 a.additionalItems = items1022 return a1023}1024// TestRestoreActionsRunsForCorrectItems runs restores with restore item actions, and1025// verifies that each restore item action is run for the correct set of resources based on its1026// AppliesTo() resource selector. 
Verification is done by using the recordResourcesAction struct,1027// which records which resources it's executed for.1028func TestRestoreActionsRunForCorrectItems(t *testing.T) {1029 tests := []struct {1030 name string1031 restore *velerov1api.Restore1032 backup *velerov1api.Backup1033 apiResources []*test.APIResource1034 tarball io.Reader1035 actions map[*recordResourcesAction][]string1036 }{1037 {1038 name: "single action with no selector runs for all items",1039 restore: defaultRestore().Result(),1040 backup: defaultBackup().Result(),1041 tarball: newTarWriter(t).1042 addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).1043 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()).1044 done(),1045 apiResources: []*test.APIResource{test.Pods(), test.PVs()},1046 actions: map[*recordResourcesAction][]string{1047 new(recordResourcesAction): {"ns-1/pod-1", "ns-2/pod-2", "pv-1", "pv-2"},1048 },1049 },1050 {1051 name: "single action with a resource selector for namespaced resources runs only for matching resources",1052 restore: defaultRestore().Result(),1053 backup: defaultBackup().Result(),1054 tarball: newTarWriter(t).1055 addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).1056 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()).1057 done(),1058 apiResources: []*test.APIResource{test.Pods(), test.PVs()},1059 actions: map[*recordResourcesAction][]string{1060 new(recordResourcesAction).ForResource("pods"): {"ns-1/pod-1", "ns-2/pod-2"},1061 },1062 },1063 {1064 name: "single action with a resource selector for cluster-scoped resources runs only for matching resources",1065 restore: defaultRestore().Result(),1066 backup: defaultBackup().Result(),1067 tarball: newTarWriter(t).1068 addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).1069 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()).1070 done(),1071 apiResources: []*test.APIResource{test.Pods(), test.PVs()},1072 actions: map[*recordResourcesAction][]string{1073 new(recordResourcesAction).ForResource("persistentvolumes"): {"pv-1", "pv-2"},1074 },1075 },1076 {1077 name: "single action with a namespace selector runs only for resources in that namespace",1078 restore: defaultRestore().Result(),1079 backup: defaultBackup().Result(),1080 tarball: newTarWriter(t).1081 addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).1082 addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()).1083 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()).1084 done(),1085 apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()},1086 actions: map[*recordResourcesAction][]string{1087 new(recordResourcesAction).ForNamespace("ns-1"): {"ns-1/pod-1", "ns-1/pvc-1"},1088 },1089 },1090 {1091 name: "single action with a resource and namespace selector runs only for matching resources in that namespace",1092 restore: defaultRestore().Result(),1093 backup: defaultBackup().Result(),1094 tarball: newTarWriter(t).1095 addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", 
"pod-2").Result()).1096 addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()).1097 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()).1098 done(),1099 apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()},1100 actions: map[*recordResourcesAction][]string{1101 new(recordResourcesAction).ForNamespace("ns-1").ForResource("pods"): {"ns-1/pod-1"},1102 },1103 },1104 {1105 name: "multiple actions, each with a different resource selector using short name, run for matching resources",1106 restore: defaultRestore().Result(),1107 backup: defaultBackup().Result(),1108 tarball: newTarWriter(t).1109 addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).1110 addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()).1111 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()).1112 done(),1113 apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()},1114 actions: map[*recordResourcesAction][]string{1115 new(recordResourcesAction).ForResource("po"): {"ns-1/pod-1", "ns-2/pod-2"},1116 new(recordResourcesAction).ForResource("pv"): {"pv-1", "pv-2"},1117 },1118 },1119 {1120 name: "actions with selectors that don't match anything don't run for any resources",1121 restore: defaultRestore().Result(),1122 backup: defaultBackup().Result(),1123 tarball: newTarWriter(t).1124 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).1125 addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()).1126 done(),1127 apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()},1128 actions: map[*recordResourcesAction][]string{1129 new(recordResourcesAction).ForNamespace("ns-1").ForResource("persistentvolumeclaims"): nil,1130 new(recordResourcesAction).ForNamespace("ns-2").ForResource("pods"): nil,1131 },1132 },1133 }1134 for _, tc := range tests {1135 t.Run(tc.name, func(t *testing.T) {1136 h := newHarness(t)1137 for _, r := range tc.apiResources {1138 h.addItems(t, r)1139 }1140 actions := []velero.RestoreItemAction{}1141 for action := range tc.actions {1142 actions = append(actions, action)1143 }1144 data := Request{1145 Log: h.log,1146 Restore: tc.restore,1147 Backup: tc.backup,1148 PodVolumeBackups: nil,1149 VolumeSnapshots: nil,1150 BackupReader: tc.tarball,1151 }1152 warnings, errs := h.restorer.Restore(1153 data,1154 actions,1155 nil, // snapshot location lister1156 nil, // volume snapshotter getter1157 )1158 assertEmptyResults(t, warnings, errs)1159 for action, want := range tc.actions {1160 sort.Strings(want)1161 sort.Strings(action.ids)1162 assert.Equal(t, want, action.ids)1163 }1164 })1165 }1166}1167// pluggableAction is a restore item action that can be plugged with an Execute1168// function body at runtime.1169type pluggableAction struct {1170 selector velero.ResourceSelector1171 executeFunc func(*velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error)1172}1173func (a *pluggableAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1174 if a.executeFunc == nil {1175 return &velero.RestoreItemActionExecuteOutput{1176 UpdatedItem: input.Item,1177 }, nil1178 }1179 return 
a.executeFunc(input)1180}1181func (a *pluggableAction) AppliesTo() (velero.ResourceSelector, error) {1182 return a.selector, nil1183}1184// TestRestoreActionModifications runs restores with restore item actions that modify resources, and1185// verifies that that the modified item is correctly created in the API. Verification is done by looking1186// at the full object in the API.1187func TestRestoreActionModifications(t *testing.T) {1188 // modifyingActionGetter is a helper function that returns a *pluggableAction, whose Execute(...)1189 // method modifies the item being passed in by calling the 'modify' function on it.1190 modifyingActionGetter := func(modify func(*unstructured.Unstructured)) *pluggableAction {1191 return &pluggableAction{1192 executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1193 obj, ok := input.Item.(*unstructured.Unstructured)1194 if !ok {1195 return nil, errors.Errorf("unexpected type %T", input.Item)1196 }1197 res := obj.DeepCopy()1198 modify(res)1199 return &velero.RestoreItemActionExecuteOutput{1200 UpdatedItem: res,1201 }, nil1202 },1203 }1204 }1205 tests := []struct {1206 name string1207 restore *velerov1api.Restore1208 backup *velerov1api.Backup1209 apiResources []*test.APIResource1210 tarball io.Reader1211 actions []velero.RestoreItemAction1212 want []*test.APIResource1213 }{1214 {1215 name: "action that adds a label to item gets restored",1216 restore: defaultRestore().Result(),1217 backup: defaultBackup().Result(),1218 tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).done(),1219 apiResources: []*test.APIResource{test.Pods()},1220 actions: []velero.RestoreItemAction{1221 modifyingActionGetter(func(item *unstructured.Unstructured) {1222 item.SetLabels(map[string]string{"updated": "true"})1223 }),1224 },1225 want: []*test.APIResource{1226 test.Pods(1227 builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("updated", "true")).Result(),1228 ),1229 },1230 },1231 {1232 name: "action that removes a label to item gets restored",1233 restore: defaultRestore().Result(),1234 backup: defaultBackup().Result(),1235 tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("should-be-removed", "true")).Result()).done(),1236 apiResources: []*test.APIResource{test.Pods()},1237 actions: []velero.RestoreItemAction{1238 modifyingActionGetter(func(item *unstructured.Unstructured) {1239 item.SetLabels(nil)1240 }),1241 },1242 want: []*test.APIResource{1243 test.Pods(builder.ForPod("ns-1", "pod-1").Result()),1244 },1245 },1246 // TODO action that modifies namespace/name - what's the expected behavior?1247 }1248 for _, tc := range tests {1249 t.Run(tc.name, func(t *testing.T) {1250 h := newHarness(t)1251 for _, r := range tc.apiResources {1252 h.addItems(t, r)1253 }1254 // every restored item should have the restore and backup name labels, set1255 // them here so we don't have to do it in every test case definition above.1256 for _, resource := range tc.want {1257 for _, item := range resource.Items {1258 labels := item.GetLabels()1259 if labels == nil {1260 labels = make(map[string]string)1261 }1262 labels["velero.io/restore-name"] = tc.restore.Name1263 labels["velero.io/backup-name"] = tc.restore.Spec.BackupName1264 item.SetLabels(labels)1265 }1266 }1267 data := Request{1268 Log: h.log,1269 Restore: tc.restore,1270 Backup: tc.backup,1271 PodVolumeBackups: nil,1272 VolumeSnapshots: nil,1273 BackupReader: tc.tarball,1274 }1275 
warnings, errs := h.restorer.Restore(1276 data,1277 tc.actions,1278 nil, // snapshot location lister1279 nil, // volume snapshotter getter1280 )1281 assertEmptyResults(t, warnings, errs)1282 assertRestoredItems(t, h, tc.want)1283 })1284 }1285}1286// TestRestoreActionAdditionalItems runs restores with restore item actions that return additional items1287// to be restored, and verifies that that the correct set of items is created in the API. Verification is1288// done by looking at the namespaces/names of the items in the API; contents are not checked.1289func TestRestoreActionAdditionalItems(t *testing.T) {1290 tests := []struct {1291 name string1292 restore *velerov1api.Restore1293 backup *velerov1api.Backup1294 tarball io.Reader1295 apiResources []*test.APIResource1296 actions []velero.RestoreItemAction1297 want map[*test.APIResource][]string1298 }{1299 {1300 name: "additional items that are already being restored are not restored twice",1301 restore: defaultRestore().Result(),1302 backup: defaultBackup().Result(),1303 tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).done(),1304 apiResources: []*test.APIResource{test.Pods()},1305 actions: []velero.RestoreItemAction{1306 &pluggableAction{1307 selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}},1308 executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1309 return &velero.RestoreItemActionExecuteOutput{1310 UpdatedItem: input.Item,1311 AdditionalItems: []velero.ResourceIdentifier{1312 {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"},1313 },1314 }, nil1315 },1316 },1317 },1318 want: map[*test.APIResource][]string{1319 test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},1320 },1321 },1322 {1323 name: "when using a restore namespace filter, additional items that are in a non-included namespace are not restored",1324 restore: defaultRestore().IncludedNamespaces("ns-1").Result(),1325 backup: defaultBackup().Result(),1326 tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).done(),1327 apiResources: []*test.APIResource{test.Pods()},1328 actions: []velero.RestoreItemAction{1329 &pluggableAction{1330 executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1331 return &velero.RestoreItemActionExecuteOutput{1332 UpdatedItem: input.Item,1333 AdditionalItems: []velero.ResourceIdentifier{1334 {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"},1335 },1336 }, nil1337 },1338 },1339 },1340 want: map[*test.APIResource][]string{1341 test.Pods(): {"ns-1/pod-1"},1342 },1343 },1344 {1345 name: "when using a restore namespace filter, additional items that are cluster-scoped are restored when IncludeClusterResources=nil",1346 restore: defaultRestore().IncludedNamespaces("ns-1").Result(),1347 backup: defaultBackup().Result(),1348 tarball: newTarWriter(t).1349 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).1350 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()).1351 done(),1352 apiResources: []*test.APIResource{test.Pods(), test.PVs()},1353 actions: []velero.RestoreItemAction{1354 &pluggableAction{1355 executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1356 return &velero.RestoreItemActionExecuteOutput{1357 UpdatedItem: input.Item,1358 AdditionalItems: 
[]velero.ResourceIdentifier{1359 {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},1360 },1361 }, nil1362 },1363 },1364 },1365 want: map[*test.APIResource][]string{1366 test.Pods(): {"ns-1/pod-1"},1367 test.PVs(): {"/pv-1"},1368 },1369 },1370 {1371 name: "additional items that are cluster-scoped are not restored when IncludeClusterResources=false",1372 restore: defaultRestore().IncludeClusterResources(false).Result(),1373 backup: defaultBackup().Result(),1374 tarball: newTarWriter(t).1375 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).1376 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()).1377 done(),1378 apiResources: []*test.APIResource{test.Pods(), test.PVs()},1379 actions: []velero.RestoreItemAction{1380 &pluggableAction{1381 executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1382 return &velero.RestoreItemActionExecuteOutput{1383 UpdatedItem: input.Item,1384 AdditionalItems: []velero.ResourceIdentifier{1385 {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},1386 },1387 }, nil1388 },1389 },1390 },1391 want: map[*test.APIResource][]string{1392 test.Pods(): {"ns-1/pod-1"},1393 test.PVs(): nil,1394 },1395 },1396 {1397 name: "when using a restore resource filter, additional items that are non-included resources are not restored",1398 restore: defaultRestore().IncludedResources("pods").Result(),1399 backup: defaultBackup().Result(),1400 tarball: newTarWriter(t).1401 addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).1402 addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()).1403 done(),1404 apiResources: []*test.APIResource{test.Pods(), test.PVs()},1405 actions: []velero.RestoreItemAction{1406 &pluggableAction{1407 executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {1408 return &velero.RestoreItemActionExecuteOutput{1409 UpdatedItem: input.Item,1410 AdditionalItems: []velero.ResourceIdentifier{1411 {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},1412 },1413 }, nil1414 },1415 },1416 },1417 want: map[*test.APIResource][]string{1418 test.Pods(): {"ns-1/pod-1"},1419 test.PVs(): nil,1420 },1421 },1422 }1423 for _, tc := range tests {1424 t.Run(tc.name, func(t *testing.T) {1425 h := newHarness(t)1426 for _, r := range tc.apiResources {1427 h.addItems(t, r)1428 }1429 data := Request{1430 Log: h.log,1431 Restore: tc.restore,1432 Backup: tc.backup,1433 PodVolumeBackups: nil,1434 VolumeSnapshots: nil,1435 BackupReader: tc.tarball,1436 }1437 warnings, errs := h.restorer.Restore(1438 data,1439 tc.actions,1440 nil, // snapshot location lister1441 nil, // volume snapshotter getter1442 )1443 assertEmptyResults(t, warnings, errs)1444 assertAPIContents(t, h, tc.want)1445 })1446 }1447}1448// TestShouldRestore runs the ShouldRestore function for various permutations of1449// existing/nonexisting/being-deleted PVs, PVCs, and namespaces, and verifies the1450// result/error matches expectations.1451func TestShouldRestore(t *testing.T) {1452 tests := []struct {1453 name string1454 pvName string1455 apiResources []*test.APIResource1456 namespaces []*corev1api.Namespace1457 want bool1458 wantErr error1459 }{1460 {1461 name: "when PV is not found, result is true",1462 pvName: "pv-1",1463 want: true,1464 },1465 {1466 name: "when PV is found and has Phase=Released, result is false",1467 pvName: "pv-1",1468 apiResources: []*test.APIResource{1469 test.PVs(&corev1api.PersistentVolume{1470 TypeMeta: 
metav1.TypeMeta{1471 APIVersion: "v1",1472 Kind: "PersistentVolume",1473 },1474 ObjectMeta: metav1.ObjectMeta{1475 Name: "pv-1",1476 },1477 Status: corev1api.PersistentVolumeStatus{1478 Phase: corev1api.VolumeReleased,1479 },1480 }),1481 },1482 want: false,1483 },1484 {1485 name: "when PV is found and has associated PVC and namespace that aren't deleting, result is false",1486 pvName: "pv-1",1487 apiResources: []*test.APIResource{1488 test.PVs(1489 builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(),1490 ),1491 test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()),1492 },1493 namespaces: []*corev1api.Namespace{builder.ForNamespace("ns-1").Result()},1494 want: false,1495 },1496 {1497 name: "when PV is found and has associated PVC that is deleting, result is false + timeout error",1498 pvName: "pv-1",1499 apiResources: []*test.APIResource{1500 test.PVs(1501 builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(),1502 ),1503 test.PVCs(1504 builder.ForPersistentVolumeClaim("ns-1", "pvc-1").ObjectMeta(builder.WithDeletionTimestamp(time.Now())).Result(),1505 ),1506 },1507 want: false,1508 wantErr: errors.New("timed out waiting for the condition"),1509 },1510 {1511 name: "when PV is found, has associated PVC that's not deleting, has associated NS that is terminating, result is false + timeout error",1512 pvName: "pv-1",1513 apiResources: []*test.APIResource{1514 test.PVs(1515 builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(),1516 ),1517 test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()),1518 },1519 namespaces: []*corev1api.Namespace{1520 builder.ForNamespace("ns-1").Phase(corev1api.NamespaceTerminating).Result(),1521 },1522 want: false,1523 wantErr: errors.New("timed out waiting for the condition"),1524 },1525 {1526 name: "when PV is found, has associated PVC that's not deleting, has associated NS that has deletion timestamp, result is false + timeout error",1527 pvName: "pv-1",1528 apiResources: []*test.APIResource{1529 test.PVs(1530 builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(),1531 ),1532 test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()),1533 },1534 namespaces: []*corev1api.Namespace{1535 builder.ForNamespace("ns-1").ObjectMeta(builder.WithDeletionTimestamp(time.Now())).Result(),1536 },1537 want: false,1538 wantErr: errors.New("timed out waiting for the condition"),1539 },1540 {1541 name: "when PV is found, associated PVC is not found, result is false + timeout error",1542 pvName: "pv-1",1543 apiResources: []*test.APIResource{1544 test.PVs(1545 builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(),1546 ),1547 },1548 want: false,1549 wantErr: errors.New("timed out waiting for the condition"),1550 },1551 {1552 name: "when PV is found, has associated PVC, associated namespace not found, result is false + timeout error",1553 pvName: "pv-1",1554 apiResources: []*test.APIResource{1555 test.PVs(1556 builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(),1557 ),1558 test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()),1559 },1560 want: false,1561 wantErr: errors.New("timed out waiting for the condition"),1562 },1563 }1564 for _, tc := range tests {1565 t.Run(tc.name, func(t *testing.T) {1566 h := newHarness(t)1567 ctx := &context{1568 log: h.log,1569 dynamicFactory: client.NewDynamicFactory(h.DynamicClient),1570 namespaceClient: h.KubeClient.CoreV1().Namespaces(),1571 resourceTerminatingTimeout: time.Millisecond,1572 
}1573 for _, resource := range tc.apiResources {1574 h.addItems(t, resource)1575 }1576 for _, ns := range tc.namespaces {1577 _, err := ctx.namespaceClient.Create(ns)1578 require.NoError(t, err)1579 }1580 pvClient, err := ctx.dynamicFactory.ClientForGroupVersionResource(1581 schema.GroupVersion{Group: "", Version: "v1"},1582 metav1.APIResource{Name: "persistentvolumes"},1583 "",1584 )1585 require.NoError(t, err)1586 res, err := ctx.shouldRestore(tc.pvName, pvClient)1587 assert.Equal(t, tc.want, res)1588 if tc.wantErr != nil {1589 if assert.NotNil(t, err, "expected a non-nil error") {1590 assert.EqualError(t, err, tc.wantErr.Error())1591 }1592 } else {1593 assert.Nil(t, err)1594 }1595 })1596 }1597}1598func assertRestoredItems(t *testing.T, h *harness, want []*test.APIResource) {1599 t.Helper()1600 for _, resource := range want {1601 resourceClient := h.DynamicClient.Resource(resource.GVR())1602 for _, item := range resource.Items {1603 var client dynamic.ResourceInterface1604 if item.GetNamespace() != "" {1605 client = resourceClient.Namespace(item.GetNamespace())1606 } else {1607 client = resourceClient1608 }1609 res, err := client.Get(item.GetName(), metav1.GetOptions{})1610 if !assert.NoError(t, err) {1611 continue1612 }1613 itemJSON, err := json.Marshal(item)1614 if !assert.NoError(t, err) {1615 continue1616 }1617 t.Logf("%v", string(itemJSON))1618 u := make(map[string]interface{})1619 if !assert.NoError(t, json.Unmarshal(itemJSON, &u)) {1620 continue1621 }1622 want := &unstructured.Unstructured{Object: u}1623 // These fields get non-nil zero values in the unstructured objects if they're1624 // empty in the structured objects. Remove them to make comparison easier.1625 unstructured.RemoveNestedField(want.Object, "metadata", "creationTimestamp")1626 unstructured.RemoveNestedField(want.Object, "status")1627 assert.Equal(t, want, res)1628 }1629 }1630}1631// volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter1632// interface that returns velero.VolumeSnapshotters from a map if they exist.1633type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter1634func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) {1635 snapshotter, ok := vsg[name]1636 if !ok {1637 return nil, errors.New("volume snapshotter not found")1638 }1639 return snapshotter, nil1640}1641// volumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface1642type volumeSnapshotter struct {1643 // a map from snapshotID to volumeID1644 snapshotVolumes map[string]string1645 // a map from volumeID to new pv name1646 pvName map[string]string1647}1648// Init is a no-op.1649func (vs *volumeSnapshotter) Init(config map[string]string) error {1650 return nil1651}1652// CreateVolumeFromSnapshot looks up the specified snapshotID in the snapshotVolumes1653// map and returns the corresponding volumeID if it exists, or an error otherwise.1654func (vs *volumeSnapshotter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {1655 volumeID, ok := vs.snapshotVolumes[snapshotID]1656 if !ok {1657 return "", errors.New("snapshot not found")1658 }1659 return volumeID, nil1660}1661// SetVolumeID sets the persistent volume's spec.awsElasticBlockStore.volumeID field1662// with the provided volumeID.1663func (vs *volumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {1664 unstructured.SetNestedField(pv.UnstructuredContent(), volumeID, "spec", 
"awsElasticBlockStore", "volumeID")1665 newPVName, ok := vs.pvName[volumeID]1666 if !ok {1667 return pv, nil1668 }1669 unstructured.SetNestedField(pv.UnstructuredContent(), newPVName, "metadata", "name")1670 return pv, nil1671}1672// GetVolumeID panics because it's not expected to be used for restores.1673func (*volumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) {1674 panic("GetVolumeID should not be used for restores")1675}1676// CreateSnapshot panics because it's not expected to be used for restores.1677func (*volumeSnapshotter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (snapshotID string, err error) {1678 panic("CreateSnapshot should not be used for restores")1679}1680// GetVolumeInfo panics because it's not expected to be used for restores.1681func (*volumeSnapshotter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {1682 panic("GetVolumeInfo should not be used for restores")1683}1684// DeleteSnapshot panics because it's not expected to be used for restores.1685func (*volumeSnapshotter) DeleteSnapshot(snapshotID string) error {1686 panic("DeleteSnapshot should not be used for backups")1687}1688// TestRestorePersistentVolumes runs restores for persistent volumes and verifies that1689// they are restored as expected, including restoring volumes from snapshots when expected.1690// Verification is done by looking at the contents of the API and the metadata/spec/status of1691// the items in the API.1692func TestRestorePersistentVolumes(t *testing.T) {1693 tests := []struct {1694 name string1695 restore *velerov1api.Restore1696 backup *velerov1api.Backup1697 tarball io.Reader1698 apiResources []*test.APIResource1699 volumeSnapshots []*volume.Snapshot1700 volumeSnapshotLocations []*velerov1api.VolumeSnapshotLocation1701 volumeSnapshotterGetter volumeSnapshotterGetter1702 want []*test.APIResource1703 }{1704 {1705 name: "when a PV with a reclaim policy of delete has no snapshot and does not exist in-cluster, it does not get restored, and its PVC gets reset for dynamic provisioning",1706 restore: defaultRestore().Result(),1707 backup: defaultBackup().Result(),1708 tarball: newTarWriter(t).1709 addItems("persistentvolumes",1710 builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).ClaimRef("ns-1", "pvc-1").Result(),1711 ).1712 addItems("persistentvolumeclaims",1713 builder.ForPersistentVolumeClaim("ns-1", "pvc-1").1714 VolumeName("pv-1").1715 ObjectMeta(1716 builder.WithAnnotations("pv.kubernetes.io/bind-completed", "true", "pv.kubernetes.io/bound-by-controller", "true", "foo", "bar"),1717 ).1718 Result(),1719 ).1720 done(),1721 apiResources: []*test.APIResource{1722 test.PVs(),1723 test.PVCs(),1724 },1725 want: []*test.APIResource{1726 test.PVs(),1727 test.PVCs(1728 builder.ForPersistentVolumeClaim("ns-1", "pvc-1").1729 ObjectMeta(1730 builder.WithAnnotations("foo", "bar"),1731 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),1732 ).1733 Result(),1734 ),1735 },1736 },1737 {1738 name: "when a PV with a reclaim policy of retain has no snapshot and does not exist in-cluster, it gets restored, without its claim ref",1739 restore: defaultRestore().Result(),1740 backup: defaultBackup().Result(),1741 tarball: newTarWriter(t).1742 addItems("persistentvolumes",1743 builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).ClaimRef("ns-1", "pvc-1").Result(),1744 ).1745 done(),1746 apiResources: []*test.APIResource{1747 
test.PVs(),1748 test.PVCs(),1749 },1750 want: []*test.APIResource{1751 test.PVs(1752 builder.ForPersistentVolume("pv-1").1753 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).1754 ObjectMeta(1755 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),1756 ).1757 Result(),1758 ),1759 },1760 },1761 {1762 name: "when a PV with a reclaim policy of delete has a snapshot and does not exist in-cluster, the snapshot and PV are restored",1763 restore: defaultRestore().Result(),1764 backup: defaultBackup().Result(),1765 tarball: newTarWriter(t).1766 addItems("persistentvolumes",1767 builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).AWSEBSVolumeID("old-volume").Result(),1768 ).1769 done(),1770 apiResources: []*test.APIResource{1771 test.PVs(),1772 test.PVCs(),1773 },1774 volumeSnapshots: []*volume.Snapshot{1775 {1776 Spec: volume.SnapshotSpec{1777 BackupName: "backup-1",1778 Location: "default",1779 PersistentVolumeName: "pv-1",1780 },1781 Status: volume.SnapshotStatus{1782 Phase: volume.SnapshotPhaseCompleted,1783 ProviderSnapshotID: "snapshot-1",1784 },1785 },1786 },1787 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{1788 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),1789 },1790 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{1791 "provider-1": &volumeSnapshotter{1792 snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},1793 },1794 },1795 want: []*test.APIResource{1796 test.PVs(1797 builder.ForPersistentVolume("pv-1").1798 ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).1799 AWSEBSVolumeID("new-volume").1800 ObjectMeta(1801 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),1802 ).1803 Result(),1804 ),1805 },1806 },1807 {1808 name: "when a PV with a reclaim policy of retain has a snapshot and does not exist in-cluster, the snapshot and PV are restored",1809 restore: defaultRestore().Result(),1810 backup: defaultBackup().Result(),1811 tarball: newTarWriter(t).1812 addItems("persistentvolumes",1813 builder.ForPersistentVolume("pv-1").1814 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).1815 AWSEBSVolumeID("old-volume").1816 Result(),1817 ).1818 done(),1819 apiResources: []*test.APIResource{1820 test.PVs(),1821 test.PVCs(),1822 },1823 volumeSnapshots: []*volume.Snapshot{1824 {1825 Spec: volume.SnapshotSpec{1826 BackupName: "backup-1",1827 Location: "default",1828 PersistentVolumeName: "pv-1",1829 },1830 Status: volume.SnapshotStatus{1831 Phase: volume.SnapshotPhaseCompleted,1832 ProviderSnapshotID: "snapshot-1",1833 },1834 },1835 },1836 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{1837 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),1838 },1839 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{1840 "provider-1": &volumeSnapshotter{1841 snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},1842 },1843 },1844 want: []*test.APIResource{1845 test.PVs(1846 builder.ForPersistentVolume("pv-1").1847 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).1848 AWSEBSVolumeID("new-volume").1849 ObjectMeta(1850 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),1851 ).1852 Result(),1853 ),1854 },1855 },1856 {1857 name: "when a PV with a reclaim policy of delete has a snapshot and exists in-cluster, neither the snapshot nor the PV are 
restored",1858 restore: defaultRestore().Result(),1859 backup: defaultBackup().Result(),1860 tarball: newTarWriter(t).1861 addItems("persistentvolumes",1862 builder.ForPersistentVolume("pv-1").1863 ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).1864 AWSEBSVolumeID("old-volume").1865 Result(),1866 ).1867 done(),1868 apiResources: []*test.APIResource{1869 test.PVs(1870 builder.ForPersistentVolume("pv-1").1871 ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).1872 AWSEBSVolumeID("old-volume").1873 Result(),1874 ),1875 test.PVCs(),1876 },1877 volumeSnapshots: []*volume.Snapshot{1878 {1879 Spec: volume.SnapshotSpec{1880 BackupName: "backup-1",1881 Location: "default",1882 PersistentVolumeName: "pv-1",1883 },1884 Status: volume.SnapshotStatus{1885 Phase: volume.SnapshotPhaseCompleted,1886 ProviderSnapshotID: "snapshot-1",1887 },1888 },1889 },1890 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{1891 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),1892 },1893 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{1894 // the volume snapshotter fake is not configured with any snapshotID -> volumeID1895 // mappings as a way to verify that the snapshot is not restored, since if it were1896 // restored, we'd get an error of "snapshot not found".1897 "provider-1": &volumeSnapshotter{},1898 },1899 want: []*test.APIResource{1900 test.PVs(1901 builder.ForPersistentVolume("pv-1").1902 ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).1903 AWSEBSVolumeID("old-volume").1904 Result(),1905 ),1906 },1907 },1908 {1909 name: "when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored",1910 restore: defaultRestore().Result(),1911 backup: defaultBackup().Result(),1912 tarball: newTarWriter(t).1913 addItems("persistentvolumes",1914 builder.ForPersistentVolume("pv-1").1915 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).1916 AWSEBSVolumeID("old-volume").1917 Result(),1918 ).1919 done(),1920 apiResources: []*test.APIResource{1921 test.PVs(1922 builder.ForPersistentVolume("pv-1").1923 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).1924 AWSEBSVolumeID("old-volume").1925 Result(),1926 ),1927 test.PVCs(),1928 },1929 volumeSnapshots: []*volume.Snapshot{1930 {1931 Spec: volume.SnapshotSpec{1932 BackupName: "backup-1",1933 Location: "default",1934 PersistentVolumeName: "pv-1",1935 },1936 Status: volume.SnapshotStatus{1937 Phase: volume.SnapshotPhaseCompleted,1938 ProviderSnapshotID: "snapshot-1",1939 },1940 },1941 },1942 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{1943 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),1944 },1945 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{1946 // the volume snapshotter fake is not configured with any snapshotID -> volumeID1947 // mappings as a way to verify that the snapshot is not restored, since if it were1948 // restored, we'd get an error of "snapshot not found".1949 "provider-1": &volumeSnapshotter{},1950 },1951 want: []*test.APIResource{1952 test.PVs(1953 builder.ForPersistentVolume("pv-1").1954 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).1955 AWSEBSVolumeID("old-volume").1956 Result(),1957 ),1958 },1959 },1960 {1961 name: "when a PV with a snapshot is used by a PVC in a namespace that's being remapped, and the original PV exists in-cluster, the PV is renamed",1962 restore: 
defaultRestore().NamespaceMappings("source-ns", "target-ns").Result(),1963 backup: defaultBackup().Result(),1964 tarball: newTarWriter(t).1965 addItems(1966 "persistentvolumes",1967 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),1968 ).1969 addItems(1970 "persistentvolumeclaims",1971 builder.ForPersistentVolumeClaim("source-ns", "pvc-1").VolumeName("source-pv").Result(),1972 ).1973 done(),1974 apiResources: []*test.APIResource{1975 test.PVs(1976 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),1977 ),1978 test.PVCs(),1979 },1980 volumeSnapshots: []*volume.Snapshot{1981 {1982 Spec: volume.SnapshotSpec{1983 BackupName: "backup-1",1984 Location: "default",1985 PersistentVolumeName: "source-pv",1986 },1987 Status: volume.SnapshotStatus{1988 Phase: volume.SnapshotPhaseCompleted,1989 ProviderSnapshotID: "snapshot-1",1990 },1991 },1992 },1993 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{1994 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),1995 },1996 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{1997 "provider-1": &volumeSnapshotter{1998 snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},1999 },2000 },2001 want: []*test.APIResource{2002 test.PVs(2003 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),2004 // note that the renamed PV is not expected to have a claimRef in this test; that would be2005 // added after creation by the Kubernetes PV/PVC controller when it does a bind.2006 builder.ForPersistentVolume("renamed-source-pv").2007 ObjectMeta(2008 builder.WithAnnotations("velero.io/original-pv-name", "source-pv"),2009 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),2010 ).2011 AWSEBSVolumeID("new-volume").2012 Result(),2013 ),2014 test.PVCs(2015 builder.ForPersistentVolumeClaim("target-ns", "pvc-1").2016 ObjectMeta(2017 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),2018 ).2019 VolumeName("renamed-source-pv").2020 Result(),2021 ),2022 },2023 },2024 {2025 name: "when a PV with a snapshot is used by a PVC in a namespace that's being remapped, and the original PV does not exist in-cluster, the PV is not renamed",2026 restore: defaultRestore().NamespaceMappings("source-ns", "target-ns").Result(),2027 backup: defaultBackup().Result(),2028 tarball: newTarWriter(t).2029 addItems(2030 "persistentvolumes",2031 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),2032 ).2033 addItems(2034 "persistentvolumeclaims",2035 builder.ForPersistentVolumeClaim("source-ns", "pvc-1").VolumeName("source-pv").Result(),2036 ).2037 done(),2038 apiResources: []*test.APIResource{2039 test.PVs(),2040 test.PVCs(),2041 },2042 volumeSnapshots: []*volume.Snapshot{2043 {2044 Spec: volume.SnapshotSpec{2045 BackupName: "backup-1",2046 Location: "default",2047 PersistentVolumeName: "source-pv",2048 },2049 Status: volume.SnapshotStatus{2050 Phase: volume.SnapshotPhaseCompleted,2051 ProviderSnapshotID: "snapshot-1",2052 },2053 },2054 },2055 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{2056 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),2057 },2058 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{2059 
"provider-1": &volumeSnapshotter{2060 snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},2061 },2062 },2063 want: []*test.APIResource{2064 test.PVs(2065 builder.ForPersistentVolume("source-pv").2066 ObjectMeta(2067 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),2068 ).2069 AWSEBSVolumeID("new-volume").2070 Result(),2071 ),2072 test.PVCs(2073 builder.ForPersistentVolumeClaim("target-ns", "pvc-1").2074 ObjectMeta(2075 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),2076 ).2077 VolumeName("source-pv").2078 Result(),2079 ),2080 },2081 },2082 {2083 name: "when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored",2084 restore: defaultRestore().Result(),2085 backup: defaultBackup().Result(),2086 tarball: newTarWriter(t).2087 addItems("persistentvolumes",2088 builder.ForPersistentVolume("pv-1").2089 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).2090 AWSEBSVolumeID("old-volume").2091 Result(),2092 ).2093 done(),2094 apiResources: []*test.APIResource{2095 test.PVs(2096 builder.ForPersistentVolume("pv-1").2097 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).2098 AWSEBSVolumeID("old-volume").2099 Result(),2100 ),2101 test.PVCs(),2102 },2103 volumeSnapshots: []*volume.Snapshot{2104 {2105 Spec: volume.SnapshotSpec{2106 BackupName: "backup-1",2107 Location: "default",2108 PersistentVolumeName: "pv-1",2109 },2110 Status: volume.SnapshotStatus{2111 Phase: volume.SnapshotPhaseCompleted,2112 ProviderSnapshotID: "snapshot-1",2113 },2114 },2115 },2116 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{2117 {2118 ObjectMeta: metav1.ObjectMeta{2119 Namespace: velerov1api.DefaultNamespace,2120 Name: "default",2121 },2122 Spec: velerov1api.VolumeSnapshotLocationSpec{2123 Provider: "provider-1",2124 },2125 },2126 },2127 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{2128 // the volume snapshotter fake is not configured with any snapshotID -> volumeID2129 // mappings as a way to verify that the snapshot is not restored, since if it were2130 // restored, we'd get an error of "snapshot not found".2131 "provider-1": &volumeSnapshotter{},2132 },2133 want: []*test.APIResource{2134 test.PVs(2135 builder.ForPersistentVolume("pv-1").2136 ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).2137 AWSEBSVolumeID("old-volume").2138 Result(),2139 ),2140 },2141 },2142 {2143 name: "when a PV with a snapshot is used by a PVC in a namespace that's being remapped, and the original PV exists in-cluster, the PV is renamed by volumesnapshotter",2144 restore: defaultRestore().NamespaceMappings("source-ns", "target-ns").Result(),2145 backup: defaultBackup().Result(),2146 tarball: newTarWriter(t).2147 addItems(2148 "persistentvolumes",2149 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),2150 ).2151 addItems(2152 "persistentvolumeclaims",2153 builder.ForPersistentVolumeClaim("source-ns", "pvc-1").VolumeName("source-pv").Result(),2154 ).2155 done(),2156 apiResources: []*test.APIResource{2157 test.PVs(2158 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),2159 ),2160 test.PVCs(),2161 },2162 volumeSnapshots: []*volume.Snapshot{2163 {2164 Spec: volume.SnapshotSpec{2165 BackupName: "backup-1",2166 Location: "default",2167 PersistentVolumeName: "source-pv",2168 },2169 Status: volume.SnapshotStatus{2170 
Phase: volume.SnapshotPhaseCompleted,2171 ProviderSnapshotID: "snapshot-1",2172 },2173 },2174 },2175 volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{2176 builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),2177 },2178 volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{2179 "provider-1": &volumeSnapshotter{2180 snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},2181 pvName: map[string]string{"new-volume": "volumesnapshotter-renamed-source-pv"},2182 },2183 },2184 want: []*test.APIResource{2185 test.PVs(2186 builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),2187 // note that the renamed PV is not expected to have a claimRef in this test; that would be2188 // added after creation by the Kubernetes PV/PVC controller when it does a bind.2189 builder.ForPersistentVolume("volumesnapshotter-renamed-source-pv").2190 ObjectMeta(2191 builder.WithAnnotations("velero.io/original-pv-name", "source-pv"),2192 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),2193 ).2194 AWSEBSVolumeID("new-volume").2195 Result(),2196 ),2197 test.PVCs(2198 builder.ForPersistentVolumeClaim("target-ns", "pvc-1").2199 ObjectMeta(2200 builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),2201 ).2202 VolumeName("volumesnapshotter-renamed-source-pv").2203 Result(),2204 ),2205 },2206 },2207 }2208 for _, tc := range tests {2209 t.Run(tc.name, func(t *testing.T) {2210 h := newHarness(t)2211 h.restorer.resourcePriorities = []string{"persistentvolumes", "persistentvolumeclaims"}2212 h.restorer.pvRenamer = func(oldName string) (string, error) {2213 renamed := "renamed-" + oldName2214 return renamed, nil2215 }2216 // set up the VolumeSnapshotLocation informer/lister and add test data to it2217 vslInformer := velerov1informers.NewSharedInformerFactory(h.VeleroClient, 0).Velero().V1().VolumeSnapshotLocations()2218 for _, vsl := range tc.volumeSnapshotLocations {2219 require.NoError(t, vslInformer.Informer().GetStore().Add(vsl))2220 }2221 for _, r := range tc.apiResources {2222 h.addItems(t, r)2223 }2224 // Collect the IDs of all of the wanted resources so we can ensure the2225 // exact set exists in the API after restore.2226 wantIDs := make(map[*test.APIResource][]string)2227 for i, resource := range tc.want {2228 wantIDs[tc.want[i]] = []string{}2229 for _, item := range resource.Items {2230 wantIDs[tc.want[i]] = append(wantIDs[tc.want[i]], fmt.Sprintf("%s/%s", item.GetNamespace(), item.GetName()))2231 }2232 }2233 data := Request{2234 Log: h.log,2235 Restore: tc.restore,2236 Backup: tc.backup,2237 VolumeSnapshots: tc.volumeSnapshots,2238 BackupReader: tc.tarball,2239 }2240 warnings, errs := h.restorer.Restore(2241 data,2242 nil, // actions2243 vslInformer.Lister(),2244 tc.volumeSnapshotterGetter,2245 )2246 assertEmptyResults(t, warnings, errs)2247 assertAPIContents(t, h, wantIDs)2248 assertRestoredItems(t, h, tc.want)2249 })2250 }2251}2252type fakeResticRestorerFactory struct {2253 restorer *resticmocks.Restorer2254}2255func (f *fakeResticRestorerFactory) NewRestorer(ctx.Context, *velerov1api.Restore) (restic.Restorer, error) {2256 return f.restorer, nil2257}2258// TestRestoreWithRestic verifies that a call to RestorePodVolumes was made as and when2259// expected for the given pods by using a mock for the restic restorer.2260func TestRestoreWithRestic(t *testing.T) {2261 tests := []struct {2262 name 
string2263 restore *velerov1api.Restore2264 backup *velerov1api.Backup2265 apiResources []*test.APIResource2266 podVolumeBackups []*velerov1api.PodVolumeBackup2267 podWithPVBs, podWithoutPVBs []*corev1api.Pod2268 want map[*test.APIResource][]string2269 }{2270 {2271 name: "a pod that exists in given backup and contains associated PVBs should have should have RestorePodVolumes called",2272 restore: defaultRestore().Result(),2273 backup: defaultBackup().Result(),2274 apiResources: []*test.APIResource{test.Pods()},2275 podVolumeBackups: []*velerov1api.PodVolumeBackup{2276 builder.ForPodVolumeBackup("velero", "pvb-1").PodName("pod-1").SnapshotID("foo").Result(),2277 builder.ForPodVolumeBackup("velero", "pvb-2").PodName("pod-2").SnapshotID("foo").Result(),2278 builder.ForPodVolumeBackup("velero", "pvb-3").PodName("pod-4").SnapshotID("foo").Result(),2279 },2280 podWithPVBs: []*corev1api.Pod{2281 builder.ForPod("ns-1", "pod-2").2282 Result(),2283 builder.ForPod("ns-2", "pod-4").2284 Result(),2285 },2286 podWithoutPVBs: []*corev1api.Pod{2287 builder.ForPod("ns-2", "pod-3").2288 Result(),2289 },2290 want: map[*test.APIResource][]string{2291 test.Pods(): {"ns-1/pod-2", "ns-2/pod-3", "ns-2/pod-4"},2292 },2293 },2294 {2295 name: "a pod that exists in given backup but does not contain associated PVBs should not have should have RestorePodVolumes called",2296 restore: defaultRestore().Result(),2297 backup: defaultBackup().Result(),2298 apiResources: []*test.APIResource{test.Pods()},2299 podVolumeBackups: []*velerov1api.PodVolumeBackup{2300 builder.ForPodVolumeBackup("velero", "pvb-1").PodName("pod-1").Result(),2301 builder.ForPodVolumeBackup("velero", "pvb-2").PodName("pod-2").Result(),2302 },2303 podWithPVBs: []*corev1api.Pod{},2304 podWithoutPVBs: []*corev1api.Pod{2305 builder.ForPod("ns-1", "pod-3").2306 Result(),2307 builder.ForPod("ns-2", "pod-4").2308 Result(),2309 },2310 want: map[*test.APIResource][]string{2311 test.Pods(): {"ns-1/pod-3", "ns-2/pod-4"},2312 },2313 },2314 }2315 for _, tc := range tests {2316 t.Run(tc.name, func(t *testing.T) {2317 h := newHarness(t)2318 restorer := new(resticmocks.Restorer)2319 defer restorer.AssertExpectations(t)2320 h.restorer.resticRestorerFactory = &fakeResticRestorerFactory{2321 restorer: restorer,2322 }2323 // needed only to indicate resource types that can be restored, in this case, pods2324 for _, resource := range tc.apiResources {2325 h.addItems(t, resource)2326 }2327 tarball := newTarWriter(t)2328 // these backed up pods don't have any PVBs associated with them, so a call to RestorePodVolumes is not expected to be made for them2329 for _, pod := range tc.podWithoutPVBs {2330 tarball.addItems("pods", pod)2331 }2332 // these backed up pods have PVBs associated with them, so a call to RestorePodVolumes will be made for each of them2333 for _, pod := range tc.podWithPVBs {2334 tarball.addItems("pods", pod)2335 // the restore process adds these labels before restoring, so we must add them here too otherwise they won't match2336 pod.Labels = map[string]string{"velero.io/backup-name": tc.backup.Name, "velero.io/restore-name": tc.restore.Name}2337 expectedArgs := restic.RestoreData{2338 Restore: tc.restore,2339 Pod: pod,2340 PodVolumeBackups: tc.podVolumeBackups,2341 SourceNamespace: pod.Namespace,2342 BackupLocation: "",2343 }2344 restorer.2345 On("RestorePodVolumes", expectedArgs).2346 Return(nil)2347 }2348 data := Request{2349 Log: h.log,2350 Restore: tc.restore,2351 Backup: tc.backup,2352 PodVolumeBackups: tc.podVolumeBackups,2353 BackupReader: 
tarball.done(),2354 }2355 warnings, errs := h.restorer.Restore(2356 data,2357 nil, // actions2358 nil, // snapshot location lister2359 nil, // volume snapshotter getter2360 )2361 assertEmptyResults(t, warnings, errs)2362 assertAPIContents(t, h, tc.want)2363 })2364 }2365}2366func TestResetMetadataAndStatus(t *testing.T) {2367 tests := []struct {2368 name string2369 obj *unstructured.Unstructured2370 expectedErr bool2371 expectedRes *unstructured.Unstructured2372 }{2373 {2374 name: "no metadata causes error",2375 obj: &unstructured.Unstructured{},2376 expectedErr: true,2377 },2378 {2379 name: "keep name, namespace, labels, annotations only",2380 obj: NewTestUnstructured().WithMetadata("name", "blah", "namespace", "labels", "annotations", "foo").Unstructured,2381 expectedErr: false,2382 expectedRes: NewTestUnstructured().WithMetadata("name", "namespace", "labels", "annotations").Unstructured,2383 },2384 {2385 name: "don't keep status",2386 obj: NewTestUnstructured().WithMetadata().WithStatus().Unstructured,2387 expectedErr: false,2388 expectedRes: NewTestUnstructured().WithMetadata().Unstructured,2389 },2390 }2391 for _, test := range tests {2392 t.Run(test.name, func(t *testing.T) {2393 res, err := resetMetadataAndStatus(test.obj)2394 if assert.Equal(t, test.expectedErr, err != nil) {2395 assert.Equal(t, test.expectedRes, res)2396 }2397 })2398 }2399}2400func TestIsCompleted(t *testing.T) {2401 tests := []struct {2402 name string2403 expected bool2404 content string2405 groupResource schema.GroupResource2406 expectedErr bool2407 }{2408 {2409 name: "Failed pods are complete",2410 expected: true,2411 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}, "status": {"phase": "Failed"}}`,2412 groupResource: schema.GroupResource{Group: "", Resource: "pods"},2413 },2414 {2415 name: "Succeeded pods are complete",2416 expected: true,2417 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}, "status": {"phase": "Succeeded"}}`,2418 groupResource: schema.GroupResource{Group: "", Resource: "pods"},2419 },2420 {2421 name: "Pending pods aren't complete",2422 expected: false,2423 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}, "status": {"phase": "Pending"}}`,2424 groupResource: schema.GroupResource{Group: "", Resource: "pods"},2425 },2426 {2427 name: "Running pods aren't complete",2428 expected: false,2429 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}, "status": {"phase": "Running"}}`,2430 groupResource: schema.GroupResource{Group: "", Resource: "pods"},2431 },2432 {2433 name: "Jobs without a completion time aren't complete",2434 expected: false,2435 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}}`,2436 groupResource: schema.GroupResource{Group: "batch", Resource: "jobs"},2437 },2438 {2439 name: "Jobs with a completion time are completed",2440 expected: true,2441 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}, "status": {"completionTime": "bar"}}`,2442 groupResource: schema.GroupResource{Group: "batch", Resource: "jobs"},2443 },2444 {2445 name: "Jobs with an empty completion time are not completed",2446 expected: false,2447 content: `{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"ns","name":"pod1"}, "status": {"completionTime": ""}}`,2448 groupResource: schema.GroupResource{Group: "batch", Resource: "jobs"},2449 },2450 {2451 name: "Something not a pod or a job may actually be 
complete, but we're not concerned with that",2452 expected: false,2453 content: `{"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "ns"}, "status": {"completionTime": "bar", "phase":"Completed"}}`,2454 groupResource: schema.GroupResource{Group: "", Resource: "namespaces"},2455 },2456 }2457 for _, test := range tests {2458 t.Run(test.name, func(t *testing.T) {2459 u := testutil.UnstructuredOrDie(test.content)2460 backup, err := isCompleted(u, test.groupResource)2461 if assert.Equal(t, test.expectedErr, err != nil) {2462 assert.Equal(t, test.expected, backup)2463 }2464 })2465 }2466}2467func TestGetItemFilePath(t *testing.T) {2468 res := getItemFilePath("root", "resource", "", "item")2469 assert.Equal(t, "root/resources/resource/cluster/item.json", res)2470 res = getItemFilePath("root", "resource", "namespace", "item")2471 assert.Equal(t, "root/resources/resource/namespaces/namespace/item.json", res)2472}2473func Test_getOrderedResources(t *testing.T) {2474 tests := []struct {2475 name string2476 resourcePriorities []string2477 backupResources map[string]*archive.ResourceItems2478 want []string2479 }{2480 {2481 name: "when only priorities are specified, they're returned in order",2482 resourcePriorities: []string{"prio-3", "prio-2", "prio-1"},2483 backupResources: nil,2484 want: []string{"prio-3", "prio-2", "prio-1"},2485 },2486 {2487 name: "when only backup resources are specified, they're returned in alphabetical order",2488 resourcePriorities: nil,2489 backupResources: map[string]*archive.ResourceItems{2490 "backup-resource-3": nil,2491 "backup-resource-2": nil,2492 "backup-resource-1": nil,2493 },2494 want: []string{"backup-resource-1", "backup-resource-2", "backup-resource-3"},2495 },2496 {2497 name: "when priorities and backup resources are specified, they're returned in the correct order",2498 resourcePriorities: []string{"prio-3", "prio-2", "prio-1"},2499 backupResources: map[string]*archive.ResourceItems{2500 "prio-3": nil,2501 "backup-resource-3": nil,2502 "backup-resource-2": nil,2503 "backup-resource-1": nil,2504 },2505 want: []string{"prio-3", "prio-2", "prio-1", "backup-resource-1", "backup-resource-2", "backup-resource-3", "prio-3"},2506 },2507 }2508 for _, tc := range tests {2509 t.Run(tc.name, func(t *testing.T) {2510 assert.Equal(t, tc.want, getOrderedResources(tc.resourcePriorities, tc.backupResources))2511 })2512 }2513}2514// assertResourceCreationOrder ensures that resources were created in the expected2515// order. Any resources *not* in resourcePriorities are required to come *after* all2516// resources in any order.2517func assertResourceCreationOrder(t *testing.T, resourcePriorities []string, createdResources []resourceID) {2518 // lastSeen tracks the index in 'resourcePriorities' of the last resource type2519 // we saw created. Once we've seen a resource in 'resourcePriorities', we should2520 // never see another instance of a prior resource.2521 lastSeen := 02522 // Find the index in 'resourcePriorities' of the resource type for2523 // the current item, if it exists. This index ('current') *must*2524 // be greater than or equal to 'lastSeen', which was the last resource2525 // we saw, since otherwise the current resource would be out of order. 
By2526 // initializing current to len(ordered), we're saying that if the resource2527 // is not explicitly in orderedResources, then it must come *after*2528 // all orderedResources.2529 for _, r := range createdResources {2530 current := len(resourcePriorities)2531 for i, item := range resourcePriorities {2532 if item == r.groupResource {2533 current = i2534 break2535 }2536 }2537 // the index of the current resource must be the same as or greater than the index of2538 // the last resource we saw for the restored order to be correct.2539 assert.True(t, current >= lastSeen, "%s was restored out of order", r.groupResource)2540 lastSeen = current2541 }2542}2543type resourceID struct {2544 groupResource string2545 nsAndName string2546}2547// createRecorder provides a Reactor that can be used to capture2548// resources created in a fake client.2549type createRecorder struct {2550 t *testing.T2551 resources []resourceID2552}2553func (cr *createRecorder) reactor() func(kubetesting.Action) (bool, runtime.Object, error) {2554 return func(action kubetesting.Action) (bool, runtime.Object, error) {2555 createAction, ok := action.(kubetesting.CreateAction)2556 if !ok {2557 return false, nil, nil2558 }2559 accessor, err := meta.Accessor(createAction.GetObject())2560 assert.NoError(cr.t, err)2561 cr.resources = append(cr.resources, resourceID{2562 groupResource: action.GetResource().GroupResource().String(),2563 nsAndName: fmt.Sprintf("%s/%s", action.GetNamespace(), accessor.GetName()),2564 })2565 return false, nil, nil2566 }2567}2568func defaultRestore() *builder.RestoreBuilder {2569 return builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Backup("backup-1")2570}2571// assertAPIContents asserts that the dynamic client on the provided harness contains2572// all of the items specified in 'want' (a map from an APIResource definition to a slice2573// of resource identifiers, formatted as <namespace>/<name>).2574func assertAPIContents(t *testing.T, h *harness, want map[*test.APIResource][]string) {2575 t.Helper()2576 for r, want := range want {2577 res, err := h.DynamicClient.Resource(r.GVR()).List(metav1.ListOptions{})2578 assert.NoError(t, err)2579 if err != nil {2580 continue2581 }2582 got := sets.NewString()2583 for _, item := range res.Items {2584 got.Insert(fmt.Sprintf("%s/%s", item.GetNamespace(), item.GetName()))2585 }2586 assert.Equal(t, sets.NewString(want...), got)2587 }2588}2589func assertEmptyResults(t *testing.T, res ...Result) {2590 t.Helper()2591 for _, r := range res {2592 assert.Empty(t, r.Cluster)2593 assert.Empty(t, r.Namespaces)2594 assert.Empty(t, r.Velero)2595 }2596}2597type tarWriter struct {2598 t *testing.T2599 buf *bytes.Buffer2600 gzw *gzip.Writer2601 tw *tar.Writer2602}2603func newTarWriter(t *testing.T) *tarWriter {2604 tw := new(tarWriter)2605 tw.t = t2606 tw.buf = new(bytes.Buffer)2607 tw.gzw = gzip.NewWriter(tw.buf)2608 tw.tw = tar.NewWriter(tw.gzw)2609 return tw2610}2611func (tw *tarWriter) addItems(groupResource string, items ...metav1.Object) *tarWriter {2612 tw.t.Helper()2613 for _, obj := range items {2614 var path string2615 if obj.GetNamespace() == "" {2616 path = fmt.Sprintf("resources/%s/cluster/%s.json", groupResource, obj.GetName())2617 } else {2618 path = fmt.Sprintf("resources/%s/namespaces/%s/%s.json", groupResource, obj.GetNamespace(), obj.GetName())2619 }2620 tw.add(path, obj)2621 }2622 return tw2623}2624func (tw *tarWriter) add(name string, obj interface{}) *tarWriter {2625 tw.t.Helper()2626 var data []byte2627 var err error2628 switch obj.(type) 
{2629 case runtime.Object:2630 data, err = encode.Encode(obj.(runtime.Object), "json")2631 case []byte:2632 data = obj.([]byte)2633 default:2634 data, err = json.Marshal(obj)2635 }2636 require.NoError(tw.t, err)2637 require.NoError(tw.t, tw.tw.WriteHeader(&tar.Header{2638 Name: name,2639 Size: int64(len(data)),2640 Typeflag: tar.TypeReg,2641 Mode: 0755,2642 ModTime: time.Now(),2643 }))2644 _, err = tw.tw.Write(data)2645 require.NoError(tw.t, err)2646 return tw2647}2648func (tw *tarWriter) done() *bytes.Buffer {2649 require.NoError(tw.t, tw.tw.Close())2650 require.NoError(tw.t, tw.gzw.Close())2651 return tw.buf2652}2653type harness struct {2654 *test.APIServer2655 restorer *kubernetesRestorer2656 log logrus.FieldLogger2657}2658func newHarness(t *testing.T) *harness {2659 t.Helper()2660 apiServer := test.NewAPIServer(t)2661 log := logrus.StandardLogger()2662 discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, log)2663 require.NoError(t, err)2664 return &harness{2665 APIServer: apiServer,2666 restorer: &kubernetesRestorer{2667 discoveryHelper: discoveryHelper,2668 dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient),2669 namespaceClient: apiServer.KubeClient.CoreV1().Namespaces(),2670 resourceTerminatingTimeout: time.Minute,2671 logger: log,2672 fileSystem: testutil.NewFakeFileSystem(),2673 // unsupported2674 resticRestorerFactory: nil,2675 resticTimeout: 0,2676 },2677 log: log,2678 }2679}2680func (h *harness) addItems(t *testing.T, resource *test.APIResource) {2681 t.Helper()2682 h.DiscoveryClient.WithAPIResource(resource)2683 require.NoError(t, h.restorer.discoveryHelper.Refresh())2684 for _, item := range resource.Items {2685 obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item)2686 require.NoError(t, err)2687 unstructuredObj := &unstructured.Unstructured{Object: obj}2688 // These fields have non-nil zero values in the unstructured objects. We remove2689 // them to make comparison easier in our tests.2690 unstructured.RemoveNestedField(unstructuredObj.Object, "metadata", "creationTimestamp")2691 unstructured.RemoveNestedField(unstructuredObj.Object, "status")2692 if resource.Namespaced {2693 _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(unstructuredObj, metav1.CreateOptions{})2694 } else {2695 _, err = h.DynamicClient.Resource(resource.GVR()).Create(unstructuredObj, metav1.CreateOptions{})2696 }2697 require.NoError(t, err)2698 }2699}...
rt_test.go
Source:rt_test.go
...307 items := generateLinearItems(number)308 cfg := defaultConfig()309 rt := New(cfg)310 mutable := rt.AsMutable()311 _, err := mutable.AddItems(items...)312 require.NoError(t, err)313 assert.Equal(t, number, mutable.Len())314 mutable.(*Tr).verify(mutable.(*Tr).Root, t)315 result, err := mutable.(*Tr).toList(itemsToValues(items[5:10]...)...)316 require.NoError(t, err)317 if !assert.Equal(t, items[5:10], result) {318 mutable.(*Tr).pprint(mutable.(*Tr).Root)319 for i, c := range items[5:10] {320 t.Logf(`EXPECTED: %+v, RESULT: %+v`, c, result[i])321 }322 t.FailNow()323 }324 mutable = rt.AsMutable()325 for _, c := range items {326 _, err := mutable.AddItems(c)327 require.NoError(t, err)328 }329 result, err = mutable.(*Tr).toList(itemsToValues(items...)...)330 require.NoError(t, err)331 assert.Equal(t, items, result)332 mutable.(*Tr).verify(mutable.(*Tr).Root, t)333 rt, err = mutable.Commit()334 require.NoError(t, err)335 rt, err = Load(cfg.Persister, rt.ID(), comparator)336 result, err = mutable.(*Tr).toList(itemsToValues(items...)...)337 require.NoError(t, err)338 assert.Equal(t, items, result)339 rt.(*Tr).verify(rt.(*Tr).Root, t)340}341func TestReverseNodeSplit(t *testing.T) {342 number := 400343 items := generateLinearItems(number)344 reversed := make([]*Item, len(items))345 copy(reversed, items)346 reversed = reverse(reversed)347 rt := New(defaultConfig())348 mutable := rt.AsMutable()349 _, err := mutable.AddItems(reversed...)350 require.NoError(t, err)351 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)352 require.NoError(t, err)353 if !assert.Equal(t, items, result) {354 for _, c := range result {355 t.Logf(`RESULT: %+v`, c)356 }357 }358 mutable = rt.AsMutable()359 for _, c := range reversed {360 _, err := mutable.AddItems(c)361 require.NoError(t, err)362 }363 result, err = mutable.(*Tr).toList(itemsToValues(items...)...)364 require.NoError(t, err)365 assert.Equal(t, items, result)366 mutable.(*Tr).verify(mutable.(*Tr).Root, t)367}368func TestDuplicate(t *testing.T) {369 item1 := newItem(int64(1))370 item2 := newItem(int64(1))371 rt := New(defaultConfig())372 mutable := rt.AsMutable()373 _, err := mutable.AddItems(item1)374 require.NoError(t, err)375 _, err = mutable.AddItems(item2)376 require.NoError(t, err)377 assert.Equal(t, 1, mutable.Len())378 result, err := mutable.(*Tr).toList(int64(1))379 require.NoError(t, err)380 assert.Equal(t, items{item2}, result)381 mutable.(*Tr).verify(mutable.(*Tr).Root, t)382}383func TestCommit(t *testing.T) {384 items := generateRandomItems(5)385 rt := New(defaultConfig())386 mutable := rt.AsMutable()387 _, err := mutable.AddItems(items...)388 require.Nil(t, err)389 rt, err = mutable.Commit()390 require.NoError(t, err)391 expected := toOrdered(items).toItems()392 result, err := rt.(*Tr).toList(itemsToValues(expected...)...)393 require.NoError(t, err)394 if !assert.Equal(t, expected, result) {395 require.Equal(t, len(expected), len(result))396 for i, c := range expected {397 if !assert.Equal(t, c, result[i]) {398 t.Logf(`EXPECTED: %+v, RESULT: %+v`, c, result[i])399 }400 }401 }402 rt.(*Tr).verify(rt.(*Tr).Root, t)403}404func TestRandom(t *testing.T) {405 items := generateRandomItems(1000)406 rt := New(defaultConfig())407 mutable := rt.AsMutable()408 _, err := mutable.AddItems(items...)409 require.Nil(t, err)410 require.NoError(t, err)411 expected := toOrdered(items).toItems()412 result, err := mutable.(*Tr).toList(itemsToValues(expected...)...)413 if !assert.Equal(t, expected, result) {414 assert.Equal(t, len(expected), len(result))415 
for i, c := range expected {416 assert.Equal(t, c, result[i])417 }418 }419 mutable.(*Tr).verify(mutable.(*Tr).Root, t)420}421func TestLoad(t *testing.T) {422 cfg := defaultConfig()423 rt := New(cfg)424 mutable := rt.AsMutable()425 items := generateRandomItems(1000)426 _, err := mutable.AddItems(items...)427 require.NoError(t, err)428 id := mutable.ID()429 _, err = mutable.Commit()430 require.NoError(t, err)431 rt, err = Load(cfg.Persister, id, comparator)432 require.NoError(t, err)433 sort.Sort(orderedItems(items))434 result, err := rt.(*Tr).toList(itemsToValues(items...)...)435 require.NoError(t, err)436 assert.Equal(t, items, result)437 rt.(*Tr).verify(rt.(*Tr).Root, t)438}439func TestDeleteFromRoot(t *testing.T) {440 number := 5441 cfg := defaultConfig()442 rt := New(cfg)443 mutable := rt.AsMutable()444 items := generateLinearItems(number)445 mutable.AddItems(items...)446 mutable.DeleteItems(items[0].Value, items[1].Value, items[2].Value)447 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)448 require.Nil(t, err)449 assert.Equal(t, items[3:], result)450 assert.Equal(t, 2, mutable.Len())451 mutable.(*Tr).verify(mutable.(*Tr).Root, t)452}453func TestDeleteAllFromRoot(t *testing.T) {454 num := 5455 cfg := defaultConfig()456 rt := New(cfg)457 mutable := rt.AsMutable()458 items := generateLinearItems(num)459 mutable.AddItems(items...)460 mutable.DeleteItems(itemsToValues(items...)...)461 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)462 require.Nil(t, err)463 assert.Empty(t, result)464 assert.Equal(t, 0, mutable.Len())465}466func TestDeleteAfterSplitIncreasing(t *testing.T) {467 num := 11468 cfg := defaultConfig()469 rt := New(cfg)470 mutable := rt.AsMutable()471 items := generateLinearItems(num)472 mutable.AddItems(items...)473 for i := 0; i < num-1; i++ {474 mutable.DeleteItems(itemsToValues(items[i])...)475 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)476 require.Nil(t, err)477 assert.Equal(t, items[i+1:], result)478 mutable.(*Tr).verify(mutable.(*Tr).Root, t)479 }480}481func TestDeleteMultipleLevelsRandomlyBulk(t *testing.T) {482 num := 200483 cfg := defaultConfig()484 rt := New(cfg)485 mutable := rt.AsMutable()486 items := generateRandomItems(num)487 mutable.AddItems(items...)488 mutable.DeleteItems(itemsToValues(items[:100]...)...)489 result, _ := mutable.(*Tr).toList(itemsToValues(items...)...)490 assert.Len(t, result, 100)491}492func TestDeleteAfterSplitDecreasing(t *testing.T) {493 num := 11494 cfg := defaultConfig()495 rt := New(cfg)496 mutable := rt.AsMutable()497 items := generateLinearItems(num)498 mutable.AddItems(items...)499 for i := num - 1; i >= 0; i-- {500 mutable.DeleteItems(itemsToValues(items[i])...)501 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)502 require.Nil(t, err)503 assert.Equal(t, items[:i], result)504 if i > 0 {505 mutable.(*Tr).verify(mutable.(*Tr).Root, t)506 }507 }508}509func TestDeleteMultipleLevels(t *testing.T) {510 num := 20511 cfg := defaultConfig()512 rt := New(cfg)513 mutable := rt.AsMutable()514 items := generateRandomItems(num)515 mutable.AddItems(items...)516 ordered := toOrdered(items)517 for i, c := range ordered {518 _, err := mutable.DeleteItems(c.Value)519 require.NoError(t, err)520 result, err := mutable.(*Tr).toList(itemsToValues(ordered...)...)521 require.NoError(t, err)522 if !assert.Equal(t, ordered[i+1:].toItems(), result) {523 log.Printf(`LEN EXPECTED: %+v, RESULT: %+v`, len(ordered[i+1:]), len(result))524 mutable.(*Tr).pprint(mutable.(*Tr).Root)525 assert.Equal(t, 
len(ordered[i+1:]), len(result))526 for i, c := range ordered[i+1:] {527 log.Printf(`EXPECTED: %+v`, c)528 if i < len(result) {529 log.Printf(`RECEIVED: %+v`, result[i])530 }531 }532 break533 }534 if len(ordered[i+1:]) > 0 {535 mutable.(*Tr).verify(mutable.(*Tr).Root, t)536 }537 }538 assert.Nil(t, mutable.(*Tr).Root)539}540func TestDeleteMultipleLevelsRandomly(t *testing.T) {541 num := 200542 cfg := defaultConfig()543 rt := New(cfg)544 mutable := rt.AsMutable()545 items := generateRandomItems(num)546 mutable.AddItems(items...)547 ordered := toOrdered(items)548 for _, c := range items {549 _, err := mutable.DeleteItems(c.Value)550 require.NoError(t, err)551 ordered = ordered.delete(c)552 result, err := mutable.(*Tr).toList(itemsToValues(ordered...)...)553 require.NoError(t, err)554 assert.Equal(t, ordered.toItems(), result)555 if len(ordered) > 0 {556 mutable.(*Tr).verify(mutable.(*Tr).Root, t)557 }558 }559 assert.Nil(t, mutable.(*Tr).Root)560}561func TestDeleteMultipleLevelsWithCommit(t *testing.T) {562 num := 20563 cfg := defaultConfig()564 rt := New(cfg)565 mutable := rt.AsMutable()566 items := generateRandomItems(num)567 mutable.AddItems(items...)568 rt, _ = mutable.Commit()569 rt, _ = Load(cfg.Persister, rt.ID(), comparator)570 result, err := rt.(*Tr).toList(itemsToValues(items...)...)571 require.NoError(t, err)572 assert.Equal(t, items, result)573 mutable = rt.AsMutable()574 for _, c := range items[:10] {575 _, err := mutable.DeleteItems(c.Value)576 require.Nil(t, err)577 }578 result, err = mutable.(*Tr).toList(itemsToValues(items[10:]...)...)579 require.Nil(t, err)580 assert.Equal(t, items[10:], result)581 mutable.(*Tr).verify(mutable.(*Tr).Root, t)582 result, err = rt.(*Tr).toList(itemsToValues(items...)...)583 require.NoError(t, err)584 assert.Equal(t, items, result)585 rt.(*Tr).verify(rt.(*Tr).Root, t)586}587func TestCommitAfterDelete(t *testing.T) {588 num := 15589 cfg := defaultConfig()590 rt := New(cfg)591 mutable := rt.AsMutable()592 items := generateRandomItems(num)593 mutable.AddItems(items...)594 for _, c := range items[:5] {595 mutable.DeleteItems(c.Value)596 mutable.(*Tr).verify(mutable.(*Tr).Root, t)597 }598 rt, err := mutable.Commit()599 require.Nil(t, err)600 result, err := rt.(*Tr).toList(itemsToValues(items...)...)601 require.Nil(t, err)602 assert.Equal(t, items[5:], result)603 rt.(*Tr).verify(rt.(*Tr).Root, t)604}605func TestSecondCommitSplitsRoot(t *testing.T) {606 number := 15607 cfg := defaultConfig()608 rt := New(cfg)609 items := generateLinearItems(number)610 mutable := rt.AsMutable()611 mutable.AddItems(items[:10]...)612 mutable.(*Tr).verify(mutable.(*Tr).Root, t)613 rt, _ = mutable.Commit()614 rt.(*Tr).verify(rt.(*Tr).Root, t)615 mutable = rt.AsMutable()616 mutable.AddItems(items[10:]...)617 mutable.(*Tr).verify(mutable.(*Tr).Root, t)618 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)619 require.Nil(t, err)620 if !assert.Equal(t, items, result) {621 for i, c := range items {622 log.Printf(`EXPECTED: %+v, RECEIVED: %+v`, c, result[i])623 }624 }625}626func TestSecondCommitMultipleSplits(t *testing.T) {627 num := 50628 cfg := defaultConfig()629 rt := New(cfg)630 items := generateRandomItems(num)631 mutable := rt.AsMutable()632 mutable.AddItems(items[:25]...)633 mutable.(*Tr).verify(mutable.(*Tr).Root, t)634 rt, err := mutable.Commit()635 rt.(*Tr).verify(rt.(*Tr).Root, t)636 result, err := rt.(*Tr).toList(itemsToValues(items...)...)637 require.Nil(t, err)638 assert.Equal(t, items[:25], result)639 mutable = rt.AsMutable()640 
mutable.AddItems(items[25:]...)641 mutable.(*Tr).verify(mutable.(*Tr).Root, t)642 sort.Sort(orderedItems(items))643 result, err = mutable.(*Tr).toList(itemsToValues(items...)...)644 require.Nil(t, err)645 if !assert.Equal(t, items, result) {646 mutable.(*Tr).pprint(mutable.(*Tr).Root)647 }648}649func TestLargeAdd(t *testing.T) {650 cfg := defaultConfig()651 number := cfg.NodeWidth * 5652 rt := New(cfg)653 items := generateLinearItems(number)654 mutable := rt.AsMutable()655 _, err := mutable.AddItems(items...)656 require.NoError(t, err)657 id := mutable.ID()658 result, err := mutable.(*Tr).toList(itemsToValues(items...)...)659 require.NoError(t, err)660 assert.Equal(t, items, result)661 _, err = mutable.Commit()662 require.NoError(t, err)663 rt, err = Load(cfg.Persister, id, comparator)664 require.NoError(t, err)665 result, err = rt.(*Tr).toList(itemsToValues(items...)...)666 require.NoError(t, err)667 assert.Equal(t, items, result)668}669func TestNodeInfiniteLoop(t *testing.T) {670 cfg := defaultConfig()671 rt := New(cfg)672 items := generateLinearItems(3)673 mutable := rt.AsMutable()674 _, err := mutable.AddItems(items...)675 require.NoError(t, err)676 result, err := mutable.DeleteItems(items[1].Value, items[2].Value)677 require.NoError(t, err)678 assert.Len(t, result, 2)679}680// all remaining tests are generative in nature to catch things681// I can't think of.682func TestGenerativeAdds(t *testing.T) {683 if testing.Short() {684 t.Skipf(`skipping generative add`)685 return686 }687 number := 100688 cfg := defaultConfig()689 rt := New(cfg)690 oc := make(orderedItems, 0)691 for i := 0; i < number; i++ {692 num := int(rand.Int31n(100))693 if num == 0 {694 num++695 }696 items := generateRandomItems(num)697 mutated := oc.copy()698 for _, c := range items {699 mutated = mutated.add(c)700 }701 mutable := rt.AsMutable()702 _, err := mutable.AddItems(items...)703 require.Nil(t, err)704 mutable.(*Tr).verify(mutable.(*Tr).Root, t)705 rtMutated, err := mutable.Commit()706 require.Nil(t, err)707 rtMutated.(*Tr).verify(rtMutated.(*Tr).Root, t)708 result, err := rtMutated.(*Tr).toList(itemsToValues(mutated.toItems()...)...)709 require.Nil(t, err)710 if !assert.Equal(t, mutated.toItems(), result) {711 rtMutated.(*Tr).pprint(rtMutated.(*Tr).Root)712 if len(mutated) == len(result) {713 for i, c := range mutated.toItems() {714 log.Printf(`EXPECTED: %+v, RECEIVED: %+v`, c, result[i])715 }716 }717 }718 assert.Equal(t, len(mutated), rtMutated.Len())719 result, err = rt.(*Tr).toList(itemsToValues(oc.toItems()...)...)720 require.Nil(t, err)721 assert.Equal(t, oc.toItems(), result)722 oc = mutated723 rt = rtMutated724 }725}726func TestGenerativeDeletes(t *testing.T) {727 if testing.Short() {728 t.Skipf(`skipping generative delete`)729 return730 }731 number := 100732 var err error733 cfg := defaultConfig()734 rt := New(cfg)735 oc := toOrdered(generateRandomItems(1000))736 mutable := rt.AsMutable()737 mutable.AddItems(oc.toItems()...)738 mutable.(*Tr).verify(mutable.(*Tr).Root, t)739 rt, err = mutable.Commit()740 require.NoError(t, err)741 for i := 0; i < number; i++ {742 mutable = rt.AsMutable()743 index := rand.Intn(len(oc))744 c := oc[index]745 mutated := oc.delete(c)746 result, err := rt.(*Tr).toList(itemsToValues(oc.toItems()...)...)747 require.NoError(t, err)748 assert.Equal(t, oc.toItems(), result)749 assert.Equal(t, len(oc), rt.Len())750 _, err = mutable.DeleteItems(c.Value)751 require.NoError(t, err)752 mutable.(*Tr).verify(mutable.(*Tr).Root, t)753 result, err = 
mutable.(*Tr).toList(itemsToValues(mutated.toItems()...)...)754 require.NoError(t, err)755 assert.Equal(t, len(mutated), len(result))756 require.Equal(t, mutated.toItems(), result)757 oc = mutated758 rt, err = mutable.Commit()759 require.NoError(t, err)760 }761}762func TestGenerativeOperations(t *testing.T) {763 if testing.Short() {764 t.Skipf(`skipping generative operations`)765 return766 }767 number := 100768 cfg := defaultConfig()769 rt := New(cfg)770 // seed the tree771 items := generateRandomItems(1000)772 oc := toOrdered(items)773 mutable := rt.AsMutable()774 mutable.AddItems(items...)775 result, err := mutable.(*Tr).toList(itemsToValues(oc.toItems()...)...)776 require.NoError(t, err)777 require.Equal(t, oc.toItems(), result)778 rt, err = mutable.Commit()779 require.NoError(t, err)780 for i := 0; i < number; i++ {781 mutable = rt.AsMutable()782 if rand.Float64() < .5 && len(oc) > 0 {783 c := oc[rand.Intn(len(oc))]784 oc = oc.delete(c)785 _, err = mutable.DeleteItems(c.Value)786 require.NoError(t, err)787 mutable.(*Tr).verify(mutable.(*Tr).Root, t)788 result, err := mutable.(*Tr).toList(itemsToValues(oc.toItems()...)...)789 require.NoError(t, err)790 require.Equal(t, oc.toItems(), result)791 assert.Equal(t, len(oc), mutable.Len())792 } else {793 c := generateRandomItem()794 oc = oc.add(c)795 _, err = mutable.AddItems(c)796 require.NoError(t, err)797 mutable.(*Tr).verify(mutable.(*Tr).Root, t)798 result, err = mutable.(*Tr).toList(itemsToValues(oc.toItems()...)...)799 require.NoError(t, err)800 require.Equal(t, oc.toItems(), result)801 assert.Equal(t, len(oc), mutable.Len())802 }803 rt, err = mutable.Commit()804 require.NoError(t, err)805 }806}807func BenchmarkGetitems(b *testing.B) {808 number := 100809 cfg := defaultConfig()810 cfg.Persister = newDelayed()811 rt := New(cfg)812 items := generateRandomItems(number)813 mutable := rt.AsMutable()814 _, err := mutable.AddItems(items...)815 require.NoError(b, err)816 rt, err = mutable.Commit()817 require.NoError(b, err)818 id := rt.ID()819 b.ResetTimer()820 for i := 0; i < b.N; i++ {821 rt, err = Load(cfg.Persister, id, comparator)822 require.NoError(b, err)823 _, err = rt.(*Tr).toList(itemsToValues(items...)...)824 require.NoError(b, err)825 }826}827func BenchmarkBulkAdd(b *testing.B) {828 number := 1000000829 items := generateLinearItems(number)830 b.ResetTimer()831 for i := 0; i < b.N; i++ {832 tr := New(defaultConfig())833 mutable := tr.AsMutable()834 mutable.AddItems(items...)835 }836}...
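Read end to end, the rt_test.go excerpts above repeat one workflow: construct a tree, take a mutable view, AddItems, Commit, then Load the committed tree by ID. The sketch below condenses that cycle into a single test using only identifiers that already appear in these tests (defaultConfig, generateLinearItems, itemsToValues, comparator, the *Tr assertions and the testify helpers the test file already imports); it summarizes what the existing tests do under those assumptions rather than documenting the package's public API.

func TestAddCommitReload(t *testing.T) {
	cfg := defaultConfig()
	rt := New(cfg)

	// take a mutable view of the immutable tree and bulk-insert items
	mutable := rt.AsMutable()
	items := generateLinearItems(100)
	_, err := mutable.AddItems(items...)
	require.NoError(t, err)

	// commit the mutation, producing a new immutable tree backed by the persister
	rt, err = mutable.Commit()
	require.NoError(t, err)

	// reload the committed tree by ID and confirm every added item is present
	rt, err = Load(cfg.Persister, rt.ID(), comparator)
	require.NoError(t, err)
	result, err := rt.(*Tr).toList(itemsToValues(items...)...)
	require.NoError(t, err)
	assert.Equal(t, items, result)
}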
AddItems
Using AI Code Generation
result := NewResult()
result.AddItems(1)
result.AddItems(2)
result.AddItems(3)
result.AddItems(4)
result := NewResult()
result.AddItems(5)
result.AddItems(6)
result.AddItems(7)
result.AddItems(8)
result := NewResult()
result.AddItems(9)
result.AddItems(10)
result.AddItems(11)
result.AddItems(12)
result := NewResult()
result.AddItems(13)
result.AddItems(14)
result.AddItems(15)
result.AddItems(16)
result := NewResult()
result.AddItems(17)
result.AddItems(18)
result.AddItems(19)
result.AddItems(20)
result := NewResult()
result.AddItems(21)
result.AddItems(22)
result.AddItems(23)
result.AddItems(24)
result := NewResult()
result.AddItems(25)
result.AddItems(26)
result.AddItems(27)
result.AddItems(28)
result := NewResult()
result.AddItems(29)
result.AddItems(30)
result.AddItems(31)
result.AddItems(32)
result := NewResult()
result.AddItems(33)
result.AddItems(34)
result.AddItems(35)
result.AddItems(36)
result := NewResult()
result.AddItems(37)
result.AddItems(38)
result.AddItems(39)
result.AddItems(40)
result := NewResult()
result.AddItems(41)
result.AddItems(42)
result.AddItems(43)
result.AddItems(44)
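The snippet above calls NewResult and AddItems without showing their definitions, and neither appears in the listings on this page. The following is only a minimal, self-contained sketch of what such a type might look like; the names Result, NewResult, Items, and the int element type are assumptions made for illustration, not part of any real package shown here.

package main

import "fmt"

// Result is a hypothetical container matching the calls in the snippet above.
type Result struct {
	Items []int
}

// NewResult returns an empty Result.
func NewResult() *Result {
	return &Result{}
}

// AddItems appends one or more values to the result.
func (r *Result) AddItems(items ...int) {
	r.Items = append(r.Items, items...)
}

func main() {
	result := NewResult()
	result.AddItems(1)
	result.AddItems(2, 3, 4)
	fmt.Println(result.Items) // [1 2 3 4]
}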
AddItems
Using AI Code Generation
result.AddItems(new[] { 1, 2, 3, 4 });
result.AddItems(new[] { 5, 6, 7, 8 });
result.AddItems(new[] { 9, 10, 11, 12 });
result.AddItems(new[] { 13, 14, 15, 16 });
result.AddItems(new[] { 17, 18, 19, 20 });
result.AddItems(new[] { 21, 22, 23, 24 });
result.AddItems(new[] { 25, 26, 27, 28 });
result.AddItems(new[] { 29, 30, 31, 32 });
result.AddItems(new[] { 33, 34, 35, 36 });
result.AddItems(new[] { 37, 38, 39, 40 });
result.AddItems(new[] { 41, 42, 43, 44 });
result.AddItems(new[] { 45, 46, 47, 48 });
result.AddItems(new[] { 49, 50, 51, 52 });
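The calls above use C#-style array literals (new[] { ... }); in Go, the equivalent batch insert would pass a slice through a variadic parameter. Reusing the hypothetical Result type sketched earlier (still an assumption, not a documented API):

result := NewResult()
for _, batch := range [][]int{
	{1, 2, 3, 4},
	{5, 6, 7, 8},
	{9, 10, 11, 12},
} {
	// expand each slice into AddItems' variadic parameter
	result.AddItems(batch...)
}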
AddItems
Using AI Code Generation
import (
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	// plus the generated "result" package; its import path is not shown in the source
)

func main() {
	result := &result.Result{}
	result.AddItems(1)
	result.AddItems(2)
	result.AddItems(3)
	result.AddItems(4)
	result.AddItems(5)
	result.AddItems(6)
	result.AddItems(7)
	result.AddItems(8)
	result.AddItems(9)
	result.AddItems(10)
	data, err := proto.Marshal(result)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	if err := ioutil.WriteFile("result.pb", data, 0644); err != nil {
		log.Fatal("write error: ", err)
	}
}

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	// plus the generated "result" package; its import path is not shown in the source
)

func main() {
	in, err := ioutil.ReadFile("result.pb")
	if err != nil {
		log.Fatalln("Error reading file:", err)
	}
	result := &result.Result{}
	if err := proto.Unmarshal(in, result); err != nil {
		log.Fatalln("Failed to parse result:", err)
	}
	for _, item := range result.GetItems() {
		fmt.Println(item)
	}
}

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/golang/protobuf/protoc-gen-go/descriptor"

type Result struct {
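Note that protobuf-generated Go types expose fields and getters (such as GetItems above) but no AddItems method, so the writer program presumes a small hand-written helper in the same package. A minimal sketch of such a helper, assuming the generated Result message carries a repeated integer field named Items:

// AddItems is a hypothetical convenience helper defined alongside the
// generated code; it simply appends to the message's repeated field.
func (r *Result) AddItems(items ...int64) {
	r.Items = append(r.Items, items...)
}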
AddItems
Using AI Code Generation
./1.go:10: cannot use result literal (type result) as type *result in argument to AddItems

package main

import "fmt"

type result struct {
	items []string
}

func (r *result) AddItems(items ...string) {
	r.items = append(r.items, items...)
}

func main() {
	r := &result{}
	r.AddItems("a", "b", "c")
	fmt.Println(r.items)
}

package main

import "fmt"

type result struct {
	items []string
}

func (r *result) AddItems(items ...string) {
	r.items = append(r.items, items...)
}

func main() {
	r := result{}
	r.AddItems("a", "b", "c")
	fmt.Println(r.items)
}

You can also use the address operator (&) to take the value's address explicitly before calling the pointer-receiver method. For example, the following code will compile and run fine:

package main

import "fmt"

type result struct {
	items []string
}

func (r *result) AddItems(items ...string) {
	r.items = append(r.items, items...)
}

func main() {
	r := result{}
	(&r).AddItems("a", "b", "c")
	fmt.Println(r.items)
}
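The error quoted at the top of this snippet is what Go reports when a plain result value is supplied where a *result is required. The method calls in the examples compile because r is an addressable variable, so the compiler takes &r implicitly; a bare composite literal is not addressable, which is the case worth keeping in mind:

// result{}.AddItems("a") does not compile: the literal is not addressable,
// so the compiler cannot take its address to satisfy the pointer receiver.
// Binding the value to a variable (or starting from a pointer) works:
r := result{}
r.AddItems("a") // equivalent to (&r).AddItems("a")

p := &result{}
p.AddItems("b") // p already has type *result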