How to use the isNil method of the is package

Example code snippet demonstrating is.isNil

repo_test.go

Source:repo_test.go Github

copy

Full Screen

1package tuf2import (3 "bytes"4 "crypto"5 "crypto/rand"6 "encoding/hex"7 "encoding/json"8 "errors"9 "fmt"10 "io/ioutil"11 "os"12 "path/filepath"13 "reflect"14 "sort"15 "strings"16 "testing"17 "time"18 "github.com/secure-systems-lab/go-securesystemslib/cjson"19 "github.com/theupdateframework/go-tuf/data"20 "github.com/theupdateframework/go-tuf/encrypted"21 "github.com/theupdateframework/go-tuf/internal/sets"22 "github.com/theupdateframework/go-tuf/pkg/keys"23 "github.com/theupdateframework/go-tuf/pkg/targets"24 "github.com/theupdateframework/go-tuf/util"25 "github.com/theupdateframework/go-tuf/verify"26 "golang.org/x/crypto/ed25519"27 . "gopkg.in/check.v1"28)29// Hook up gocheck into the "go test" runner.30func Test(t *testing.T) { TestingT(t) }31type RepoSuite struct{}32var _ = Suite(&RepoSuite{})33func (RepoSuite) TestNewRepo(c *C) {34 testNewRepo(c, NewRepo)35}36func (RepoSuite) TestNewRepoIndent(c *C) {37 testNewRepo(c, func(local LocalStore, hashAlgorithms ...string) (*Repo, error) {38 return NewRepoIndent(local, "", "\t")39 })40}41// UniqueKeys returns the unique keys for each associated role.42// We might have multiple key IDs that correspond to the same key.43func UniqueKeys(r *data.Root) map[string][]*data.PublicKey {44 keysByRole := make(map[string][]*data.PublicKey)45 for name, role := range r.Roles {46 seen := make(map[string]struct{})47 roleKeys := []*data.PublicKey{}48 for _, id := range role.KeyIDs {49 // Double-check that there is actually a key with that ID.50 if key, ok := r.Keys[id]; ok {51 verifier, err := keys.GetVerifier(key)52 if err != nil {53 continue54 }55 val := verifier.Public()56 if _, ok := seen[val]; ok {57 continue58 }59 seen[val] = struct{}{}60 roleKeys = append(roleKeys, key)61 }62 }63 keysByRole[name] = roleKeys64 }65 return keysByRole66}67// AssertNumUniqueKeys verifies that the number of unique root keys for a given role is as expected.68func (*RepoSuite) assertNumUniqueKeys(c *C, root *data.Root, role string, num int) {69 
c.Assert(UniqueKeys(root)[role], HasLen, num)70}71func testNewRepo(c *C, newRepo func(local LocalStore, hashAlgorithms ...string) (*Repo, error)) {72 meta := map[string]json.RawMessage{73 "root.json": []byte(`{74 "signed": {75 "_type": "root",76 "version": 1,77 "expires": "2015-12-26T03:26:55.821520874Z",78 "keys": {},79 "roles": {}80 },81 "signatures": []82 }`),83 "targets.json": []byte(`{84 "signed": {85 "_type": "targets",86 "version": 1,87 "expires": "2015-03-26T03:26:55.82155686Z",88 "targets": {}89 },90 "signatures": []91 }`),92 "snapshot.json": []byte(`{93 "signed": {94 "_type": "snapshot",95 "version": 1,96 "expires": "2015-01-02T03:26:55.821585981Z",97 "meta": {}98 },99 "signatures": []100 }`),101 "timestamp.json": []byte(`{102 "signed": {103 "_type": "timestamp",104 "version": 1,105 "expires": "2014-12-27T03:26:55.821599702Z",106 "meta": {}107 },108 "signatures": []109 }`),110 }111 local := MemoryStore(meta, nil)112 r, err := newRepo(local)113 c.Assert(err, IsNil)114 root, err := r.root()115 c.Assert(err, IsNil)116 c.Assert(root.Type, Equals, "root")117 c.Assert(root.Version, Equals, int64(1))118 c.Assert(root.Keys, NotNil)119 c.Assert(root.Keys, HasLen, 0)120 targets, err := r.topLevelTargets()121 c.Assert(err, IsNil)122 c.Assert(targets.Type, Equals, "targets")123 c.Assert(targets.Version, Equals, int64(1))124 c.Assert(targets.Targets, NotNil)125 c.Assert(targets.Targets, HasLen, 0)126 snapshot, err := r.snapshot()127 c.Assert(err, IsNil)128 c.Assert(snapshot.Type, Equals, "snapshot")129 c.Assert(snapshot.Version, Equals, int64(1))130 c.Assert(snapshot.Meta, NotNil)131 c.Assert(snapshot.Meta, HasLen, 0)132 timestamp, err := r.timestamp()133 c.Assert(err, IsNil)134 c.Assert(timestamp.Type, Equals, "timestamp")135 c.Assert(timestamp.Version, Equals, int64(1))136 c.Assert(timestamp.Meta, NotNil)137 c.Assert(timestamp.Meta, HasLen, 0)138}139func (rs *RepoSuite) TestInit(c *C) {140 local := MemoryStore(141 make(map[string]json.RawMessage),142 
map[string][]byte{"foo.txt": []byte("foo")},143 )144 r, err := NewRepo(local)145 c.Assert(err, IsNil)146 // Init() sets root.ConsistentSnapshot147 for _, v := range []bool{true, false} {148 c.Assert(r.Init(v), IsNil)149 root, err := r.root()150 c.Assert(err, IsNil)151 c.Assert(root.ConsistentSnapshot, Equals, v)152 }153 // Add a target.154 generateAndAddPrivateKey(c, r, "targets")155 c.Assert(r.AddTarget("foo.txt", nil), IsNil)156 // Init() fails if targets have been added157 c.Assert(r.Init(true), Equals, ErrInitNotAllowed)158}159func genKey(c *C, r *Repo, role string) []string {160 keyids, err := r.GenKey(role)161 c.Assert(err, IsNil)162 c.Assert(len(keyids) > 0, Equals, true)163 return keyids164}165func (rs *RepoSuite) TestGenKey(c *C) {166 local := MemoryStore(make(map[string]json.RawMessage), nil)167 r, err := NewRepo(local)168 c.Assert(err, IsNil)169 // generate a key for an unknown role170 _, err = r.GenKey("foo")171 c.Assert(err, Equals, ErrInvalidRole{"foo", "only support adding keys for top-level roles"})172 // generate a root key173 ids := genKey(c, r, "root")174 // check root metadata is correct175 root, err := r.root()176 c.Assert(err, IsNil)177 c.Assert(root.Roles, NotNil)178 c.Assert(root.Roles, HasLen, 1)179 rs.assertNumUniqueKeys(c, root, "root", 1)180 rootRole, ok := root.Roles["root"]181 if !ok {182 c.Fatal("missing root role")183 }184 c.Assert(rootRole.KeyIDs, HasLen, 1)185 c.Assert(rootRole.KeyIDs, DeepEquals, ids)186 for _, keyID := range ids {187 k, ok := root.Keys[keyID]188 if !ok {189 c.Fatal("missing key")190 }191 c.Assert(k.IDs(), DeepEquals, ids)192 pk, err := keys.GetVerifier(k)193 c.Assert(err, IsNil)194 c.Assert(pk.Public(), HasLen, ed25519.PublicKeySize)195 }196 // check root key + role are in db197 db, err := r.topLevelKeysDB()198 c.Assert(err, IsNil)199 for _, keyID := range ids {200 rootKey, err := db.GetVerifier(keyID)201 c.Assert(err, IsNil)202 c.Assert(rootKey.MarshalPublicKey().IDs(), DeepEquals, ids)203 role := 
db.GetRole("root")204 c.Assert(role.KeyIDs, DeepEquals, sets.StringSliceToSet(ids))205 // check the key was saved correctly206 localKeys, err := local.GetSigners("root")207 c.Assert(err, IsNil)208 c.Assert(localKeys, HasLen, 1)209 c.Assert(localKeys[0].PublicData().IDs(), DeepEquals, ids)210 // check RootKeys() is correct211 rootKeys, err := r.RootKeys()212 c.Assert(err, IsNil)213 c.Assert(rootKeys, HasLen, 1)214 c.Assert(rootKeys[0].IDs(), DeepEquals, rootKey.MarshalPublicKey().IDs())215 pk, err := keys.GetVerifier(rootKeys[0])216 c.Assert(err, IsNil)217 c.Assert(pk.Public(), DeepEquals, rootKey.Public())218 }219 rootKey, err := db.GetVerifier(ids[0])220 c.Assert(err, IsNil)221 // generate two targets keys222 genKey(c, r, "targets")223 genKey(c, r, "targets")224 // check root metadata is correct225 root, err = r.root()226 c.Assert(err, IsNil)227 c.Assert(root.Roles, HasLen, 2)228 rs.assertNumUniqueKeys(c, root, "root", 1)229 rs.assertNumUniqueKeys(c, root, "targets", 2)230 targetsRole, ok := root.Roles["targets"]231 if !ok {232 c.Fatal("missing targets role")233 }234 c.Assert(targetsRole.KeyIDs, HasLen, 2)235 targetKeyIDs := make(map[string]struct{}, 2)236 db, err = r.topLevelKeysDB()237 c.Assert(err, IsNil)238 for _, id := range targetsRole.KeyIDs {239 targetKeyIDs[id] = struct{}{}240 _, ok = root.Keys[id]241 if !ok {242 c.Fatal("missing key")243 }244 verifier, err := db.GetVerifier(id)245 c.Assert(err, IsNil)246 c.Assert(verifier.MarshalPublicKey().ContainsID(id), Equals, true)247 }248 role := db.GetRole("targets")249 c.Assert(role.KeyIDs, DeepEquals, targetKeyIDs)250 // check RootKeys() is unchanged251 rootKeys, err := r.RootKeys()252 c.Assert(err, IsNil)253 c.Assert(rootKeys, HasLen, 1)254 c.Assert(rootKeys[0].IDs(), DeepEquals, rootKey.MarshalPublicKey().IDs())255 // check the keys were saved correctly256 localKeys, err := local.GetSigners("targets")257 c.Assert(err, IsNil)258 c.Assert(localKeys, HasLen, 2)259 for _, key := range localKeys {260 found := 
false261 for _, id := range targetsRole.KeyIDs {262 if key.PublicData().ContainsID(id) {263 found = true264 break265 }266 }267 if !found {268 c.Fatal("missing key")269 }270 }271 // check root.json got staged272 meta, err := local.GetMeta()273 c.Assert(err, IsNil)274 rootJSON, ok := meta["root.json"]275 if !ok {276 c.Fatal("missing root metadata")277 }278 s := &data.Signed{}279 c.Assert(json.Unmarshal(rootJSON, s), IsNil)280 stagedRoot := &data.Root{}281 c.Assert(json.Unmarshal(s.Signed, stagedRoot), IsNil)282 c.Assert(stagedRoot.Type, Equals, root.Type)283 c.Assert(stagedRoot.Version, Equals, root.Version)284 c.Assert(stagedRoot.Expires.UnixNano(), Equals, root.Expires.UnixNano())285 // make sure both root and stagedRoot have evaluated IDs(), otherwise286 // DeepEquals will fail because those values might not have been287 // computed yet.288 for _, key := range root.Keys {289 key.IDs()290 }291 for _, key := range stagedRoot.Keys {292 key.IDs()293 }294 c.Assert(stagedRoot.Keys, DeepEquals, root.Keys)295 c.Assert(stagedRoot.Roles, DeepEquals, root.Roles)296}297func addPrivateKey(c *C, r *Repo, role string, key keys.Signer) []string {298 err := r.AddPrivateKey(role, key)299 c.Assert(err, IsNil)300 keyids := key.PublicData().IDs()301 c.Assert(len(keyids) > 0, Equals, true)302 return keyids303}304func generateAndAddPrivateKey(c *C, r *Repo, role string) []string {305 signer, err := keys.GenerateEd25519Key()306 c.Assert(err, IsNil)307 return addPrivateKey(c, r, role, signer)308}309func (rs *RepoSuite) TestAddPrivateKey(c *C) {310 local := MemoryStore(make(map[string]json.RawMessage), nil)311 r, err := NewRepo(local)312 c.Assert(err, IsNil)313 // generate a key for an unknown role314 signer, err := keys.GenerateEd25519Key()315 c.Assert(err, IsNil)316 err = r.AddPrivateKey("foo", signer)317 c.Assert(err, Equals, ErrInvalidRole{"foo", "only support adding keys for top-level roles"})318 // add a root key319 ids := addPrivateKey(c, r, "root", signer)320 // check root metadata 
is correct321 root, err := r.root()322 c.Assert(err, IsNil)323 c.Assert(root.Version, Equals, int64(1))324 c.Assert(root.Roles, NotNil)325 c.Assert(root.Roles, HasLen, 1)326 rs.assertNumUniqueKeys(c, root, "root", 1)327 rootRole, ok := root.Roles["root"]328 if !ok {329 c.Fatal("missing root role")330 }331 c.Assert(rootRole.KeyIDs, HasLen, 1)332 c.Assert(rootRole.KeyIDs, DeepEquals, ids)333 for _, keyID := range ids {334 k, ok := root.Keys[keyID]335 if !ok {336 c.Fatalf("missing key %s", keyID)337 }338 c.Assert(k.IDs(), DeepEquals, ids)339 pk, err := keys.GetVerifier(k)340 c.Assert(err, IsNil)341 c.Assert(pk.Public(), HasLen, ed25519.PublicKeySize)342 }343 // check root key + role are in db344 db, err := r.topLevelKeysDB()345 c.Assert(err, IsNil)346 for _, keyID := range ids {347 rootKey, err := db.GetVerifier(keyID)348 c.Assert(err, IsNil)349 c.Assert(rootKey.MarshalPublicKey().IDs(), DeepEquals, ids)350 role := db.GetRole("root")351 c.Assert(role.KeyIDs, DeepEquals, sets.StringSliceToSet(ids))352 // check the key was saved correctly353 localKeys, err := local.GetSigners("root")354 c.Assert(err, IsNil)355 c.Assert(localKeys, HasLen, 1)356 c.Assert(localKeys[0].PublicData().IDs(), DeepEquals, ids)357 // check RootKeys() is correct358 rootKeys, err := r.RootKeys()359 c.Assert(err, IsNil)360 c.Assert(rootKeys, HasLen, 1)361 c.Assert(rootKeys[0].IDs(), DeepEquals, rootKey.MarshalPublicKey().IDs())362 pk, err := keys.GetVerifier(rootKeys[0])363 c.Assert(err, IsNil)364 c.Assert(pk.Public(), DeepEquals, rootKey.Public())365 }366 rootKey, err := db.GetVerifier(ids[0])367 c.Assert(err, IsNil)368 // generate two targets keys369 generateAndAddPrivateKey(c, r, "targets")370 generateAndAddPrivateKey(c, r, "targets")371 // check root metadata is correct372 root, err = r.root()373 c.Assert(err, IsNil)374 c.Assert(root.Roles, HasLen, 2)375 rs.assertNumUniqueKeys(c, root, "root", 1)376 rs.assertNumUniqueKeys(c, root, "targets", 2)377 targetsRole, ok := root.Roles["targets"]378 if 
!ok {379 c.Fatal("missing targets role")380 }381 c.Assert(targetsRole.KeyIDs, HasLen, 2)382 targetKeyIDs := make(map[string]struct{}, 2)383 db, err = r.topLevelKeysDB()384 c.Assert(err, IsNil)385 for _, id := range targetsRole.KeyIDs {386 targetKeyIDs[id] = struct{}{}387 _, ok = root.Keys[id]388 if !ok {389 c.Fatal("missing key")390 }391 verifier, err := db.GetVerifier(id)392 c.Assert(err, IsNil)393 c.Assert(verifier.MarshalPublicKey().ContainsID(id), Equals, true)394 }395 role := db.GetRole("targets")396 c.Assert(role.KeyIDs, DeepEquals, targetKeyIDs)397 // check RootKeys() is unchanged398 rootKeys, err := r.RootKeys()399 c.Assert(err, IsNil)400 c.Assert(rootKeys, HasLen, 1)401 c.Assert(rootKeys[0].IDs(), DeepEquals, rootKey.MarshalPublicKey().IDs())402 // check the keys were saved correctly403 localKeys, err := local.GetSigners("targets")404 c.Assert(err, IsNil)405 c.Assert(localKeys, HasLen, 2)406 for _, key := range localKeys {407 found := false408 for _, id := range targetsRole.KeyIDs {409 if key.PublicData().ContainsID(id) {410 found = true411 break412 }413 }414 if !found {415 c.Fatal("missing key")416 }417 }418 // check root.json got staged419 meta, err := local.GetMeta()420 c.Assert(err, IsNil)421 rootJSON, ok := meta["root.json"]422 if !ok {423 c.Fatal("missing root metadata")424 }425 s := &data.Signed{}426 c.Assert(json.Unmarshal(rootJSON, s), IsNil)427 stagedRoot := &data.Root{}428 c.Assert(json.Unmarshal(s.Signed, stagedRoot), IsNil)429 c.Assert(stagedRoot.Type, Equals, root.Type)430 c.Assert(stagedRoot.Version, Equals, root.Version)431 c.Assert(stagedRoot.Expires.UnixNano(), Equals, root.Expires.UnixNano())432 // make sure both root and stagedRoot have evaluated IDs(), otherwise433 // DeepEquals will fail because those values might not have been434 // computed yet.435 for _, key := range root.Keys {436 key.IDs()437 }438 for _, key := range stagedRoot.Keys {439 key.IDs()440 }441 c.Assert(stagedRoot.Keys, DeepEquals, root.Keys)442 
c.Assert(stagedRoot.Roles, DeepEquals, root.Roles)443 // commit to make sure we don't modify metadata after committing metadata.444 generateAndAddPrivateKey(c, r, "snapshot")445 generateAndAddPrivateKey(c, r, "timestamp")446 c.Assert(r.AddTargets([]string{}, nil), IsNil)447 c.Assert(r.Snapshot(), IsNil)448 c.Assert(r.Timestamp(), IsNil)449 c.Assert(r.Commit(), IsNil)450 // add the same root key to make sure the metadata is unmodified.451 oldRoot, err := r.root()452 c.Assert(err, IsNil)453 addPrivateKey(c, r, "root", signer)454 newRoot, err := r.root()455 c.Assert(err, IsNil)456 c.Assert(oldRoot, DeepEquals, newRoot)457 if r.local.FileIsStaged("root.json") {458 c.Fatal("root should not be marked dirty")459 }460}461func (rs *RepoSuite) TestRevokeKey(c *C) {462 local := MemoryStore(make(map[string]json.RawMessage), nil)463 r, err := NewRepo(local)464 c.Assert(err, IsNil)465 // revoking a key for an unknown role returns ErrInvalidRole466 c.Assert(r.RevokeKey("foo", ""), DeepEquals, ErrInvalidRole{"foo", "only revocations for top-level roles supported"})467 // revoking a key which doesn't exist returns ErrKeyNotFound468 c.Assert(r.RevokeKey("root", "nonexistent"), DeepEquals, ErrKeyNotFound{"root", "nonexistent"})469 // generate keys470 genKey(c, r, "root")471 target1IDs := genKey(c, r, "targets")472 target2IDs := genKey(c, r, "targets")473 genKey(c, r, "snapshot")474 genKey(c, r, "timestamp")475 root, err := r.root()476 c.Assert(err, IsNil)477 c.Assert(root.Roles, NotNil)478 c.Assert(root.Roles, HasLen, 4)479 c.Assert(root.Keys, NotNil)480 rs.assertNumUniqueKeys(c, root, "root", 1)481 rs.assertNumUniqueKeys(c, root, "targets", 2)482 rs.assertNumUniqueKeys(c, root, "snapshot", 1)483 rs.assertNumUniqueKeys(c, root, "timestamp", 1)484 // revoke a key485 targetsRole, ok := root.Roles["targets"]486 if !ok {487 c.Fatal("missing targets role")488 }489 c.Assert(targetsRole.KeyIDs, HasLen, len(target1IDs)+len(target2IDs))490 id := targetsRole.KeyIDs[0]491 
c.Assert(r.RevokeKey("targets", id), IsNil)492 // make sure all the other key ids were also revoked493 for _, id := range target1IDs {494 c.Assert(r.RevokeKey("targets", id), DeepEquals, ErrKeyNotFound{"targets", id})495 }496 // check root was updated497 root, err = r.root()498 c.Assert(err, IsNil)499 c.Assert(root.Roles, NotNil)500 c.Assert(root.Roles, HasLen, 4)501 c.Assert(root.Keys, NotNil)502 rs.assertNumUniqueKeys(c, root, "root", 1)503 rs.assertNumUniqueKeys(c, root, "targets", 1)504 rs.assertNumUniqueKeys(c, root, "snapshot", 1)505 rs.assertNumUniqueKeys(c, root, "timestamp", 1)506 targetsRole, ok = root.Roles["targets"]507 if !ok {508 c.Fatal("missing targets role")509 }510 c.Assert(targetsRole.KeyIDs, HasLen, 1)511 c.Assert(targetsRole.KeyIDs, DeepEquals, target2IDs)512}513func (rs *RepoSuite) TestRevokeKeyInMultipleRoles(c *C) {514 local := MemoryStore(make(map[string]json.RawMessage), nil)515 r, err := NewRepo(local)516 c.Assert(err, IsNil)517 // generate keys. add a root key that is shared with the targets role518 rootSigner, err := keys.GenerateEd25519Key()519 c.Assert(err, IsNil)520 c.Assert(r.AddVerificationKey("root", rootSigner.PublicData()), IsNil)521 sharedSigner, err := keys.GenerateEd25519Key()522 c.Assert(err, IsNil)523 sharedIDs := sharedSigner.PublicData().IDs()524 c.Assert(r.AddVerificationKey("root", sharedSigner.PublicData()), IsNil)525 c.Assert(r.AddVerificationKey("targets", sharedSigner.PublicData()), IsNil)526 targetIDs := genKey(c, r, "targets")527 genKey(c, r, "snapshot")528 genKey(c, r, "timestamp")529 root, err := r.root()530 c.Assert(err, IsNil)531 c.Assert(root.Roles, NotNil)532 c.Assert(root.Roles, HasLen, 4)533 c.Assert(root.Keys, NotNil)534 rs.assertNumUniqueKeys(c, root, "root", 2)535 rs.assertNumUniqueKeys(c, root, "targets", 2)536 rs.assertNumUniqueKeys(c, root, "snapshot", 1)537 rs.assertNumUniqueKeys(c, root, "timestamp", 1)538 // revoke a key539 targetsRole, ok := root.Roles["targets"]540 if !ok {541 c.Fatal("missing 
targets role")542 }543 c.Assert(targetsRole.KeyIDs, HasLen, len(targetIDs)+len(sharedIDs))544 id := targetsRole.KeyIDs[0]545 c.Assert(r.RevokeKey("targets", id), IsNil)546 // make sure all the other key ids were also revoked547 for _, id := range sharedIDs {548 c.Assert(r.RevokeKey("targets", id), DeepEquals, ErrKeyNotFound{"targets", id})549 }550 // check root was updated551 root, err = r.root()552 c.Assert(err, IsNil)553 c.Assert(root.Roles, NotNil)554 c.Assert(root.Roles, HasLen, 4)555 c.Assert(root.Keys, NotNil)556 // the shared root/targets signer should still be present in root keys557 c.Assert(UniqueKeys(root)["root"], DeepEquals,558 []*data.PublicKey{rootSigner.PublicData(), sharedSigner.PublicData()})559 rs.assertNumUniqueKeys(c, root, "root", 2)560 rs.assertNumUniqueKeys(c, root, "targets", 1)561 rs.assertNumUniqueKeys(c, root, "snapshot", 1)562 rs.assertNumUniqueKeys(c, root, "timestamp", 1)563 targetsRole, ok = root.Roles["targets"]564 if !ok {565 c.Fatal("missing targets role")566 }567 c.Assert(targetsRole.KeyIDs, HasLen, 1)568 c.Assert(targetsRole.KeyIDs, DeepEquals, targetIDs)569}570func (rs *RepoSuite) TestSign(c *C) {571 meta := map[string]json.RawMessage{"root.json": []byte(`{"signed":{},"signatures":[]}`)}572 local := MemoryStore(meta, nil)573 r, err := NewRepo(local)574 c.Assert(err, IsNil)575 c.Assert(r.Sign("foo.json"), Equals, ErrMissingMetadata{"foo.json"})576 // signing with no keys returns ErrNoKeys577 c.Assert(r.Sign("root.json"), Equals, ErrNoKeys{"root.json"})578 checkSigIDs := func(keyIDs ...string) {579 meta, err := local.GetMeta()580 c.Assert(err, IsNil)581 rootJSON, ok := meta["root.json"]582 if !ok {583 c.Fatal("missing root.json")584 }585 s := &data.Signed{}586 c.Assert(json.Unmarshal(rootJSON, s), IsNil)587 c.Assert(s.Signatures, HasLen, len(keyIDs))588 // Signatures may be in any order, so must sort key IDs before comparison.589 wantKeyIDs := append([]string{}, keyIDs...)590 sort.Strings(wantKeyIDs)591 gotKeyIDs := []string{}592 
for _, sig := range s.Signatures {593 gotKeyIDs = append(gotKeyIDs, sig.KeyID)594 }595 sort.Strings(gotKeyIDs)596 c.Assert(wantKeyIDs, DeepEquals, gotKeyIDs)597 }598 // signing with an available key generates a signature599 signer, err := keys.GenerateEd25519Key()600 c.Assert(err, IsNil)601 c.Assert(local.SaveSigner("root", signer), IsNil)602 c.Assert(r.Sign("root.json"), IsNil)603 checkSigIDs(signer.PublicData().IDs()...)604 // signing again does not generate a duplicate signature605 c.Assert(r.Sign("root.json"), IsNil)606 checkSigIDs(signer.PublicData().IDs()...)607 // signing with a new available key generates another signature608 newKey, err := keys.GenerateEd25519Key()609 c.Assert(err, IsNil)610 c.Assert(local.SaveSigner("root", newKey), IsNil)611 c.Assert(r.Sign("root.json"), IsNil)612 checkSigIDs(append(signer.PublicData().IDs(), newKey.PublicData().IDs()...)...)613 // attempt to sign missing metadata614 c.Assert(r.Sign("targets.json"), Equals, ErrMissingMetadata{"targets.json"})615}616func (rs *RepoSuite) TestCommit(c *C) {617 files := map[string][]byte{"foo.txt": []byte("foo"), "bar.txt": []byte("bar")}618 local := MemoryStore(make(map[string]json.RawMessage), files)619 r, err := NewRepo(local)620 c.Assert(err, IsNil)621 // commit without root.json622 c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"root.json"})623 // Init should create targets.json, but not signed yet624 r.Init(false)625 c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"snapshot.json"})626 genKey(c, r, "root")627 // commit without snapshot.json628 genKey(c, r, "targets")629 c.Assert(r.Sign("targets.json"), IsNil)630 c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"snapshot.json"})631 // commit without timestamp.json632 genKey(c, r, "snapshot")633 c.Assert(r.Snapshot(), IsNil)634 c.Assert(r.Commit(), DeepEquals, ErrMissingMetadata{"timestamp.json"})635 // commit with timestamp.json but no timestamp key636 c.Assert(r.Timestamp(), IsNil)637 c.Assert(r.Commit(), DeepEquals, 
ErrInsufficientSignatures{"timestamp.json", verify.ErrNoSignatures})638 // commit success639 genKey(c, r, "timestamp")640 c.Assert(r.Snapshot(), IsNil)641 c.Assert(r.Timestamp(), IsNil)642 c.Assert(r.Commit(), IsNil)643 // commit with an invalid root hash in snapshot.json due to new key creation644 genKey(c, r, "targets")645 c.Assert(r.Sign("targets.json"), IsNil)646 c.Assert(r.Commit(), DeepEquals, errors.New("tuf: invalid targets.json in snapshot.json: wrong length, expected 338 got 552"))647 // commit with an invalid targets hash in snapshot.json648 c.Assert(r.Snapshot(), IsNil)649 c.Assert(r.AddTarget("bar.txt", nil), IsNil)650 c.Assert(r.Commit(), DeepEquals, errors.New("tuf: invalid targets.json in snapshot.json: wrong length, expected 552 got 725"))651 // commit with an invalid timestamp652 c.Assert(r.Snapshot(), IsNil)653 err = r.Commit()654 c.Assert(err, NotNil)655 c.Assert(err.Error()[0:44], Equals, "tuf: invalid snapshot.json in timestamp.json")656 // commit with a role's threshold greater than number of keys657 root, err := r.root()658 c.Assert(err, IsNil)659 role, ok := root.Roles["timestamp"]660 if !ok {661 c.Fatal("missing timestamp role")662 }663 c.Assert(role.KeyIDs, HasLen, 1)664 c.Assert(role.Threshold, Equals, 1)665 c.Assert(r.RevokeKey("timestamp", role.KeyIDs[0]), IsNil)666 c.Assert(r.Snapshot(), IsNil)667 c.Assert(r.Timestamp(), IsNil)668 c.Assert(r.Commit(), DeepEquals, ErrNotEnoughKeys{"timestamp", 0, 1})669}670func (rs *RepoSuite) TestCommitVersions(c *C) {671 files := map[string][]byte{"foo.txt": []byte("foo")}672 local := MemoryStore(make(map[string]json.RawMessage), files)673 r, err := NewRepo(local)674 c.Assert(err, IsNil)675 genKey(c, r, "root")676 genKey(c, r, "targets")677 genKey(c, r, "snapshot")678 genKey(c, r, "timestamp")679 c.Assert(r.AddTarget("foo.txt", nil), IsNil)680 c.Assert(r.Snapshot(), IsNil)681 c.Assert(r.Timestamp(), IsNil)682 c.Assert(r.Commit(), IsNil)683 // on initial commit everything should be at version 1.684 
rootVersion, err := r.RootVersion()685 c.Assert(err, IsNil)686 c.Assert(rootVersion, Equals, int64(1))687 targetsVersion, err := r.TargetsVersion()688 c.Assert(err, IsNil)689 c.Assert(targetsVersion, Equals, int64(1))690 snapshotVersion, err := r.SnapshotVersion()691 c.Assert(err, IsNil)692 c.Assert(snapshotVersion, Equals, int64(1))693 timestampVersion, err := r.SnapshotVersion()694 c.Assert(err, IsNil)695 c.Assert(timestampVersion, Equals, int64(1))696 // taking a snapshot should only increment snapshot and timestamp.697 c.Assert(r.Snapshot(), IsNil)698 c.Assert(r.Timestamp(), IsNil)699 c.Assert(r.Commit(), IsNil)700 rootVersion, err = r.RootVersion()701 c.Assert(err, IsNil)702 c.Assert(rootVersion, Equals, int64(1))703 targetsVersion, err = r.TargetsVersion()704 c.Assert(err, IsNil)705 c.Assert(targetsVersion, Equals, int64(1))706 snapshotVersion, err = r.SnapshotVersion()707 c.Assert(err, IsNil)708 c.Assert(snapshotVersion, Equals, int64(2))709 timestampVersion, err = r.SnapshotVersion()710 c.Assert(err, IsNil)711 c.Assert(timestampVersion, Equals, int64(2))712 // rotating multiple keys should increment the root once.713 genKey(c, r, "targets")714 genKey(c, r, "snapshot")715 genKey(c, r, "timestamp")716 c.Assert(r.Snapshot(), IsNil)717 c.Assert(r.Timestamp(), IsNil)718 c.Assert(r.Commit(), IsNil)719 rootVersion, err = r.RootVersion()720 c.Assert(err, IsNil)721 c.Assert(rootVersion, Equals, int64(2))722 targetsVersion, err = r.TargetsVersion()723 c.Assert(err, IsNil)724 c.Assert(targetsVersion, Equals, int64(1))725 snapshotVersion, err = r.SnapshotVersion()726 c.Assert(err, IsNil)727 c.Assert(snapshotVersion, Equals, int64(3))728 timestampVersion, err = r.TimestampVersion()729 c.Assert(err, IsNil)730 c.Assert(timestampVersion, Equals, int64(3))731}732type tmpDir struct {733 path string734 c *C735}736func newTmpDir(c *C) *tmpDir {737 return &tmpDir{path: c.MkDir(), c: c}738}739func (t *tmpDir) assertExists(path string) {740 if _, err := 
os.Stat(filepath.Join(t.path, path)); os.IsNotExist(err) {741 t.c.Fatalf("expected path to exist but it doesn't: %s", path)742 }743}744func (t *tmpDir) assertNotExist(path string) {745 if _, err := os.Stat(filepath.Join(t.path, path)); !os.IsNotExist(err) {746 t.c.Fatalf("expected path to not exist but it does: %s", path)747 }748}749func (t *tmpDir) assertHashedFilesExist(path string, hashes data.Hashes) {750 t.c.Assert(len(hashes) > 0, Equals, true)751 for _, path := range util.HashedPaths(path, hashes) {752 t.assertExists(path)753 }754}755func (t *tmpDir) assertHashedFilesNotExist(path string, hashes data.Hashes) {756 for _, path := range util.HashedPaths(path, hashes) {757 t.assertNotExist(path)758 }759}760func (t *tmpDir) assertVersionedFileExist(path string, version int64) {761 t.assertExists(util.VersionedPath(path, version))762}763func (t *tmpDir) assertVersionedFileNotExist(path string, version int64) {764 t.assertNotExist(util.VersionedPath(path, version))765}766func (t *tmpDir) assertEmpty(dir string) {767 path := filepath.Join(t.path, dir)768 f, err := os.Stat(path)769 if os.IsNotExist(err) {770 t.c.Fatalf("expected dir to exist but it doesn't: %s", dir)771 }772 t.c.Assert(err, IsNil)773 t.c.Assert(f.IsDir(), Equals, true)774 entries, err := ioutil.ReadDir(path)775 t.c.Assert(err, IsNil)776 // check that all (if any) entries are also empty777 for _, e := range entries {778 t.assertEmpty(filepath.Join(dir, e.Name()))779 }780}781func (t *tmpDir) assertFileContent(path, content string) {782 actual := t.readFile(path)783 t.c.Assert(string(actual), Equals, content)784}785func (t *tmpDir) stagedTargetPath(path string) string {786 return filepath.Join(t.path, "staged", "targets", path)787}788func (t *tmpDir) writeStagedTarget(path, data string) {789 path = t.stagedTargetPath(path)790 t.c.Assert(os.MkdirAll(filepath.Dir(path), 0755), IsNil)791 t.c.Assert(ioutil.WriteFile(path, []byte(data), 0644), IsNil)792}793func (t *tmpDir) readFile(path string) []byte {794 
t.assertExists(path)795 data, err := ioutil.ReadFile(filepath.Join(t.path, path))796 t.c.Assert(err, IsNil)797 return data798}799func (rs *RepoSuite) TestCommitFileSystem(c *C) {800 tmp := newTmpDir(c)801 local := FileSystemStore(tmp.path, nil)802 r, err := NewRepo(local)803 c.Assert(err, IsNil)804 // don't use consistent snapshots to make the checks simpler805 c.Assert(r.Init(false), IsNil)806 // cleaning with nothing staged or committed should fail807 c.Assert(r.Clean(), Equals, ErrNewRepository)808 // generating keys should stage root.json and create repo dirs809 genKey(c, r, "root")810 genKey(c, r, "targets")811 genKey(c, r, "snapshot")812 genKey(c, r, "timestamp")813 tmp.assertExists("staged/root.json")814 tmp.assertEmpty("repository")815 tmp.assertEmpty("staged/targets")816 // cleaning with nothing committed should fail817 c.Assert(r.Clean(), Equals, ErrNewRepository)818 // adding a non-existent file fails819 c.Assert(r.AddTarget("foo.txt", nil), Equals, ErrFileNotFound{tmp.stagedTargetPath("foo.txt")})820 tmp.assertEmpty("repository")821 // adding a file stages targets.json822 tmp.writeStagedTarget("foo.txt", "foo")823 c.Assert(r.AddTarget("foo.txt", nil), IsNil)824 tmp.assertExists("staged/targets.json")825 tmp.assertEmpty("repository")826 t, err := r.topLevelTargets()827 c.Assert(err, IsNil)828 c.Assert(t.Targets, HasLen, 1)829 if _, ok := t.Targets["foo.txt"]; !ok {830 c.Fatal("missing target file: foo.txt")831 }832 // Snapshot() stages snapshot.json833 c.Assert(r.Snapshot(), IsNil)834 tmp.assertExists("staged/snapshot.json")835 tmp.assertEmpty("repository")836 // Timestamp() stages timestamp.json837 c.Assert(r.Timestamp(), IsNil)838 tmp.assertExists("staged/timestamp.json")839 tmp.assertEmpty("repository")840 // committing moves files from staged -> repository841 c.Assert(r.Commit(), IsNil)842 tmp.assertExists("repository/root.json")843 tmp.assertExists("repository/targets.json")844 tmp.assertExists("repository/snapshot.json")845 
tmp.assertExists("repository/timestamp.json")
	tmp.assertFileContent("repository/targets/foo.txt", "foo")
	tmp.assertEmpty("staged/targets")
	tmp.assertEmpty("staged")
	// adding and committing another file moves it into repository/targets
	tmp.writeStagedTarget("path/to/bar.txt", "bar")
	c.Assert(r.AddTarget("path/to/bar.txt", nil), IsNil)
	tmp.assertExists("staged/targets.json")
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	tmp.assertFileContent("repository/targets/foo.txt", "foo")
	tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar")
	tmp.assertEmpty("staged/targets")
	tmp.assertEmpty("staged")
	// removing and committing a file removes it from repository/targets
	c.Assert(r.RemoveTarget("foo.txt"), IsNil)
	tmp.assertExists("staged/targets.json")
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	tmp.assertNotExist("repository/targets/foo.txt")
	tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar")
	tmp.assertEmpty("staged/targets")
	tmp.assertEmpty("staged")
}

// TestCommitFileSystemWithNewRepositories checks that each repository
// operation succeeds when performed through a fresh *Repo instance backed by
// the same FileSystemStore (i.e. no state is required to live on the Repo
// value itself).
func (rs *RepoSuite) TestCommitFileSystemWithNewRepositories(c *C) {
	tmp := newTmpDir(c)
	newRepo := func() *Repo {
		local := FileSystemStore(tmp.path, nil)
		r, err := NewRepo(local)
		c.Assert(err, IsNil)
		return r
	}
	genKey(c, newRepo(), "root")
	genKey(c, newRepo(), "targets")
	genKey(c, newRepo(), "snapshot")
	genKey(c, newRepo(), "timestamp")
	tmp.writeStagedTarget("foo.txt", "foo")
	c.Assert(newRepo().AddTarget("foo.txt", nil), IsNil)
	c.Assert(newRepo().Snapshot(), IsNil)
	c.Assert(newRepo().Timestamp(), IsNil)
	c.Assert(newRepo().Commit(), IsNil)
}

// TestConsistentSnapshot checks the file layout produced by a repo created
// with consistent snapshots enabled ("sha512"/"sha256" hash algorithms):
// metadata exists at versioned paths, targets exist at hashed paths, and
// timestamp.json stays unversioned and unhashed.
func (rs *RepoSuite) TestConsistentSnapshot(c *C) {
	tmp := newTmpDir(c)
	local := FileSystemStore(tmp.path, nil)
	r, err := NewRepo(local, "sha512", "sha256")
	c.Assert(err, IsNil)
	genKey(c, r, "root")
	genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	tmp.writeStagedTarget("foo.txt", "foo")
	c.Assert(r.AddTarget("foo.txt", nil), IsNil)
	tmp.writeStagedTarget("dir/bar.txt", "bar")
	c.Assert(r.AddTarget("dir/bar.txt", nil), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	versions, err := r.fileVersions()
	c.Assert(err, IsNil)
	c.Assert(versions["root.json"], Equals, int64(1))
	c.Assert(versions["targets.json"], Equals, int64(1))
	c.Assert(versions["snapshot.json"], Equals, int64(1))
	hashes, err := r.fileHashes()
	c.Assert(err, IsNil)
	// root.json, targets.json and snapshot.json should exist at both versioned and unversioned paths
	for _, path := range []string{"root.json", "targets.json", "snapshot.json"} {
		repoPath := filepath.Join("repository", path)
		if path != "root.json" {
			c.Assert(len(hashes[path]) > 0, Equals, true)
		}
		tmp.assertHashedFilesNotExist(repoPath, hashes[path])
		tmp.assertVersionedFileExist(repoPath, versions[path])
		tmp.assertExists(repoPath)
	}
	// target files should exist at hashed but not unhashed paths
	for _, path := range []string{"targets/foo.txt", "targets/dir/bar.txt"} {
		repoPath := filepath.Join("repository", path)
		tmp.assertHashedFilesExist(repoPath, hashes[path])
		tmp.assertNotExist(repoPath)
	}
	// timestamp.json should exist at an unversioned and unhashed path (it doesn't have a hash)
	c.Assert(hashes["repository/timestamp.json"], IsNil)
	tmp.assertVersionedFileNotExist("repository/timestamp.json", versions["repository/timestamp.json"])
	tmp.assertExists("repository/timestamp.json")
	// removing a file should remove the hashed files
	c.Assert(r.RemoveTarget("foo.txt"), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	versions, err = r.fileVersions()
	c.Assert(err, IsNil)
	c.Assert(versions["root.json"], Equals, int64(1))
	c.Assert(versions["targets.json"], Equals, int64(2))
	c.Assert(versions["snapshot.json"], Equals, int64(2))
	// Save the old hashes for foo.txt to make sure we can assert it doesn't exist later.
	fooHashes := hashes["targets/foo.txt"]
	hashes, err = r.fileHashes()
	c.Assert(err, IsNil)
	// root.json, targets.json and snapshot.json should exist at both versioned and unversioned paths
	for _, path := range []string{"root.json", "targets.json", "snapshot.json"} {
		repoPath := filepath.Join("repository", path)
		if path != "root.json" {
			c.Assert(len(hashes[path]) > 0, Equals, true)
		}
		tmp.assertHashedFilesNotExist(repoPath, hashes[path])
		tmp.assertVersionedFileExist(repoPath, versions[path])
		tmp.assertExists(repoPath)
	}
	tmp.assertHashedFilesNotExist("repository/targets/foo.txt", fooHashes)
	tmp.assertNotExist("repository/targets/foo.txt")
	// targets should be returned by new repo
	newRepo, err := NewRepo(local, "sha512", "sha256")
	c.Assert(err, IsNil)
	t, err := newRepo.topLevelTargets()
	c.Assert(err, IsNil)
	c.Assert(t.Targets, HasLen, 1)
	if _, ok := t.Targets["dir/bar.txt"]; !ok {
		c.Fatal("missing targets file: dir/bar.txt")
	}
}

// TestExpiresAndVersion checks that expiry times in the past are rejected and
// that each *WithExpires operation both sets the expected expiry (rounded to
// the second) and bumps the role's version number.
func (rs *RepoSuite) TestExpiresAndVersion(c *C) {
	files := map[string][]byte{"foo.txt": []byte("foo")}
	local := MemoryStore(make(map[string]json.RawMessage), files)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// every *WithExpires method must reject an expiry in the past
	past := time.Now().Add(-1 * time.Second)
	_, genKeyErr := r.GenKeyWithExpires("root", past)
	for _, err := range []error{
		genKeyErr,
		r.AddTargetWithExpires("foo.txt", nil, past),
		r.RemoveTargetWithExpires("foo.txt", past),
		r.SnapshotWithExpires(past),
		r.TimestampWithExpires(past),
	} {
		c.Assert(err, Equals, ErrInvalidExpires{past})
	}
	genKey(c, r, "root")
	genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	c.Assert(r.AddTargets([]string{}, nil), IsNil)
	c.Assert(r.Snapshot(), IsNil)
c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	root, err := r.root()
	c.Assert(err, IsNil)
	c.Assert(root.Version, Equals, int64(1))
	// generating a new root key bumps root to version 2 with the new expiry
	expires := time.Now().Add(24 * time.Hour)
	_, err = r.GenKeyWithExpires("root", expires)
	c.Assert(err, IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	root, err = r.root()
	c.Assert(err, IsNil)
	c.Assert(root.Expires.Unix(), DeepEquals, expires.Round(time.Second).Unix())
	c.Assert(root.Version, Equals, int64(2))
	// revoking one of the two root keys bumps root to version 3
	expires = time.Now().Add(12 * time.Hour)
	role, ok := root.Roles["root"]
	if !ok {
		c.Fatal("missing root role")
	}
	c.Assert(role.KeyIDs, HasLen, 2)
	c.Assert(r.RevokeKeyWithExpires("root", role.KeyIDs[0], expires), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	root, err = r.root()
	c.Assert(err, IsNil)
	c.Assert(root.Expires.Unix(), DeepEquals, expires.Round(time.Second).Unix())
	c.Assert(root.Version, Equals, int64(3))
	// adding a target bumps targets to version 2
	expires = time.Now().Add(6 * time.Hour)
	c.Assert(r.AddTargetWithExpires("foo.txt", nil, expires), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	targets, err := r.topLevelTargets()
	c.Assert(err, IsNil)
	c.Assert(targets.Expires.Unix(), Equals, expires.Round(time.Second).Unix())
	c.Assert(targets.Version, Equals, int64(2))
	// removing a target bumps targets to version 3
	expires = time.Now().Add(2 * time.Hour)
	c.Assert(r.RemoveTargetWithExpires("foo.txt", expires), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	targets, err = r.topLevelTargets()
	c.Assert(err, IsNil)
	c.Assert(targets.Expires.Unix(), Equals, expires.Round(time.Second).Unix())
	c.Assert(targets.Version, Equals, int64(3))
	expires = time.Now().Add(time.Hour)
	c.Assert(r.SnapshotWithExpires(expires), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err := r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Expires.Unix(), Equals, expires.Round(time.Second).Unix())
	c.Assert(snapshot.Version, Equals, int64(6))
	// snapshot.json no longer tracks root.json, only targets.json
	_, snapshotHasRoot := snapshot.Meta["root.json"]
	c.Assert(snapshotHasRoot, Equals, false)
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, targets.Version)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Version, Equals, int64(7))
	expires = time.Now().Add(10 * time.Minute)
	c.Assert(r.TimestampWithExpires(expires), IsNil)
	c.Assert(r.Commit(), IsNil)
	timestamp, err := r.timestamp()
	c.Assert(err, IsNil)
	c.Assert(timestamp.Expires.Unix(), Equals, expires.Round(time.Second).Unix())
	c.Assert(timestamp.Version, Equals, int64(8))
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	timestamp, err = r.timestamp()
	c.Assert(err, IsNil)
	c.Assert(timestamp.Version, Equals, int64(9))
	c.Assert(timestamp.Meta["snapshot.json"].Version, Equals, snapshot.Version)
}

// TestHashAlgorithm checks that the hash algorithms passed to NewRepo are the
// ones recorded in the generated file metadata (defaulting to sha512 when
// none are given).
func (rs *RepoSuite) TestHashAlgorithm(c *C) {
	files := map[string][]byte{"foo.txt": []byte("foo")}
	local := MemoryStore(make(map[string]json.RawMessage), files)
	type hashTest struct {
		args     []string
		expected []string
	}
	for _, test := range []hashTest{
		{args: []string{}, expected: []string{"sha512"}},
		{args: []string{"sha256"}},
		{args: []string{"sha512", "sha256"}},
	} {
		// generate metadata with specific hash functions
		r, err := NewRepo(local, test.args...)
		c.Assert(err, IsNil)
		genKey(c, r, "root")
		genKey(c, r, "targets")
		genKey(c, r, "snapshot")
		c.Assert(r.AddTarget("foo.txt", nil), IsNil)
		c.Assert(r.Snapshot(), IsNil)
		c.Assert(r.Timestamp(), IsNil)
		// check metadata has correct hash functions
		if test.expected == nil {
			test.expected = test.args
		}
		targets, err := r.topLevelTargets()
		c.Assert(err, IsNil)
		snapshot, err := r.snapshot()
		c.Assert(err, IsNil)
		timestamp, err := r.timestamp()
		c.Assert(err, IsNil)
		for name, file := range map[string]data.FileMeta{
			"foo.txt":       targets.Targets["foo.txt"].FileMeta,
			"targets.json":  snapshot.Meta["targets.json"].FileMeta,
			"snapshot.json": timestamp.Meta["snapshot.json"].FileMeta,
		} {
			for _, hashAlgorithm := range test.expected {
				if _, ok := file.Hashes[hashAlgorithm]; !ok {
					c.Fatalf("expected %s hash to contain hash func %s, got %s", name, hashAlgorithm, file.HashAlgorithms())
				}
			}
		}
	}
}

// TestKeyPersistence checks that signers saved through a FileSystemStore are
// persisted (encrypted when a passphrase function is configured, plaintext
// otherwise) and that the store passphrase can be changed.
func (rs *RepoSuite) TestKeyPersistence(c *C) {
	tmp := newTmpDir(c)
	oldPassphrase := []byte("old_s3cr3t")
	newPassphrase := []byte("new_s3cr3t")
	// returnNewPassphrase is used to force the passphrase function to return the new passphrase when called by the SaveSigner() method
	returnNewPassphrase := false
	// passphrase mock function
	testPassphraseFunc := func(a string, b, change bool) ([]byte, error) {
		if change || returnNewPassphrase {
			return newPassphrase, nil
		}
		return oldPassphrase, nil
	}
	store := FileSystemStore(tmp.path, testPassphraseFunc)
	assertKeys := func(role string, enc bool, expected []*data.PrivateKey) {
		keysJSON := tmp.readFile("keys/" + role + ".json")
		pk := &persistedKeys{}
		c.Assert(json.Unmarshal(keysJSON, pk), IsNil)
		// check the persisted keys are correct
		var actual []*data.PrivateKey
		pass := oldPassphrase
		if enc {
			c.Assert(pk.Encrypted, Equals, true)
			if returnNewPassphrase {
				pass = newPassphrase
			}
			decrypted, err := encrypted.Decrypt(pk.Data, pass)
			c.Assert(err, IsNil)
			c.Assert(json.Unmarshal(decrypted, &actual), IsNil)
		} else {
			c.Assert(pk.Encrypted, Equals, false)
c.Assert(json.Unmarshal(pk.Data, &actual), IsNil)
		}
		// Compare slices of unique elements disregarding order.
		c.Assert(actual, HasLen, len(expected))
		for _, gotKey := range actual {
			expectedNumMatches := 0
			for _, x := range actual {
				if reflect.DeepEqual(gotKey, x) {
					expectedNumMatches++
				}
			}
			numMatches := 0
			for _, wantKey := range expected {
				wantCanon, err := cjson.EncodeCanonical(wantKey)
				c.Assert(err, IsNil)
				gotCanon, err := cjson.EncodeCanonical(gotKey)
				c.Assert(err, IsNil)
				if string(wantCanon) == string(gotCanon) {
					numMatches++
				}
			}
			c.Assert(numMatches, Equals, expectedNumMatches, Commentf("actual: %+v, expected: %+v", actual, expected))
		}
		// check GetKeys is correct
		signers, err := store.GetSigners(role)
		c.Assert(err, IsNil)
		// Compare slices of unique elements disregarding order.
		c.Assert(signers, HasLen, len(expected))
		for _, s := range signers {
			expectedNumMatches := 0
			for _, x := range signers {
				if reflect.DeepEqual(s, x) {
					expectedNumMatches++
				}
			}
			numMatches := 0
			for _, e := range expected {
				v, err := keys.GetSigner(e)
				c.Assert(err, IsNil)
				if reflect.DeepEqual(s.PublicData().IDs(), v.PublicData().IDs()) {
					numMatches++
				}
			}
			c.Assert(numMatches, Equals, expectedNumMatches, Commentf("signers: %+v, expected: %+v", signers, expected))
		}
	}
	// save a key and check it gets encrypted
	signer, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	privateKey, err := signer.MarshalPrivateKey()
	c.Assert(err, IsNil)
	c.Assert(store.SaveSigner("root", signer), IsNil)
	assertKeys("root", true, []*data.PrivateKey{privateKey})
	// save another key and check it gets added to the existing keys
	newKey, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	newPrivateKey, err := newKey.MarshalPrivateKey()
	c.Assert(err, IsNil)
	c.Assert(store.SaveSigner("root", newKey), IsNil)
	assertKeys("root", true, []*data.PrivateKey{privateKey, newPrivateKey})
	// check saving a key to an encrypted file without a passphrase fails
	insecureStore := FileSystemStore(tmp.path, nil)
	signer, err = keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	c.Assert(insecureStore.SaveSigner("root", signer), Equals, ErrPassphraseRequired{"root"})
	// save a key to an insecure store and check it is not encrypted
	signer, err = keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	privateKey, err = signer.MarshalPrivateKey()
	c.Assert(err, IsNil)
	c.Assert(insecureStore.SaveSigner("targets", signer), IsNil)
	assertKeys("targets", false, []*data.PrivateKey{privateKey})
	c.Assert(insecureStore.SaveSigner("foo", signer), IsNil)
	assertKeys("foo", false, []*data.PrivateKey{privateKey})
	// Test changing the passphrase
	// 1. Create a secure store with a passphrase (create new object and temp folder so we discard any previous state)
	tmp = newTmpDir(c)
	store = FileSystemStore(tmp.path, testPassphraseFunc)
	// 1.5. Changing passphrase works for top-level and delegated roles.
	r, err := NewRepo(store)
	c.Assert(err, IsNil)
	c.Assert(r.ChangePassphrase("targets"), NotNil)
	c.Assert(r.ChangePassphrase("foo"), NotNil)
	// 2. Test changing the passphrase when the keys file does not exist - should FAIL
	c.Assert(store.(PassphraseChanger).ChangePassphrase("root"), NotNil)
	// 3. Generate a new key
	signer, err = keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	privateKey, err = signer.MarshalPrivateKey()
	c.Assert(err, IsNil)
	c.Assert(store.SaveSigner("root", signer), IsNil)
	// 4. Verify the key file can be decrypted using the original passphrase - should SUCCEED
	assertKeys("root", true, []*data.PrivateKey{privateKey})
	// 5. Change the passphrase (our mock passphrase function is called with change=true thus returning the newPassphrase value)
	c.Assert(store.(PassphraseChanger).ChangePassphrase("root"), IsNil)
	// 6. Try to add a key and implicitly decrypt the keys file using the OLD passphrase - should FAIL
	newKey, err = keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	_, err = newKey.MarshalPrivateKey()
	c.Assert(err, IsNil)
	c.Assert(store.SaveSigner("root", newKey), NotNil)
	// 7. Try to add a key and implicitly decrypt the keys using the NEW passphrase - should SUCCEED
	returnNewPassphrase = true
	newKey, err = keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	newPrivateKey, err = newKey.MarshalPrivateKey()
	c.Assert(err, IsNil)
	c.Assert(store.SaveSigner("root", newKey), IsNil)
	// 8. Verify again that the key entries are what we expect after decrypting them using the NEW passphrase
	assertKeys("root", true, []*data.PrivateKey{privateKey, newPrivateKey})
}

// TestManageMultipleTargets checks adding and removing several targets at
// once, both by explicit path list and by passing nil (all staged targets).
func (rs *RepoSuite) TestManageMultipleTargets(c *C) {
	tmp := newTmpDir(c)
	local := FileSystemStore(tmp.path, nil)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// don't use consistent snapshots to make the checks simpler
	c.Assert(r.Init(false), IsNil)
	genKey(c, r, "root")
	genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	assertRepoTargets := func(paths ...string) {
		t, err := r.topLevelTargets()
		c.Assert(err, IsNil)
		for _, path := range paths {
			if _, ok := t.Targets[path]; !ok {
				c.Fatalf("missing target file: %s", path)
			}
		}
	}
	// adding and committing multiple files moves correct targets from staged -> repository
	tmp.writeStagedTarget("foo.txt", "foo")
	tmp.writeStagedTarget("bar.txt", "bar")
	c.Assert(r.AddTargets([]string{"foo.txt", "bar.txt"}, nil), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
assertRepoTargets("foo.txt", "bar.txt")1282 tmp.assertExists("repository/targets/foo.txt")1283 tmp.assertExists("repository/targets/bar.txt")1284 // adding all targets moves them all from staged -> repository1285 count := 101286 files := make([]string, count)1287 for i := 0; i < count; i++ {1288 files[i] = fmt.Sprintf("file%d.txt", i)1289 tmp.writeStagedTarget(files[i], "data")1290 }1291 c.Assert(r.AddTargets(nil, nil), IsNil)1292 c.Assert(r.Snapshot(), IsNil)1293 c.Assert(r.Timestamp(), IsNil)1294 c.Assert(r.Commit(), IsNil)1295 tmp.assertExists("repository/targets/foo.txt")1296 tmp.assertExists("repository/targets/bar.txt")1297 assertRepoTargets(files...)1298 for _, file := range files {1299 tmp.assertExists("repository/targets/" + file)1300 }1301 tmp.assertEmpty("staged/targets")1302 tmp.assertEmpty("staged")1303 // removing all targets removes them from the repository and targets.json1304 c.Assert(r.RemoveTargets(nil), IsNil)1305 c.Assert(r.Snapshot(), IsNil)1306 c.Assert(r.Timestamp(), IsNil)1307 c.Assert(r.Commit(), IsNil)1308 tmp.assertNotExist("repository/targets")1309 t, err := r.topLevelTargets()1310 c.Assert(err, IsNil)1311 c.Assert(t.Targets, HasLen, 0)1312}1313func (rs *RepoSuite) TestCustomTargetMetadata(c *C) {1314 files := map[string][]byte{1315 "foo.txt": []byte("foo"),1316 "bar.txt": []byte("bar"),1317 "baz.txt": []byte("baz"),1318 }1319 local := MemoryStore(make(map[string]json.RawMessage), files)1320 r, err := NewRepo(local)1321 c.Assert(err, IsNil)1322 generateAndAddPrivateKey(c, r, "targets")1323 custom := json.RawMessage(`{"foo":"bar"}`)1324 assertCustomMeta := func(file string, custom *json.RawMessage) {1325 t, err := r.topLevelTargets()1326 c.Assert(err, IsNil)1327 target, ok := t.Targets[file]1328 if !ok {1329 c.Fatalf("missing target file: %s", file)1330 }1331 c.Assert(target.Custom, DeepEquals, custom)1332 }1333 // check custom metadata gets added to the target1334 c.Assert(r.AddTarget("foo.txt", custom), IsNil)1335 
assertCustomMeta("foo.txt", &custom)1336 // check adding bar.txt with no metadata doesn't affect foo.txt1337 c.Assert(r.AddTarget("bar.txt", nil), IsNil)1338 assertCustomMeta("bar.txt", nil)1339 assertCustomMeta("foo.txt", &custom)1340 // check adding all files with no metadata doesn't reset existing metadata1341 c.Assert(r.AddTargets(nil, nil), IsNil)1342 assertCustomMeta("baz.txt", nil)1343 assertCustomMeta("bar.txt", nil)1344 assertCustomMeta("foo.txt", &custom)1345}1346func (rs *RepoSuite) TestUnknownKeyIDs(c *C) {1347 // generate a repo1348 local := MemoryStore(make(map[string]json.RawMessage), nil)1349 r, err := NewRepo(local)1350 c.Assert(err, IsNil)1351 genKey(c, r, "root")1352 genKey(c, r, "targets")1353 genKey(c, r, "snapshot")1354 genKey(c, r, "timestamp")1355 // add a new key to the root metadata with an unknown key id.1356 signer, err := keys.GenerateEd25519Key()1357 c.Assert(err, IsNil)1358 root, err := r.root()1359 c.Assert(err, IsNil)1360 c.Assert(root.Version, Equals, int64(1))1361 root.Keys["unknown-key-id"] = signer.PublicData()1362 r.setMeta("root.json", root)1363 // commit the metadata to the store.1364 c.Assert(r.AddTargets([]string{}, nil), IsNil)1365 c.Assert(r.Snapshot(), IsNil)1366 c.Assert(r.Timestamp(), IsNil)1367 c.Assert(r.Commit(), IsNil)1368 // validate that the unknown key id wasn't stripped when written to the1369 // store.1370 meta, err := local.GetMeta()1371 c.Assert(err, IsNil)1372 rootJSON, ok := meta["root.json"]1373 c.Assert(ok, Equals, true)1374 var signedRoot struct {1375 Signed data.Root `json:"signed"`1376 Signatures []data.Signature `json:"signatures"`1377 }1378 c.Assert(json.Unmarshal(rootJSON, &signedRoot), IsNil)1379 c.Assert(signedRoot.Signed.Version, Equals, int64(1))1380 unknownKey, ok := signedRoot.Signed.Keys["unknown-key-id"]1381 c.Assert(ok, Equals, true)1382 c.Assert(unknownKey, DeepEquals, signer.PublicData())1383 // a new root should preserve the unknown key id.1384 root, err = r.root()1385 c.Assert(root, 
NotNil)1386 c.Assert(err, IsNil)1387 genKey(c, r, "timestamp")1388 c.Assert(r.Snapshot(), IsNil)1389 c.Assert(r.Timestamp(), IsNil)1390 c.Assert(r.Commit(), IsNil)1391 meta, err = local.GetMeta()1392 c.Assert(err, IsNil)1393 rootJSON, ok = meta["root.json"]1394 c.Assert(ok, Equals, true)1395 c.Assert(json.Unmarshal(rootJSON, &signedRoot), IsNil)1396 c.Assert(signedRoot.Signed.Version, Equals, int64(2))1397 unknownKey, ok = signedRoot.Signed.Keys["unknown-key-id"]1398 c.Assert(ok, Equals, true)1399 c.Assert(unknownKey, DeepEquals, signer.PublicData())1400}1401func (rs *RepoSuite) TestThreshold(c *C) {1402 local := MemoryStore(make(map[string]json.RawMessage), nil)1403 r, err := NewRepo(local)1404 c.Assert(err, IsNil)1405 _, err = r.GetThreshold("root")1406 c.Assert(err, DeepEquals, ErrInvalidRole{"root", "role missing from root metadata"})1407 err = r.SetThreshold("root", 2)1408 c.Assert(err, DeepEquals, ErrInvalidRole{"root", "role missing from root metadata"})1409 // Add one key to each role1410 genKey(c, r, "root")1411 genKey(c, r, "targets")1412 genKey(c, r, "snapshot")1413 genKey(c, r, "timestamp")1414 t, err := r.GetThreshold("root")1415 c.Assert(err, IsNil)1416 c.Assert(t, Equals, 1)1417 _, err = r.GetThreshold("foo")1418 c.Assert(err, DeepEquals, ErrInvalidRole{"foo", "only thresholds for top-level roles supported"})1419 err = r.SetThreshold("foo", 2)1420 c.Assert(err, DeepEquals, ErrInvalidRole{"foo", "only thresholds for top-level roles supported"})1421 // commit the metadata to the store.1422 c.Assert(r.AddTargets([]string{}, nil), IsNil)1423 c.Assert(r.Snapshot(), IsNil)1424 c.Assert(r.Timestamp(), IsNil)1425 c.Assert(r.Commit(), IsNil)1426 // Set a new threshold. 
Commit without threshold keys1427 c.Assert(r.SetThreshold("root", 2), IsNil)1428 t, err = r.GetThreshold("root")1429 c.Assert(err, IsNil)1430 c.Assert(t, Equals, 2)1431 c.Assert(r.Commit(), DeepEquals, ErrNotEnoughKeys{"root", 1, 2})1432 // Add a second root key and try again1433 genKey(c, r, "root")1434 c.Assert(r.Sign("root.json"), IsNil)1435 c.Assert(r.Snapshot(), IsNil)1436 c.Assert(r.Timestamp(), IsNil)1437 c.Assert(r.Commit(), IsNil)1438 // Check versions updated1439 rootVersion, err := r.RootVersion()1440 c.Assert(err, IsNil)1441 c.Assert(rootVersion, Equals, int64(2))1442 targetsVersion, err := r.TargetsVersion()1443 c.Assert(err, IsNil)1444 c.Assert(targetsVersion, Equals, int64(1))1445 snapshotVersion, err := r.SnapshotVersion()1446 c.Assert(err, IsNil)1447 c.Assert(snapshotVersion, Equals, int64(2))1448 timestampVersion, err := r.TimestampVersion()1449 c.Assert(err, IsNil)1450 c.Assert(timestampVersion, Equals, int64(2))1451}1452func (rs *RepoSuite) TestAddOrUpdateSignatures(c *C) {1453 files := map[string][]byte{"foo.txt": []byte("foo")}1454 local := MemoryStore(make(map[string]json.RawMessage), files)1455 r, err := NewRepo(local)1456 c.Assert(err, IsNil)1457 // don't use consistent snapshots to make the checks simpler1458 c.Assert(r.Init(false), IsNil)1459 // generate root key offline and add as a verification key1460 rootKey, err := keys.GenerateEd25519Key()1461 c.Assert(err, IsNil)1462 c.Assert(r.AddVerificationKey("root", rootKey.PublicData()), IsNil)1463 targetsKey, err := keys.GenerateEd25519Key()1464 c.Assert(err, IsNil)1465 c.Assert(r.AddVerificationKey("targets", targetsKey.PublicData()), IsNil)1466 snapshotKey, err := keys.GenerateEd25519Key()1467 c.Assert(err, IsNil)1468 c.Assert(r.AddVerificationKey("snapshot", snapshotKey.PublicData()), IsNil)1469 timestampKey, err := keys.GenerateEd25519Key()1470 c.Assert(err, IsNil)1471 c.Assert(r.AddVerificationKey("timestamp", timestampKey.PublicData()), IsNil)1472 // generate signatures externally and 
append1473 rootMeta, err := r.SignedMeta("root.json")1474 c.Assert(err, IsNil)1475 rootCanonical, err := cjson.EncodeCanonical(rootMeta.Signed)1476 c.Assert(err, IsNil)1477 rootSig, err := rootKey.SignMessage(rootCanonical)1478 c.Assert(err, IsNil)1479 for _, id := range rootKey.PublicData().IDs() {1480 c.Assert(r.AddOrUpdateSignature("root.json", data.Signature{1481 KeyID: id,1482 Signature: rootSig}), IsNil)1483 }1484 // add targets and sign1485 c.Assert(r.AddTarget("foo.txt", nil), IsNil)1486 targetsMeta, err := r.SignedMeta("targets.json")1487 c.Assert(err, IsNil)1488 targetsCanonical, err := cjson.EncodeCanonical(targetsMeta.Signed)1489 c.Assert(err, IsNil)1490 targetsSig, err := targetsKey.SignMessage(targetsCanonical)1491 c.Assert(err, IsNil)1492 for _, id := range targetsKey.PublicData().IDs() {1493 r.AddOrUpdateSignature("targets.json", data.Signature{1494 KeyID: id,1495 Signature: targetsSig})1496 }1497 // snapshot and timestamp1498 c.Assert(r.Snapshot(), IsNil)1499 snapshotMeta, err := r.SignedMeta("snapshot.json")1500 c.Assert(err, IsNil)1501 snapshotCanonical, err := cjson.EncodeCanonical(snapshotMeta.Signed)1502 c.Assert(err, IsNil)1503 snapshotSig, err := snapshotKey.SignMessage(snapshotCanonical)1504 c.Assert(err, IsNil)1505 for _, id := range snapshotKey.PublicData().IDs() {1506 r.AddOrUpdateSignature("snapshot.json", data.Signature{1507 KeyID: id,1508 Signature: snapshotSig})1509 }1510 c.Assert(r.Timestamp(), IsNil)1511 timestampMeta, err := r.SignedMeta("timestamp.json")1512 c.Assert(err, IsNil)1513 timestampCanonical, err := cjson.EncodeCanonical(timestampMeta.Signed)1514 c.Assert(err, IsNil)1515 timestampSig, err := timestampKey.SignMessage(timestampCanonical)1516 c.Assert(err, IsNil)1517 for _, id := range timestampKey.PublicData().IDs() {1518 r.AddOrUpdateSignature("timestamp.json", data.Signature{1519 KeyID: id,1520 Signature: timestampSig})1521 }1522 // commit successfully!1523 c.Assert(r.Commit(), IsNil)1524}1525func (rs *RepoSuite) 
TestBadAddOrUpdateSignatures(c *C) {1526 files := map[string][]byte{"foo.txt": []byte("foo")}1527 local := MemoryStore(make(map[string]json.RawMessage), files)1528 r, err := NewRepo(local)1529 c.Assert(err, IsNil)1530 // don't use consistent snapshots to make the checks simpler1531 c.Assert(r.Init(false), IsNil)1532 c.Assert(r.AddOrUpdateSignature("targets.json", data.Signature{1533 KeyID: "foo",1534 Signature: nil}), Equals, ErrInvalidRole{"targets", "role is not in verifier DB"})1535 // generate root key offline and add as a verification key1536 rootKey, err := keys.GenerateEd25519Key()1537 c.Assert(err, IsNil)1538 c.Assert(r.AddVerificationKey("root", rootKey.PublicData()), IsNil)1539 targetsKey, err := keys.GenerateEd25519Key()1540 c.Assert(err, IsNil)1541 c.Assert(r.AddVerificationKey("targets", targetsKey.PublicData()), IsNil)1542 snapshotKey, err := keys.GenerateEd25519Key()1543 c.Assert(err, IsNil)1544 c.Assert(r.AddVerificationKey("snapshot", snapshotKey.PublicData()), IsNil)1545 timestampKey, err := keys.GenerateEd25519Key()1546 c.Assert(err, IsNil)1547 c.Assert(r.AddVerificationKey("timestamp", timestampKey.PublicData()), IsNil)1548 // attempt to sign `root`, rather than `root.json`1549 for _, id := range rootKey.PublicData().IDs() {1550 c.Assert(r.AddOrUpdateSignature("root", data.Signature{1551 KeyID: id,1552 Signature: nil}), Equals, ErrMissingMetadata{"root"})1553 }1554 // add a signature with a bad role1555 rootMeta, err := r.SignedMeta("root.json")1556 c.Assert(err, IsNil)1557 rootCanonical, err := cjson.EncodeCanonical(rootMeta.Signed)1558 c.Assert(err, IsNil)1559 rootSig, err := rootKey.Sign(rand.Reader, rootCanonical, crypto.Hash(0))1560 c.Assert(err, IsNil)1561 for _, id := range rootKey.PublicData().IDs() {1562 c.Assert(r.AddOrUpdateSignature("invalid_root.json", data.Signature{1563 KeyID: id,1564 Signature: rootSig}), Equals, ErrInvalidRole{"invalid_root", "no trusted keys for role"})1565 }1566 // add a root signature with an key ID that is 
for the targets role1567 for _, id := range targetsKey.PublicData().IDs() {1568 c.Assert(r.AddOrUpdateSignature("root.json", data.Signature{1569 KeyID: id,1570 Signature: rootSig}), Equals, verify.ErrInvalidKey)1571 }1572 // attempt to add a bad signature to root1573 badSig, err := rootKey.Sign(rand.Reader, []byte(""), crypto.Hash(0))1574 c.Assert(err, IsNil)1575 for _, id := range rootKey.PublicData().IDs() {1576 c.Assert(r.AddOrUpdateSignature("root.json", data.Signature{1577 KeyID: id,1578 Signature: badSig}), Equals, verify.ErrInvalid)1579 }1580 // add the correct root signature1581 for _, id := range rootKey.PublicData().IDs() {1582 c.Assert(r.AddOrUpdateSignature("root.json", data.Signature{1583 KeyID: id,1584 Signature: rootSig}), IsNil)1585 }1586 checkSigIDs := func(role string) {1587 s, err := r.SignedMeta(role)1588 c.Assert(err, IsNil)1589 db, err := r.topLevelKeysDB()1590 c.Assert(err, IsNil)1591 // keys is a map of key IDs.1592 keys := db.GetRole(strings.TrimSuffix(role, ".json")).KeyIDs1593 c.Assert(s.Signatures, HasLen, len(keys))1594 // If the lengths are equal, and each signature key ID appears1595 // in the role keys, they Sig IDs are equal to keyIDs.1596 for _, sig := range s.Signatures {1597 if _, ok := keys[sig.KeyID]; !ok {1598 c.Fatal("missing key ID in signatures")1599 }1600 }1601 }1602 checkSigIDs("root.json")1603 // re-adding should not duplicate. 
this is checked by verifying1604 // signature key IDs match with the map of role key IDs.1605 for _, id := range rootKey.PublicData().IDs() {1606 c.Assert(r.AddOrUpdateSignature("root.json", data.Signature{1607 KeyID: id,1608 Signature: rootSig}), IsNil)1609 }1610 checkSigIDs("root.json")1611}1612func (rs *RepoSuite) TestSignDigest(c *C) {1613 files := map[string][]byte{"foo.txt": []byte("foo")}1614 local := MemoryStore(make(map[string]json.RawMessage), files)1615 r, err := NewRepo(local)1616 c.Assert(err, IsNil)1617 genKey(c, r, "root")1618 genKey(c, r, "targets")1619 genKey(c, r, "snapshot")1620 genKey(c, r, "timestamp")1621 digest := "sha256:bc11b176a293bb341a0f2d0d226f52e7fcebd186a7c4dfca5fc64f305f06b94c"1622 hash := "bc11b176a293bb341a0f2d0d226f52e7fcebd186a7c4dfca5fc64f305f06b94c"1623 size := int64(42)1624 c.Assert(r.AddTargetsWithDigest(hash, "sha256", size, digest, nil), IsNil)1625 c.Assert(r.Snapshot(), IsNil)1626 c.Assert(r.Timestamp(), IsNil)1627 c.Assert(r.Commit(), IsNil)1628 digest_bytes, err := hex.DecodeString("bc11b176a293bb341a0f2d0d226f52e7fcebd186a7c4dfca5fc64f305f06b94c")1629 hex_digest_bytes := data.HexBytes(digest_bytes)1630 c.Assert(err, IsNil)1631 targets, err := r.topLevelTargets()1632 c.Assert(err, IsNil)1633 c.Assert(targets.Targets["sha256:bc11b176a293bb341a0f2d0d226f52e7fcebd186a7c4dfca5fc64f305f06b94c"].FileMeta.Length, Equals, size)1634 c.Assert(targets.Targets["sha256:bc11b176a293bb341a0f2d0d226f52e7fcebd186a7c4dfca5fc64f305f06b94c"].FileMeta.Hashes["sha256"], DeepEquals, hex_digest_bytes)1635}1636func concat(ss ...[]string) []string {1637 ret := []string{}1638 for _, s := range ss {1639 ret = append(ret, s...)1640 }1641 return ret1642}1643func checkSigKeyIDs(c *C, local LocalStore, fileToKeyIDs map[string][]string) {1644 metas, err := local.GetMeta()1645 c.Assert(err, IsNil)1646 for f, keyIDs := range fileToKeyIDs {1647 meta, ok := metas[f]1648 c.Assert(ok, Equals, true, Commentf("meta file: %v", f))1649 s := &data.Signed{}1650 err 
= json.Unmarshal(meta, s)1651 c.Assert(err, IsNil)1652 gotKeyIDs := []string{}1653 for _, sig := range s.Signatures {1654 gotKeyIDs = append(gotKeyIDs, sig.KeyID)1655 }1656 gotKeyIDs = sets.DeduplicateStrings(gotKeyIDs)1657 sort.Strings(gotKeyIDs)1658 sort.Strings(keyIDs)1659 c.Assert(gotKeyIDs, DeepEquals, keyIDs)1660 }1661}1662func (rs *RepoSuite) TestDelegations(c *C) {1663 tmp := newTmpDir(c)1664 local := FileSystemStore(tmp.path, nil)1665 r, err := NewRepo(local)1666 c.Assert(err, IsNil)1667 // Add one key to each role1668 genKey(c, r, "root")1669 targetsKeyIDs := genKey(c, r, "targets")1670 genKey(c, r, "snapshot")1671 genKey(c, r, "timestamp")1672 // commit the metadata to the store.1673 c.Assert(r.AddTargets([]string{}, nil), IsNil)1674 c.Assert(r.Snapshot(), IsNil)1675 c.Assert(r.Timestamp(), IsNil)1676 c.Assert(r.Commit(), IsNil)1677 snapshot, err := r.snapshot()1678 c.Assert(err, IsNil)1679 c.Assert(snapshot.Meta, HasLen, 1)1680 c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(1))1681 checkSigKeyIDs(c, local, map[string][]string{1682 "1.targets.json": targetsKeyIDs,1683 })1684 saveNewKey := func(role string) keys.Signer {1685 key, err := keys.GenerateEd25519Key()1686 c.Assert(err, IsNil)1687 err = local.SaveSigner(role, key)1688 c.Assert(err, IsNil)1689 return key1690 }1691 // Delegate from targets -> role1 for A/*, B/* with one key, threshold 1.1692 role1ABKey := saveNewKey("role1")1693 role1AB := data.DelegatedRole{1694 Name: "role1",1695 KeyIDs: role1ABKey.PublicData().IDs(),1696 Paths: []string{"A/*", "B/*"},1697 Threshold: 1,1698 }1699 err = r.AddDelegatedRole("targets", role1AB, []*data.PublicKey{1700 role1ABKey.PublicData(),1701 })1702 c.Assert(err, IsNil)1703 // Adding duplicate delegation should return an error.1704 err = r.AddDelegatedRole("targets", role1AB, []*data.PublicKey{1705 role1ABKey.PublicData(),1706 })1707 c.Assert(err, NotNil)1708 // Delegate from targets -> role2 for C/*, D/* with three key, threshold 2.1709 
role2CDKey1 := saveNewKey("role2")
	role2CDKey2 := saveNewKey("role2")
	role2CDKey3 := saveNewKey("role2")
	role2CD := data.DelegatedRole{
		Name: "role2",
		KeyIDs: concat(
			role2CDKey1.PublicData().IDs(),
			role2CDKey2.PublicData().IDs(),
			role2CDKey3.PublicData().IDs(),
		),
		Paths:     []string{"C/*", "D/*"},
		Threshold: 2,
	}
	err = r.AddDelegatedRole("targets", role2CD, []*data.PublicKey{
		role2CDKey1.PublicData(),
		role2CDKey2.PublicData(),
		role2CDKey3.PublicData(),
	})
	c.Assert(err, IsNil)
	// Delegate from role1 -> role2 for A/allium.txt with one key, threshold 1.
	role1To2Key := saveNewKey("role2")
	role1To2 := data.DelegatedRole{
		Name:        "role2",
		KeyIDs:      role1To2Key.PublicData().IDs(),
		Paths:       []string{"A/allium.txt"},
		Threshold:   1,
		Terminating: true,
	}
	err = r.AddDelegatedRole("role1", role1To2, []*data.PublicKey{
		role1To2Key.PublicData(),
	})
	c.Assert(err, IsNil)
	checkDelegations := func(delegator string, delegatedRoles ...data.DelegatedRole) {
		t, err := r.targets(delegator)
		c.Assert(err, IsNil)
		// Check that delegated roles are copied verbatim.
		c.Assert(t.Delegations.Roles, DeepEquals, delegatedRoles)
		// Check that public keys match key IDs in roles.
		expectedKeyIDs := []string{}
		for _, dr := range delegatedRoles {
			expectedKeyIDs = append(expectedKeyIDs, dr.KeyIDs...)
		}
		expectedKeyIDs = sets.DeduplicateStrings(expectedKeyIDs)
		sort.Strings(expectedKeyIDs)
		gotKeyIDs := []string{}
		for _, k := range t.Delegations.Keys {
			gotKeyIDs = append(gotKeyIDs, k.IDs()...)
		}
		gotKeyIDs = sets.DeduplicateStrings(gotKeyIDs)
		sort.Strings(gotKeyIDs)
		c.Assert(gotKeyIDs, DeepEquals, expectedKeyIDs)
	}
	checkDelegations("targets", role1AB, role2CD)
	checkDelegations("role1", role1To2)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 3)
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(2))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["role2.json"].Version, Equals, int64(1))
	checkSigKeyIDs(c, local, map[string][]string{
		"2.targets.json": targetsKeyIDs,
		"1.role1.json":   role1ABKey.PublicData().IDs(),
		"1.role2.json": concat(
			role2CDKey1.PublicData().IDs(),
			role2CDKey2.PublicData().IDs(),
			role2CDKey3.PublicData().IDs(),
			role1To2Key.PublicData().IDs(),
		),
	})
	// Add a variety of targets.
	files := map[string]string{
		// targets.json
		"potato.txt": "potatoes can be starchy or waxy",
		// role1.json
		"A/apple.txt":  "apples are sometimes red",
		"B/banana.txt": "bananas are yellow and sometimes brown",
		// role2.json
		"C/clementine.txt": "clementines are a citrus fruit",
		"D/durian.txt":     "durians are spiky",
		"A/allium.txt":     "alliums include garlic and leeks",
	}
	for name, content := range files {
		tmp.writeStagedTarget(name, content)
		c.Assert(r.AddTarget(name, nil), IsNil)
	}
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 3)
	// All roles should have new targets.
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(3))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(2))
	c.Assert(snapshot.Meta["role2.json"].Version, Equals, int64(2))
	checkSigKeyIDs(c, local, map[string][]string{
		"3.targets.json": targetsKeyIDs,
		"2.role1.json":   role1ABKey.PublicData().IDs(),
		"2.role2.json": concat(
			role2CDKey1.PublicData().IDs(),
			role2CDKey2.PublicData().IDs(),
			role2CDKey3.PublicData().IDs(),
			role1To2Key.PublicData().IDs(),
		),
	})
	// Check that the given targets role has signed for the given filenames, with
	// the correct file metadata.
	checkTargets := func(role string, filenames ...string) {
		t, err := r.targets(role)
		c.Assert(err, IsNil)
		c.Assert(t.Targets, HasLen, len(filenames))
		for _, fn := range filenames {
			content := files[fn]
			fm, err := util.GenerateTargetFileMeta(strings.NewReader(content))
			c.Assert(err, IsNil)
			c.Assert(util.TargetFileMetaEqual(t.Targets[fn], fm), IsNil)
		}
	}
	checkTargets("targets", "potato.txt")
	checkTargets("role1", "A/apple.txt", "B/banana.txt")
	checkTargets("role2", "C/clementine.txt", "D/durian.txt", "A/allium.txt")
	// Test AddTargetToPreferredRole.
	// role2 is the default signer for A/allium.txt, but role1 is also eligible
	// for A/*.txt according to the delegation from the top-level targets role.
	c.Assert(r.RemoveTarget("A/allium.txt"), IsNil)
	tmp.writeStagedTarget("A/allium.txt", files["A/allium.txt"])
	c.Assert(r.AddTargetToPreferredRole("A/allium.txt", nil, "role1"), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 3)
	// Only role1 and role2 should have bumped versions.
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(3))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(3))
	c.Assert(snapshot.Meta["role2.json"].Version, Equals, int64(3))
	checkSigKeyIDs(c, local, map[string][]string{
		"3.targets.json": targetsKeyIDs,
		"3.role1.json":   role1ABKey.PublicData().IDs(),
		"3.role2.json": concat(
			role2CDKey1.PublicData().IDs(),
			role2CDKey2.PublicData().IDs(),
			role2CDKey3.PublicData().IDs(),
			role1To2Key.PublicData().IDs(),
		),
	})
	// role1 now signs A/allium.txt.
	checkTargets("targets", "potato.txt")
	checkTargets("role1", "A/apple.txt", "B/banana.txt", "A/allium.txt")
	checkTargets("role2", "C/clementine.txt", "D/durian.txt")
	// Remove the delegation from role1 to role2.
	c.Assert(r.ResetTargetsDelegations("role1"), IsNil)
	checkDelegations("targets", role1AB, role2CD)
	checkDelegations("role1")
	// Try to sign A/allium.txt with role2.
	// It should fail since we removed the role1 -> role2 delegation.
	c.Assert(r.RemoveTarget("A/allium.txt"), IsNil)
	tmp.writeStagedTarget("A/allium.txt", files["A/allium.txt"])
	c.Assert(r.AddTargetToPreferredRole("A/allium.txt", nil, "role2"), Equals, ErrNoDelegatedTarget{Path: "A/allium.txt"})
	// Try to sign A/allium.txt with the default role (role1).
	c.Assert(r.AddTarget("A/allium.txt", nil), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 3)
	// Only role1 should have a bumped version.
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(3))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(4))
	c.Assert(snapshot.Meta["role2.json"].Version, Equals, int64(3))
	checkSigKeyIDs(c, local, map[string][]string{
		"3.targets.json": targetsKeyIDs,
		"4.role1.json":   role1ABKey.PublicData().IDs(),
		"3.role2.json": concat(
			// Metadata (and therefore signers) for role2.json shouldn't have
			// changed, even though we revoked role1To2Key.
Clients verify the1891 // signature using keys specified by 4.role1.json, so role1To2Key1892 // shouldn't contribute to the threshold.1893 role2CDKey1.PublicData().IDs(),1894 role2CDKey2.PublicData().IDs(),1895 role2CDKey3.PublicData().IDs(),1896 role1To2Key.PublicData().IDs(),1897 ),1898 })1899 // Re-sign target signed by role2 to test that role1To2Key is not used going1900 // forward.1901 c.Assert(r.RemoveTarget("C/clementine.txt"), IsNil)1902 tmp.writeStagedTarget("C/clementine.txt", files["C/clementine.txt"])1903 c.Assert(r.AddTarget("C/clementine.txt", nil), IsNil)1904 c.Assert(r.Snapshot(), IsNil)1905 c.Assert(r.Timestamp(), IsNil)1906 c.Assert(r.Commit(), IsNil)1907 snapshot, err = r.snapshot()1908 c.Assert(err, IsNil)1909 c.Assert(snapshot.Meta, HasLen, 3)1910 // Only role2 should have a bumped version.1911 c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(3))1912 c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(4))1913 c.Assert(snapshot.Meta["role2.json"].Version, Equals, int64(4))1914 checkSigKeyIDs(c, local, map[string][]string{1915 "3.targets.json": targetsKeyIDs,1916 "4.role1.json": role1ABKey.PublicData().IDs(),1917 "4.role2.json": concat(1918 role2CDKey1.PublicData().IDs(),1919 role2CDKey2.PublicData().IDs(),1920 role2CDKey3.PublicData().IDs(),1921 // Note that role1To2Key no longer signs since the role1 -> role21922 // delegation was removed.1923 ),1924 })1925 // Targets should still be signed by the same roles.1926 checkTargets("targets", "potato.txt")1927 checkTargets("role1", "A/apple.txt", "B/banana.txt", "A/allium.txt")1928 checkTargets("role2", "C/clementine.txt", "D/durian.txt")1929 // Add back the role1 -> role2 delegation, and verify that it doesn't change1930 // existing targets in role2.json.1931 err = r.AddDelegatedRole("role1", role1To2, []*data.PublicKey{1932 role1To2Key.PublicData(),1933 })1934 c.Assert(err, IsNil)1935 c.Assert(r.Snapshot(), IsNil)1936 c.Assert(r.Timestamp(), IsNil)1937 c.Assert(r.Commit(), 
IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 3)
	// Both role1 and role2 should have a bumped version.
	// role1 is bumped because the delegations changed.
	// role2 is only bumped because its expiration is bumped.
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(3))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(5))
	c.Assert(snapshot.Meta["role2.json"].Version, Equals, int64(5))
	checkTargets("targets", "potato.txt")
	checkTargets("role1", "A/apple.txt", "B/banana.txt", "A/allium.txt")
	checkTargets("role2", "C/clementine.txt", "D/durian.txt")
}

// TestHashBinDelegations delegates targets -> "bins" -> 8 hash-bin leaf roles
// (2^3 bins via NewHashBins("bins_", 3)) and verifies that a staged target is
// routed to the correct bin (foo.txt hashes into bins_c-d) and that snapshot
// metadata and signing key IDs reflect the bin layout.
func (rs *RepoSuite) TestHashBinDelegations(c *C) {
	tmp := newTmpDir(c)
	local := FileSystemStore(tmp.path, nil)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// Add one key to each role
	genKey(c, r, "root")
	targetsKeyIDs := genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	hb, err := targets.NewHashBins("bins_", 3)
	if err != nil {
		c.Assert(err, IsNil)
	}
	// Generate key for the intermediate bins role.
	binsKey, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	err = local.SaveSigner("bins", binsKey)
	c.Assert(err, IsNil)
	// Generate key for the leaf bins role.
	// All leaf bins share a single signing key.
	leafKey, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	for i := uint64(0); i < hb.NumBins(); i++ {
		b := hb.GetBin(i)
		err = local.SaveSigner(b.RoleName(), leafKey)
		if err != nil {
			c.Assert(err, IsNil)
		}
	}
	err = r.AddDelegatedRole("targets", data.DelegatedRole{
		Name:      "bins",
		KeyIDs:    binsKey.PublicData().IDs(),
		Paths:     []string{"*.txt"},
		Threshold: 1,
	}, []*data.PublicKey{
		binsKey.PublicData(),
	})
	c.Assert(err, IsNil)
	err = r.AddDelegatedRolesForPathHashBins("bins", hb, []*data.PublicKey{leafKey.PublicData()}, 1)
	c.Assert(err, IsNil)
	targets, err := r.targets("bins")
	c.Assert(err, IsNil)
	c.Assert(targets.Delegations.Roles, HasLen, 8)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	tmp.writeStagedTarget("foo.txt", "foo")
	err = r.AddTarget("foo.txt", nil)
	c.Assert(err, IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err := r.snapshot()
	c.Assert(err, IsNil)
	// 1 targets.json, 1 bins.json, 8 bins_*.json.
	c.Assert(snapshot.Meta, HasLen, 10)
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins_0-1.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins_2-3.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins_4-5.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins_6-7.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins_8-9.json"].Version, Equals, int64(1))
	c.Assert(snapshot.Meta["bins_a-b.json"].Version, Equals, int64(1))
	// bins_c-d is at version 2 because adding foo.txt bumped it.
	c.Assert(snapshot.Meta["bins_c-d.json"].Version, Equals, int64(2))
	c.Assert(snapshot.Meta["bins_e-f.json"].Version, Equals, int64(1))
	targets, err = r.targets("bins_c-d")
	c.Assert(err, IsNil)
	c.Assert(targets.Targets, HasLen, 1)
	checkSigKeyIDs(c, local, map[string][]string{
		"targets.json":    targetsKeyIDs,
		"1.bins.json":     binsKey.PublicData().IDs(),
		"1.bins_0-1.json": leafKey.PublicData().IDs(),
		"1.bins_2-3.json": leafKey.PublicData().IDs(),
		"1.bins_4-5.json": leafKey.PublicData().IDs(),
		"1.bins_6-7.json": leafKey.PublicData().IDs(),
		"1.bins_8-9.json": leafKey.PublicData().IDs(),
		"1.bins_a-b.json": leafKey.PublicData().IDs(),
		"1.bins_c-d.json": leafKey.PublicData().IDs(),
		"2.bins_c-d.json": leafKey.PublicData().IDs(),
		"1.bins_e-f.json": leafKey.PublicData().IDs(),
	})
}
func (rs *RepoSuite) 
TestResetTargetsDelegationsWithExpires(c *C) {
	tmp := newTmpDir(c)
	local := FileSystemStore(tmp.path, nil)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// Add one key to each role
	genKey(c, r, "root")
	targetsKeyIDs := genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	// commit the metadata to the store.
	c.Assert(r.AddTargets([]string{}, nil), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err := r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 1)
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(1))
	checkSigKeyIDs(c, local, map[string][]string{
		"1.targets.json": targetsKeyIDs,
	})
	role1Key, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	err = local.SaveSigner("role1", role1Key)
	c.Assert(err, IsNil)
	// Delegate from targets -> role1 for A/*, B/* with one key, threshold 1.
	role1 := data.DelegatedRole{
		Name:      "role1",
		KeyIDs:    role1Key.PublicData().IDs(),
		Paths:     []string{"A/*", "B/*"},
		Threshold: 1,
	}
	err = r.AddDelegatedRole("targets", role1, []*data.PublicKey{
		role1Key.PublicData(),
	})
	c.Assert(err, IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 2)
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(2))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(1))
	checkSigKeyIDs(c, local, map[string][]string{
		"1.targets.json": targetsKeyIDs,
		"targets.json":   targetsKeyIDs,
		"1.role1.json":   role1Key.PublicData().IDs(),
		"role1.json":     role1Key.PublicData().IDs(),
	})
	// Resetting delegations bumps targets.json but leaves role1.json at v1.
	c.Assert(r.ResetTargetsDelegations("targets"), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
	snapshot, err = r.snapshot()
	c.Assert(err, IsNil)
	c.Assert(snapshot.Meta, HasLen, 2)
	c.Assert(snapshot.Meta["targets.json"].Version, Equals, int64(3))
	c.Assert(snapshot.Meta["role1.json"].Version, Equals, int64(1))
	checkSigKeyIDs(c, local, map[string][]string{
		"2.targets.json": targetsKeyIDs,
		"targets.json":   targetsKeyIDs,
		"1.role1.json":   role1Key.PublicData().IDs(),
		"role1.json":     role1Key.PublicData().IDs(),
	})
}

// TestSignWithDelegations verifies that a delegated role's metadata stays
// unsigned until its signing key is saved in the local store, that Snapshot
// fails with ErrInsufficientSignatures in that state, and that an explicit
// Sign("role1.json") after SaveSigner unblocks the snapshot/commit flow.
func (rs *RepoSuite) TestSignWithDelegations(c *C) {
	tmp := newTmpDir(c)
	local := FileSystemStore(tmp.path, nil)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// Add one key to each role
	genKey(c, r, "root")
	genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	role1Key, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	role1 := data.DelegatedRole{
		Name:      "role1",
		KeyIDs:    role1Key.PublicData().IDs(),
		Paths:     []string{"A/*", "B/*"},
		Threshold: 1,
	}
	err = r.AddDelegatedRole("targets", role1, []*data.PublicKey{
		role1Key.PublicData(),
	})
	c.Assert(err, IsNil)
	// targets.json should be signed, but role1.json is not signed because there
	// is no key in the local store.
	m, err := local.GetMeta()
	c.Assert(err, IsNil)
	targetsMeta := &data.Signed{}
	c.Assert(json.Unmarshal(m["targets.json"], targetsMeta), IsNil)
	c.Assert(len(targetsMeta.Signatures), Equals, 1)
	role1Meta := &data.Signed{}
	c.Assert(json.Unmarshal(m["role1.json"], role1Meta), IsNil)
	c.Assert(len(role1Meta.Signatures), Equals, 0)
	c.Assert(r.Snapshot(), DeepEquals, ErrInsufficientSignatures{"role1.json", verify.ErrNoSignatures})
	// Sign role1.json.
	c.Assert(local.SaveSigner("role1", role1Key), IsNil)
	c.Assert(r.Sign("role1.json"), IsNil)
	m, err = local.GetMeta()
	c.Assert(err, IsNil)
	targetsMeta = &data.Signed{}
	c.Assert(json.Unmarshal(m["targets.json"], targetsMeta), IsNil)
	c.Assert(len(targetsMeta.Signatures), Equals, 1)
	role1Meta = &data.Signed{}
	c.Assert(json.Unmarshal(m["role1.json"], role1Meta), IsNil)
	c.Assert(len(role1Meta.Signatures), Equals, 1)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
}

// TestAddOrUpdateSignatureWithDelegations is the same scenario as
// TestSignWithDelegations, but attaches the delegated role's signature via
// AddOrUpdateSignature (externally produced over the canonical JSON) instead
// of saving the signer and calling Sign.
func (rs *RepoSuite) TestAddOrUpdateSignatureWithDelegations(c *C) {
	tmp := newTmpDir(c)
	local := FileSystemStore(tmp.path, nil)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// Add one key to each role
	genKey(c, r, "root")
	genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	role1Key, err := keys.GenerateEd25519Key()
	c.Assert(err, IsNil)
	role1 := data.DelegatedRole{
		Name:      "role1",
		KeyIDs:    role1Key.PublicData().IDs(),
		Paths:     []string{"A/*", "B/*"},
		Threshold: 1,
	}
	err = r.AddDelegatedRole("targets", role1, []*data.PublicKey{
		role1Key.PublicData(),
	})
	c.Assert(err, IsNil)
	// targets.json should be signed, but role1.json is not signed because there
	// is no key in the local store.
	m, err := local.GetMeta()
	c.Assert(err, IsNil)
	targetsMeta := &data.Signed{}
	c.Assert(json.Unmarshal(m["targets.json"], targetsMeta), IsNil)
	c.Assert(len(targetsMeta.Signatures), Equals, 1)
	role1Meta := &data.Signed{}
	c.Assert(json.Unmarshal(m["role1.json"], role1Meta), IsNil)
	c.Assert(len(role1Meta.Signatures), Equals, 0)
	c.Assert(r.Snapshot(), DeepEquals, ErrInsufficientSignatures{"role1.json", verify.ErrNoSignatures})
	// Sign role1.json.
	canonical, err := cjson.EncodeCanonical(role1Meta.Signed)
	c.Assert(err, IsNil)
	sig, err := role1Key.SignMessage(canonical)
	c.Assert(err, IsNil)
	err = r.AddOrUpdateSignature("role1.json", data.Signature{
		KeyID:     role1Key.PublicData().IDs()[0],
		Signature: sig,
	})
	c.Assert(err, IsNil)
	m, err = local.GetMeta()
	c.Assert(err, IsNil)
	targetsMeta = 
&data.Signed{}
	c.Assert(json.Unmarshal(m["targets.json"], targetsMeta), IsNil)
	c.Assert(len(targetsMeta.Signatures), Equals, 1)
	role1Meta = &data.Signed{}
	c.Assert(json.Unmarshal(m["role1.json"], role1Meta), IsNil)
	c.Assert(len(role1Meta.Signatures), Equals, 1)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
}

// Test the offline signature flow: Payload -> SignPayload -> AddSignature
func (rs *RepoSuite) TestOfflineFlow(c *C) {
	// Set up repo.
	meta := make(map[string]json.RawMessage)
	local := MemoryStore(meta, nil)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	c.Assert(r.Init(false), IsNil)
	_, err = r.GenKey("root")
	c.Assert(err, IsNil)
	// Get the payload to sign
	_, err = r.Payload("badrole.json")
	c.Assert(err, Equals, ErrMissingMetadata{"badrole.json"})
	// Payload requires the full metadata filename, not the bare role name.
	_, err = r.Payload("root")
	c.Assert(err, Equals, ErrMissingMetadata{"root"})
	payload, err := r.Payload("root.json")
	c.Assert(err, IsNil)
	root, err := r.SignedMeta("root.json")
	c.Assert(err, IsNil)
	rootCanonical, err := cjson.EncodeCanonical(root.Signed)
	c.Assert(err, IsNil)
	if !bytes.Equal(payload, rootCanonical) {
		c.Fatalf("Payload(): not canonical.\n%s\n%s", string(payload), string(rootCanonical))
	}
	// Sign the payload
	signed := data.Signed{Signed: payload}
	_, err = r.SignPayload("targets", &signed)
	c.Assert(err, Equals, ErrNoKeys{"targets"})
	numKeys, err := r.SignPayload("root", &signed)
	c.Assert(err, IsNil)
	c.Assert(numKeys, Equals, 1)
	// Add the payload signatures back
	for _, sig := range signed.Signatures {
		// This method checks that the signature verifies!
		err = r.AddOrUpdateSignature("root.json", sig)
		c.Assert(err, IsNil)
	}
}

// Regression test: Snapshotting an invalid root should fail.
func (rs *RepoSuite) TestSnapshotWithInvalidRoot(c *C) {
	files := map[string][]byte{"foo.txt": []byte("foo")}
	local := MemoryStore(make(map[string]json.RawMessage), files)
	r, err := NewRepo(local)
	c.Assert(err, IsNil)
	// Init should create targets.json, but not signed yet
	r.Init(false)
	genKey(c, r, "root")
	genKey(c, r, "targets")
	genKey(c, r, "snapshot")
	genKey(c, r, "timestamp")
	c.Assert(r.AddTarget("foo.txt", nil), IsNil)
	// Clear the root signature so that signature verification fails.
	s, err := r.SignedMeta("root.json")
	c.Assert(err, IsNil)
	c.Assert(s.Signatures, HasLen, 1)
	s.Signatures[0].Signature = data.HexBytes{}
	b, err := r.jsonMarshal(s)
	c.Assert(err, IsNil)
	r.meta["root.json"] = b
	local.SetMeta("root.json", b)
	// Snapshotting should fail.
	c.Assert(r.Snapshot(), Equals, ErrInsufficientSignatures{"root.json", verify.ErrInvalid})
	// Correctly sign root
	c.Assert(r.Sign("root.json"), IsNil)
	c.Assert(r.Snapshot(), IsNil)
	c.Assert(r.Timestamp(), IsNil)
	c.Assert(r.Commit(), IsNil)
}

Full Screen

Full Screen

gridfs_test.go

Source:gridfs_test.go Github

copy

Full Screen

1// mgo - MongoDB driver for Go2//3// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>4//5// All rights reserved.6//7// Redistribution and use in source and binary forms, with or without8// modification, are permitted provided that the following conditions are met:9//10// 1. Redistributions of source code must retain the above copyright notice, this11// list of conditions and the following disclaimer.12// 2. Redistributions in binary form must reproduce the above copyright notice,13// this list of conditions and the following disclaimer in the documentation14// and/or other materials provided with the distribution.15//16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND17// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED18// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE19// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR20// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES21// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;22// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND23// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT24// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.26package mgo_test27import (28 "io"29 "os"30 "time"31 . 
"gopkg.in/check.v1"32 "gopkg.in/mgo.v2"33 "gopkg.in/mgo.v2/bson"34)35func (s *S) TestGridFSCreate(c *C) {36 session, err := mgo.Dial("localhost:40011")37 c.Assert(err, IsNil)38 defer session.Close()39 db := session.DB("mydb")40 before := bson.Now()41 gfs := db.GridFS("fs")42 file, err := gfs.Create("")43 c.Assert(err, IsNil)44 n, err := file.Write([]byte("some data"))45 c.Assert(err, IsNil)46 c.Assert(n, Equals, 9)47 err = file.Close()48 c.Assert(err, IsNil)49 after := bson.Now()50 // Check the file information.51 result := M{}52 err = db.C("fs.files").Find(nil).One(result)53 c.Assert(err, IsNil)54 fileId, ok := result["_id"].(bson.ObjectId)55 c.Assert(ok, Equals, true)56 c.Assert(fileId.Valid(), Equals, true)57 result["_id"] = "<id>"58 ud, ok := result["uploadDate"].(time.Time)59 c.Assert(ok, Equals, true)60 c.Assert(ud.After(before) && ud.Before(after), Equals, true)61 result["uploadDate"] = "<timestamp>"62 expected := M{63 "_id": "<id>",64 "length": 9,65 "chunkSize": 255 * 1024,66 "uploadDate": "<timestamp>",67 "md5": "1e50210a0202497fb79bc38b6ade6c34",68 }69 c.Assert(result, DeepEquals, expected)70 // Check the chunk.71 result = M{}72 err = db.C("fs.chunks").Find(nil).One(result)73 c.Assert(err, IsNil)74 chunkId, ok := result["_id"].(bson.ObjectId)75 c.Assert(ok, Equals, true)76 c.Assert(chunkId.Valid(), Equals, true)77 result["_id"] = "<id>"78 expected = M{79 "_id": "<id>",80 "files_id": fileId,81 "n": 0,82 "data": []byte("some data"),83 }84 c.Assert(result, DeepEquals, expected)85 // Check that an index was created.86 indexes, err := db.C("fs.chunks").Indexes()87 c.Assert(err, IsNil)88 c.Assert(len(indexes), Equals, 2)89 c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})90}91func (s *S) TestGridFSFileDetails(c *C) {92 session, err := mgo.Dial("localhost:40011")93 c.Assert(err, IsNil)94 defer session.Close()95 db := session.DB("mydb")96 gfs := db.GridFS("fs")97 file, err := gfs.Create("myfile1.txt")98 c.Assert(err, IsNil)99 n, err := 
file.Write([]byte("some"))100 c.Assert(err, IsNil)101 c.Assert(n, Equals, 4)102 c.Assert(file.Size(), Equals, int64(4))103 n, err = file.Write([]byte(" data"))104 c.Assert(err, IsNil)105 c.Assert(n, Equals, 5)106 c.Assert(file.Size(), Equals, int64(9))107 id, _ := file.Id().(bson.ObjectId)108 c.Assert(id.Valid(), Equals, true)109 c.Assert(file.Name(), Equals, "myfile1.txt")110 c.Assert(file.ContentType(), Equals, "")111 var info interface{}112 err = file.GetMeta(&info)113 c.Assert(err, IsNil)114 c.Assert(info, IsNil)115 file.SetId("myid")116 file.SetName("myfile2.txt")117 file.SetContentType("text/plain")118 file.SetMeta(M{"any": "thing"})119 c.Assert(file.Id(), Equals, "myid")120 c.Assert(file.Name(), Equals, "myfile2.txt")121 c.Assert(file.ContentType(), Equals, "text/plain")122 err = file.GetMeta(&info)123 c.Assert(err, IsNil)124 c.Assert(info, DeepEquals, bson.M{"any": "thing"})125 err = file.Close()126 c.Assert(err, IsNil)127 c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")128 ud := file.UploadDate()129 now := time.Now()130 c.Assert(ud.Before(now), Equals, true)131 c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)132 result := M{}133 err = db.C("fs.files").Find(nil).One(result)134 c.Assert(err, IsNil)135 result["uploadDate"] = "<timestamp>"136 expected := M{137 "_id": "myid",138 "length": 9,139 "chunkSize": 255 * 1024,140 "uploadDate": "<timestamp>",141 "md5": "1e50210a0202497fb79bc38b6ade6c34",142 "filename": "myfile2.txt",143 "contentType": "text/plain",144 "metadata": M{"any": "thing"},145 }146 c.Assert(result, DeepEquals, expected)147}148func (s *S) TestGridFSSetUploadDate(c *C) {149 session, err := mgo.Dial("localhost:40011")150 c.Assert(err, IsNil)151 defer session.Close()152 db := session.DB("mydb")153 gfs := db.GridFS("fs")154 file, err := gfs.Create("")155 c.Assert(err, IsNil)156 t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)157 file.SetUploadDate(t)158 err = file.Close()159 c.Assert(err, IsNil)160 // Check the file 
information.161 result := M{}162 err = db.C("fs.files").Find(nil).One(result)163 c.Assert(err, IsNil)164 ud := result["uploadDate"].(time.Time)165 if !ud.Equal(t) {166 c.Fatalf("want upload date %s, got %s", t, ud)167 }168}169func (s *S) TestGridFSCreateWithChunking(c *C) {170 session, err := mgo.Dial("localhost:40011")171 c.Assert(err, IsNil)172 defer session.Close()173 db := session.DB("mydb")174 gfs := db.GridFS("fs")175 file, err := gfs.Create("")176 c.Assert(err, IsNil)177 file.SetChunkSize(5)178 // Smaller than the chunk size.179 n, err := file.Write([]byte("abc"))180 c.Assert(err, IsNil)181 c.Assert(n, Equals, 3)182 // Boundary in the middle.183 n, err = file.Write([]byte("defg"))184 c.Assert(err, IsNil)185 c.Assert(n, Equals, 4)186 // Boundary at the end.187 n, err = file.Write([]byte("hij"))188 c.Assert(err, IsNil)189 c.Assert(n, Equals, 3)190 // Larger than the chunk size, with 3 chunks.191 n, err = file.Write([]byte("klmnopqrstuv"))192 c.Assert(err, IsNil)193 c.Assert(n, Equals, 12)194 err = file.Close()195 c.Assert(err, IsNil)196 // Check the file information.197 result := M{}198 err = db.C("fs.files").Find(nil).One(result)199 c.Assert(err, IsNil)200 fileId, _ := result["_id"].(bson.ObjectId)201 c.Assert(fileId.Valid(), Equals, true)202 result["_id"] = "<id>"203 result["uploadDate"] = "<timestamp>"204 expected := M{205 "_id": "<id>",206 "length": 22,207 "chunkSize": 5,208 "uploadDate": "<timestamp>",209 "md5": "44a66044834cbe55040089cabfc102d5",210 }211 c.Assert(result, DeepEquals, expected)212 // Check the chunks.213 iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()214 dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}215 for i := 0; ; i++ {216 result = M{}217 if !iter.Next(result) {218 if i != 5 {219 c.Fatalf("Expected 5 chunks, got %d", i)220 }221 break222 }223 c.Assert(iter.Close(), IsNil)224 result["_id"] = "<id>"225 expected = M{226 "_id": "<id>",227 "files_id": fileId,228 "n": i,229 "data": []byte(dataChunks[i]),230 }231 
c.Assert(result, DeepEquals, expected)232 }233}234func (s *S) TestGridFSAbort(c *C) {235 session, err := mgo.Dial("localhost:40011")236 c.Assert(err, IsNil)237 defer session.Close()238 db := session.DB("mydb")239 gfs := db.GridFS("fs")240 file, err := gfs.Create("")241 c.Assert(err, IsNil)242 file.SetChunkSize(5)243 n, err := file.Write([]byte("some data"))244 c.Assert(err, IsNil)245 c.Assert(n, Equals, 9)246 var count int247 for i := 0; i < 10; i++ {248 count, err = db.C("fs.chunks").Count()249 if count > 0 || err != nil {250 break251 }252 }253 c.Assert(err, IsNil)254 c.Assert(count, Equals, 1)255 file.Abort()256 err = file.Close()257 c.Assert(err, ErrorMatches, "write aborted")258 count, err = db.C("fs.chunks").Count()259 c.Assert(err, IsNil)260 c.Assert(count, Equals, 0)261}262func (s *S) TestGridFSCloseConflict(c *C) {263 session, err := mgo.Dial("localhost:40011")264 c.Assert(err, IsNil)265 defer session.Close()266 db := session.DB("mydb")267 db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})268 // For a closing-time conflict269 err = db.C("fs.files").Insert(M{"filename": "foo.txt"})270 c.Assert(err, IsNil)271 gfs := db.GridFS("fs")272 file, err := gfs.Create("foo.txt")273 c.Assert(err, IsNil)274 _, err = file.Write([]byte("some data"))275 c.Assert(err, IsNil)276 err = file.Close()277 c.Assert(mgo.IsDup(err), Equals, true)278 count, err := db.C("fs.chunks").Count()279 c.Assert(err, IsNil)280 c.Assert(count, Equals, 0)281}282func (s *S) TestGridFSOpenNotFound(c *C) {283 session, err := mgo.Dial("localhost:40011")284 c.Assert(err, IsNil)285 defer session.Close()286 db := session.DB("mydb")287 gfs := db.GridFS("fs")288 file, err := gfs.OpenId("non-existent")289 c.Assert(err == mgo.ErrNotFound, Equals, true)290 c.Assert(file, IsNil)291 file, err = gfs.Open("non-existent")292 c.Assert(err == mgo.ErrNotFound, Equals, true)293 c.Assert(file, IsNil)294}295func (s *S) TestGridFSReadAll(c *C) {296 session, err := 
mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()
	file.SetChunkSize(5)
	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)
	err = file.Close()
	c.Assert(err, IsNil)
	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)
	b := make([]byte, 30)
	n, err = file.Read(b)
	c.Assert(n, Equals, 22)
	c.Assert(err, IsNil)
	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == io.EOF, Equals, true)
	err = file.Close()
	c.Assert(err, IsNil)
}

// TestGridFSReadChunking mirrors the chunked-write test on the read side:
// reads smaller than a chunk, crossing a boundary, ending on a boundary, and
// spanning multiple chunks, followed by io.EOF.
func (s *S) TestGridFSReadChunking(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()
	file.SetChunkSize(5)
	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)
	err = file.Close()
	c.Assert(err, IsNil)
	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)
	b := make([]byte, 30)
	// Smaller than the chunk size.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], DeepEquals, []byte("abc"))
	// Boundary in the middle.
	n, err = file.Read(b[:4])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)
	c.Assert(b[:4], DeepEquals, []byte("defg"))
	// Boundary at the end.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], DeepEquals, []byte("hij"))
	// Larger than the chunk size, with 3 chunks.
	n, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)
	c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == io.EOF, Equals, true)
	err = file.Close()
	c.Assert(err, IsNil)
}

// TestGridFSOpen checks that Open by name returns the most recently written
// file when two files share the same filename.
func (s *S) TestGridFSOpen(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()
	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()
	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()
	var b [1]byte
	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "2")
}

// TestGridFSSeek exercises Seek from the start, current position, and end of
// a chunked file, including reads that straddle the resulting offsets.
func (s *S) TestGridFSSeek(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()
	file.SetChunkSize(5)
	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)
	err = file.Close()
	c.Assert(err, IsNil)
	b := make([]byte, 5)
	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)
	o, err := file.Seek(3, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(3))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("defgh"))
	o, err = file.Seek(5, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(13))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("nopqr"))
	o, err = file.Seek(0, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(22))
	n, err = file.Read(b)
	c.Assert(err, Equals, io.EOF)
	c.Assert(n, Equals, 0)
	o, err = file.Seek(-10, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(12))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("mnopq"))
	o, err = file.Seek(8, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(8))
	_, err = file.Read(b)
	
c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("ijklm"))
	// Trivial seek forward within same chunk. Already
	// got the data, shouldn't touch the database.
	sent := mgo.GetStats().SentOps
	o, err = file.Seek(1, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(14))
	c.Assert(mgo.GetStats().SentOps, Equals, sent)
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, DeepEquals, []byte("opqrs"))
	// Try seeking past end of file.
	file.Seek(3, os.SEEK_SET)
	o, err = file.Seek(23, os.SEEK_SET)
	c.Assert(err, ErrorMatches, "seek past end of file")
	c.Assert(o, Equals, int64(3))
}

// TestGridFSRemoveId removes one of two same-named files by id and checks
// that the other remains readable and the removed file's chunks are gone.
func (s *S) TestGridFSRemoveId(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()
	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	id := file.Id()
	file.Close()
	err = gfs.RemoveId(id)
	c.Assert(err, IsNil)
	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()
	var b [1]byte
	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "1")
	n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

// TestGridFSRemove removes by filename and checks that every file with that
// name — and all associated chunks — is deleted.
func (s *S) TestGridFSRemove(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()
	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()
	err = gfs.Remove("myfile.txt")
	c.Assert(err, IsNil)
	_, err = gfs.Open("myfile.txt")
	c.Assert(err == mgo.ErrNotFound, Equals, true)
	n, err := db.C("fs.chunks").Find(nil).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

// TestGridFSOpenNext iterates files via OpenNext over a sorted Find iterator,
// verifying iteration order, end-of-iteration behavior (false, nil file), and
// that the query passed to Find restricts the result set.
func (s *S) TestGridFSOpenNext(c *C) {
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()
	db := session.DB("mydb")
	gfs := db.GridFS("fs")
	file, err := gfs.Create("myfile1.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()
	file, err = gfs.Create("myfile2.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()
	var f *mgo.GridFile
	var b [1]byte
	iter := gfs.Find(nil).Sort("-filename").Iter()
	ok := gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, true)
	c.Check(f.Name(), Equals, "myfile2.txt")
	_, err = f.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "2")
	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, true)
	c.Check(f.Name(), Equals, "myfile1.txt")
	_, err = f.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "1")
	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, false)
	c.Assert(iter.Close(), IsNil)
	c.Assert(f, IsNil)
	// Do it again with a more restrictive query to make sure
	// it's actually taken into account.
	iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, true)
	c.Check(f.Name(), Equals, "myfile1.txt")
	ok = gfs.OpenNext(iter, &f)
	c.Assert(ok, Equals, false)
	c.Assert(iter.Close(), IsNil)
	c.Assert(f, IsNil)
}

Full Screen

Full Screen

structure_test.go

Source:structure_test.go Github

copy

Full Screen

1// Copyright 2015 PingCAP, Inc.2//3// Licensed under the Apache License, Version 2.0 (the "License");4// you may not use this file except in compliance with the License.5// You may obtain a copy of the License at6//7// http://www.apache.org/licenses/LICENSE-2.08//9// Unless required by applicable law or agreed to in writing, software10// distributed under the License is distributed on an "AS IS" BASIS,11// See the License for the specific language governing permissions and12// limitations under the License.13package structure_test14import (15 "context"16 "testing"17 . "github.com/pingcap/check"18 "github.com/pingcap/parser/mysql"19 "github.com/pingcap/parser/terror"20 "github.com/pingcap/tidb/kv"21 "github.com/pingcap/tidb/store/mockstore"22 "github.com/pingcap/tidb/structure"23 "github.com/pingcap/tidb/util/testleak"24)25func TestTxStructure(t *testing.T) {26 CustomVerboseFlag = true27 TestingT(t)28}29var _ = Suite(&testTxStructureSuite{})30type testTxStructureSuite struct {31 store kv.Storage32}33func (s *testTxStructureSuite) SetUpSuite(c *C) {34 testleak.BeforeTest()35 store, err := mockstore.NewMockTikvStore()36 c.Assert(err, IsNil)37 s.store = store38}39func (s *testTxStructureSuite) TearDownSuite(c *C) {40 err := s.store.Close()41 c.Assert(err, IsNil)42 testleak.AfterTest(c)()43}44func (s *testTxStructureSuite) TestString(c *C) {45 txn, err := s.store.Begin()46 c.Assert(err, IsNil)47 defer txn.Rollback()48 tx := structure.NewStructure(txn, txn, []byte{0x00})49 key := []byte("a")50 value := []byte("1")51 err = tx.Set(key, value)52 c.Assert(err, IsNil)53 v, err := tx.Get(key)54 c.Assert(err, IsNil)55 c.Assert(v, DeepEquals, value)56 n, err := tx.Inc(key, 1)57 c.Assert(err, IsNil)58 c.Assert(n, Equals, int64(2))59 v, err = tx.Get(key)60 c.Assert(err, IsNil)61 c.Assert(v, DeepEquals, []byte("2"))62 n, err = tx.GetInt64(key)63 c.Assert(err, IsNil)64 c.Assert(n, Equals, int64(2))65 err = tx.Clear(key)66 c.Assert(err, IsNil)67 v, err = tx.Get(key)68 c.Assert(err, 
IsNil)69 c.Assert(v, IsNil)70 tx1 := structure.NewStructure(txn, nil, []byte{0x01})71 err = tx1.Set(key, value)72 c.Assert(err, NotNil)73 _, err = tx1.Inc(key, 1)74 c.Assert(err, NotNil)75 err = tx1.Clear(key)76 c.Assert(err, NotNil)77 err = txn.Commit(context.Background())78 c.Assert(err, IsNil)79}80func (s *testTxStructureSuite) TestList(c *C) {81 txn, err := s.store.Begin()82 c.Assert(err, IsNil)83 defer txn.Rollback()84 tx := structure.NewStructure(txn, txn, []byte{0x00})85 key := []byte("a")86 err = tx.LPush(key, []byte("3"), []byte("2"), []byte("1"))87 c.Assert(err, IsNil)88 // Test LGetAll.89 err = tx.LPush(key, []byte("11"))90 c.Assert(err, IsNil)91 values, err := tx.LGetAll(key)92 c.Assert(err, IsNil)93 c.Assert(values, DeepEquals, [][]byte{[]byte("3"), []byte("2"), []byte("1"), []byte("11")})94 value, err := tx.LPop(key)95 c.Assert(err, IsNil)96 c.Assert(value, DeepEquals, []byte("11"))97 l, err := tx.LLen(key)98 c.Assert(err, IsNil)99 c.Assert(l, Equals, int64(3))100 value, err = tx.LIndex(key, 1)101 c.Assert(err, IsNil)102 c.Assert(value, DeepEquals, []byte("2"))103 err = tx.LSet(key, 1, []byte("4"))104 c.Assert(err, IsNil)105 value, err = tx.LIndex(key, 1)106 c.Assert(err, IsNil)107 c.Assert(value, DeepEquals, []byte("4"))108 err = tx.LSet(key, 1, []byte("2"))109 c.Assert(err, IsNil)110 err = tx.LSet(key, 100, []byte("2"))111 c.Assert(err, NotNil)112 value, err = tx.LIndex(key, -1)113 c.Assert(err, IsNil)114 c.Assert(value, DeepEquals, []byte("3"))115 value, err = tx.LPop(key)116 c.Assert(err, IsNil)117 c.Assert(value, DeepEquals, []byte("1"))118 l, err = tx.LLen(key)119 c.Assert(err, IsNil)120 c.Assert(l, Equals, int64(2))121 err = tx.RPush(key, []byte("4"))122 c.Assert(err, IsNil)123 l, err = tx.LLen(key)124 c.Assert(err, IsNil)125 c.Assert(l, Equals, int64(3))126 value, err = tx.LIndex(key, -1)127 c.Assert(err, IsNil)128 c.Assert(value, DeepEquals, []byte("4"))129 value, err = tx.RPop(key)130 c.Assert(err, IsNil)131 c.Assert(value, DeepEquals, 
[]byte("4"))132 value, err = tx.RPop(key)133 c.Assert(err, IsNil)134 c.Assert(value, DeepEquals, []byte("3"))135 value, err = tx.RPop(key)136 c.Assert(err, IsNil)137 c.Assert(value, DeepEquals, []byte("2"))138 l, err = tx.LLen(key)139 c.Assert(err, IsNil)140 c.Assert(l, Equals, int64(0))141 err = tx.LPush(key, []byte("1"))142 c.Assert(err, IsNil)143 err = tx.LClear(key)144 c.Assert(err, IsNil)145 l, err = tx.LLen(key)146 c.Assert(err, IsNil)147 c.Assert(l, Equals, int64(0))148 tx1 := structure.NewStructure(txn, nil, []byte{0x01})149 err = tx1.LPush(key, []byte("1"))150 c.Assert(err, NotNil)151 _, err = tx1.RPop(key)152 c.Assert(err, NotNil)153 err = tx1.LSet(key, 1, []byte("2"))154 c.Assert(err, NotNil)155 err = tx1.LClear(key)156 c.Assert(err, NotNil)157 err = txn.Commit(context.Background())158 c.Assert(err, IsNil)159}160func (s *testTxStructureSuite) TestHash(c *C) {161 txn, err := s.store.Begin()162 c.Assert(err, IsNil)163 defer txn.Rollback()164 tx := structure.NewStructure(txn, txn, []byte{0x00})165 key := []byte("a")166 tx.EncodeHashAutoIDKeyValue(key, key, 5)167 err = tx.HSet(key, []byte("1"), []byte("1"))168 c.Assert(err, IsNil)169 err = tx.HSet(key, []byte("2"), []byte("2"))170 c.Assert(err, IsNil)171 l, err := tx.HLen(key)172 c.Assert(err, IsNil)173 c.Assert(l, Equals, int64(2))174 value, err := tx.HGet(key, []byte("1"))175 c.Assert(err, IsNil)176 c.Assert(value, DeepEquals, []byte("1"))177 value, err = tx.HGet(key, []byte("fake"))178 c.Assert(err, IsNil)179 c.Assert(value, IsNil)180 keys, err := tx.HKeys(key)181 c.Assert(err, IsNil)182 c.Assert(keys, DeepEquals, [][]byte{[]byte("1"), []byte("2")})183 res, err := tx.HGetAll(key)184 c.Assert(err, IsNil)185 c.Assert(res, DeepEquals, []structure.HashPair{186 {Field: []byte("1"), Value: []byte("1")},187 {Field: []byte("2"), Value: []byte("2")}})188 res, err = tx.HGetLastN(key, 1)189 c.Assert(err, IsNil)190 c.Assert(res, DeepEquals, []structure.HashPair{191 {Field: []byte("2"), Value: []byte("2")}})192 res, 
err = tx.HGetLastN(key, 2)193 c.Assert(err, IsNil)194 c.Assert(res, DeepEquals, []structure.HashPair{195 {Field: []byte("2"), Value: []byte("2")},196 {Field: []byte("1"), Value: []byte("1")}})197 err = tx.HDel(key, []byte("1"))198 c.Assert(err, IsNil)199 value, err = tx.HGet(key, []byte("1"))200 c.Assert(err, IsNil)201 c.Assert(value, IsNil)202 l, err = tx.HLen(key)203 c.Assert(err, IsNil)204 c.Assert(l, Equals, int64(1))205 n, err := tx.HInc(key, []byte("1"), 1)206 c.Assert(err, IsNil)207 c.Assert(n, Equals, int64(1))208 l, err = tx.HLen(key)209 c.Assert(err, IsNil)210 c.Assert(l, Equals, int64(2))211 // Test set new value which equals to old value.212 value, err = tx.HGet(key, []byte("1"))213 c.Assert(err, IsNil)214 c.Assert(value, DeepEquals, []byte("1"))215 err = tx.HSet(key, []byte("1"), []byte("1"))216 c.Assert(err, IsNil)217 value, err = tx.HGet(key, []byte("1"))218 c.Assert(err, IsNil)219 c.Assert(value, DeepEquals, []byte("1"))220 l, err = tx.HLen(key)221 c.Assert(err, IsNil)222 c.Assert(l, Equals, int64(2))223 n, err = tx.HInc(key, []byte("1"), 1)224 c.Assert(err, IsNil)225 c.Assert(n, Equals, int64(2))226 l, err = tx.HLen(key)227 c.Assert(err, IsNil)228 c.Assert(l, Equals, int64(2))229 n, err = tx.HInc(key, []byte("1"), 1)230 c.Assert(err, IsNil)231 c.Assert(n, Equals, int64(3))232 l, err = tx.HLen(key)233 c.Assert(err, IsNil)234 c.Assert(l, Equals, int64(2))235 n, err = tx.HGetInt64(key, []byte("1"))236 c.Assert(err, IsNil)237 c.Assert(n, Equals, int64(3))238 l, err = tx.HLen(key)239 c.Assert(err, IsNil)240 c.Assert(l, Equals, int64(2))241 err = tx.HClear(key)242 c.Assert(err, IsNil)243 l, err = tx.HLen(key)244 c.Assert(err, IsNil)245 c.Assert(l, Equals, int64(0))246 err = tx.HDel(key, []byte("fake_key"))247 c.Assert(err, IsNil)248 // Test set nil value.249 value, err = tx.HGet(key, []byte("nil_key"))250 c.Assert(err, IsNil)251 c.Assert(value, IsNil)252 l, err = tx.HLen(key)253 c.Assert(err, IsNil)254 c.Assert(l, Equals, int64(0))255 err = tx.HSet(key, 
[]byte("nil_key"), nil)256 c.Assert(err, IsNil)257 l, err = tx.HLen(key)258 c.Assert(err, IsNil)259 c.Assert(l, Equals, int64(0))260 err = tx.HSet(key, []byte("nil_key"), []byte("1"))261 c.Assert(err, IsNil)262 l, err = tx.HLen(key)263 c.Assert(err, IsNil)264 c.Assert(l, Equals, int64(1))265 value, err = tx.HGet(key, []byte("nil_key"))266 c.Assert(err, IsNil)267 c.Assert(value, DeepEquals, []byte("1"))268 err = tx.HSet(key, []byte("nil_key"), nil)269 c.Assert(err, NotNil)270 l, err = tx.HLen(key)271 c.Assert(err, IsNil)272 c.Assert(l, Equals, int64(1))273 value, err = tx.HGet(key, []byte("nil_key"))274 c.Assert(err, IsNil)275 c.Assert(value, DeepEquals, []byte("1"))276 err = tx.HSet(key, []byte("nil_key"), []byte("2"))277 c.Assert(err, IsNil)278 l, err = tx.HLen(key)279 c.Assert(err, IsNil)280 c.Assert(l, Equals, int64(1))281 value, err = tx.HGet(key, []byte("nil_key"))282 c.Assert(err, IsNil)283 c.Assert(value, DeepEquals, []byte("2"))284 tx1 := structure.NewStructure(txn, nil, []byte{0x01})285 _, err = tx1.HInc(key, []byte("1"), 1)286 c.Assert(err, NotNil)287 err = tx1.HDel(key, []byte("1"))288 c.Assert(err, NotNil)289 err = txn.Commit(context.Background())290 c.Assert(err, IsNil)291 err = kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {292 t := structure.NewStructure(txn, txn, []byte{0x00})293 err = t.Set(key, []byte("abc"))294 c.Assert(err, IsNil)295 value, err = t.Get(key)296 c.Assert(err, IsNil)297 c.Assert(value, DeepEquals, []byte("abc"))298 return nil299 })300 c.Assert(err, IsNil)301}302func (*testTxStructureSuite) TestError(c *C) {303 kvErrs := []*terror.Error{304 structure.ErrInvalidHashKeyFlag,305 structure.ErrInvalidListIndex,306 structure.ErrInvalidListMetaData,307 structure.ErrWriteOnSnapshot,308 }309 for _, err := range kvErrs {310 code := terror.ToSQLError(err).Code311 c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err))312 }313}...

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

import (
	"errors"
	"fmt"
	"reflect"
)

// isNil reports whether i is nil, including the "typed nil inside an
// interface" case: a nil *T stored in an interface{} compares unequal to
// nil, so we additionally inspect the value with reflection for the kinds
// that can legitimately be nil.
func isNil(i interface{}) bool {
	if i == nil {
		return true
	}
	v := reflect.ValueOf(i)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface,
		reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	// Value kinds (int, string, struct, ...) can never be nil.
	return false
}

// Cache is a two-level in-memory map: key -> field -> value.
type Cache struct {
	cache map[string]map[string]string
}

// Get returns the value stored under key/field, or an error when either
// the key or the field is absent.
func (c *Cache) Get(key string, field string) (string, error) {
	if c.cache[key] == nil {
		return "", errors.New("key not found")
	}
	if c.cache[key][field] == "" {
		return "", errors.New("field not found")
	}
	return c.cache[key][field], nil
}

// Set stores value under key/field, lazily creating the inner map.
func (c *Cache) Set(key string, field string, value string) {
	if c.cache[key] == nil {
		c.cache[key] = make(map[string]string)
	}
	c.cache[key][field] = value
}

// Delete removes a key and all of its fields.
func (c *Cache) Delete(key string) {
	delete(c.cache, key)
}

// DeleteField removes a single field from a key's map.
func (c *Cache) DeleteField(key string, field string) {
	delete(c.cache[key], field)
}

// GetKeys returns all top-level keys (in unspecified order).
func (c *Cache) GetKeys() []string {
	keys := make([]string, 0, len(c.cache))
	for k := range c.cache {
		keys = append(keys, k)
	}
	return keys
}

// GetFields returns all field names stored under key (in unspecified order).
func (c *Cache) GetFields(key string) []string {
	fields := make([]string, 0, len(c.cache[key]))
	for f := range c.cache[key] {
		fields = append(fields, f)
	}
	return fields
}

func main() {
	var i interface{}
	fmt.Println(isNil(i))
	cache := Cache{cache: make(map[string]map[string]string)}
	cache.Set("user1", "name", "alice")
}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

import (
	"fmt"
	"reflect"
)

// Demonstrates reflect.Value.IsNil on pointers: a declared-but-unassigned
// pointer is nil, while a pointer from new(int) is not.
func main() {
	var a, b *int
	b = new(int)
	fmt.Println(reflect.ValueOf(a).IsNil()) // true: a was never assigned
	fmt.Println(reflect.ValueOf(b).IsNil()) // false: b points at a fresh int
}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

import (
	"fmt"
	"reflect"
)

// Prints the reflected type, value, and nil-ness of two nilable values.
// IsNil is only valid for chan, func, interface, map, pointer, and slice
// kinds, so x and y are declared as such. TODO confirm the original
// example's declarations — they were lost in extraction.
func main() {
	var x *int
	var y []int
	fmt.Println("type of x is ", reflect.TypeOf(x))
	fmt.Println("value of x is ", reflect.ValueOf(x))
	fmt.Println("value of x is nil ?", reflect.ValueOf(x).IsNil())
	fmt.Println("type of y is ", reflect.TypeOf(y))
	fmt.Println("value of y is ", reflect.ValueOf(y))
	fmt.Println("value of y is nil ?", reflect.ValueOf(y).IsNil())
}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

import (
	"fmt"
	"reflect"
)

// Shows that a pointer's nil-ness changes once it is assigned: nil while
// unset, non-nil after new(int).
func main() {
	var a *int
	fmt.Println("Is a nil? ", reflect.ValueOf(a).IsNil())
	a = new(int)
	fmt.Println("Is a nil? ", reflect.ValueOf(a).IsNil())
}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

import "fmt"

// isNil is a user-defined single-method interface; this example shows that
// "isNil" here is just an ordinary method name, unrelated to
// reflect.Value.IsNil.
type isNil interface {
	isNil() bool
}

// myNil is a trivial implementation of the isNil interface.
type myNil struct {
}

// isNil always reports true for myNil. NOTE(review): the original body was
// lost in extraction; returning true matches the zero-value "nil-like"
// intent of the type name — confirm against the original article.
func (m myNil) isNil() bool {
	return true
}

func main() {
	var i isNil = myNil{}
	fmt.Println(i.isNil())
}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

1import (2type is struct {3}4func main() {5 fmt.Println(reflect.ValueOf(i).IsNil())6}7import (8type is struct {9}10func main() {11 i = new(is)12 fmt.Println(reflect.ValueOf(i).IsNil())13}14import (15type is struct {16}17func main() {18 var i interface{}19 fmt.Println(reflect.ValueOf(i).IsNil())20}21import (22type is struct {23}24func main() {25 var i interface{}26 i = new(is)27 fmt.Println(reflect.ValueOf(i).IsNil())28}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

1if isNil(a) {2 fmt.Printf("a is nil")3} else {4 fmt.Printf("a is not nil")5}6if isNil(a) {7 fmt.Printf("a is nil")8} else {9 fmt.Printf("a is not nil")10}11if isNil(a) {12 fmt.Printf("a is nil")13} else {14 fmt.Printf("a is not nil")15}16if isNil(a) {17 fmt.Printf("a is nil")18} else {19 fmt.Printf("a is not nil")20}21if isNil(a) {22 fmt.Printf("a is nil")23} else {24 fmt.Printf("a is not nil")25}26if isNil(a) {27 fmt.Printf("a is nil")28} else {29 fmt.Printf("a is not nil")30}31if isNil(a) {32 fmt.Printf("a is nil")33} else {34 fmt.Printf("a is not nil")35}36if isNil(a) {37 fmt.Printf("a is nil")38} else {39 fmt.Printf("a is not nil")40}41if isNil(a) {42 fmt.Printf("a is nil")43} else {44 fmt.Printf("a is not nil")45}46if isNil(a) {47 fmt.Printf("a is nil")48} else {49 fmt.Printf("a is not nil")50}

Full Screen

Full Screen

isNil

Using AI Code Generation

copy

Full Screen

1import (2func main() {3 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())4}5import (6func main() {7 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())8}9import (10func main() {11 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())12}13import (14func main() {15 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())16}17import (18func main() {19 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())20}21import (22func main() {23 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())24}25import (26func main() {27 fmt.Println("isNil(a):", reflect.ValueOf(a).IsNil())28}29import (30func main() {31 fmt.Println("isNil(a):

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful