How to use the Flush method of the db package

Best Syzkaller code snippets using db.Flush
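
The snippets on this page come from a mix of Go storage libraries; none of them actually exercises syzkaller's own pkg/db, so here is a minimal sketch of what the titular db.Flush call looks like there. The Open signature and record layout are assumptions based on the pkg/db API around 2020 and may differ in your checkout:

import (
    "log"

    "github.com/google/syzkaller/pkg/db"
)

func main() {
    // Open loads (or creates) the corpus database; the repair flag and the
    // exact signature vary across syzkaller revisions -- treat as a sketch.
    corpus, err := db.Open("corpus.db", true)
    if err != nil {
        log.Fatalf("failed to open corpus: %v", err)
    }
    // Save only updates the in-memory record set...
    corpus.Save("some-key", []byte("some value"), 0)
    // ...Flush is what serializes the records back out to corpus.db.
    if err := corpus.Flush(); err != nil {
        log.Fatalf("failed to flush corpus: %v", err)
    }
}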

database.go

Source: database.go (GitHub)


...22 "github.com/ethereum/go-ethereum/log"23 "github.com/ethereum/go-ethereum/metrics"24)25var (26 memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)27 memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)28 memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)29 memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)30 memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)31 memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)32 memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)33 memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)34 memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)35)36// secureKeyPrefix is the database key prefix used to store trie node preimages.37var secureKeyPrefix = []byte("secure-key-")38// secureKeyLength is the length of the above prefix + 32byte hash.39const secureKeyLength = 11 + 3240// DatabaseReader wraps the Get and Has method of a backing store for the trie.41type DatabaseReader interface {42 // Get retrieves the value associated with key form the database.43 Get(key []byte) (value []byte, err error)44 // Has retrieves whether a key is present in the database.45 Has(key []byte) (bool, error)46}47// Database is an intermediate write layer between the trie data structures and48// the disk database. The aim is to accumulate trie writes in-memory and only49// periodically flush a couple tries to disk, garbage collecting the remainder.50type Database struct {51 diskdb ethdb.Database // Persistent storage for matured trie nodes52 nodes map[common.Hash]*cachedNode // Data and references relationships of a node53 oldest common.Hash // Oldest tracked node, flush-list head54 newest common.Hash // Newest tracked node, flush-list tail55 preimages map[common.Hash][]byte // Preimages of nodes from the secure trie56 seckeybuf [secureKeyLength]byte // Ephemeral buffer for calculating preimage keys57 gctime time.Duration // Time spent on garbage collection since last commit58 gcnodes uint64 // Nodes garbage collected since last commit59 gcsize common.StorageSize // Data storage garbage collected since last commit60 flushtime time.Duration // Time spent on data flushing since last commit61 flushnodes uint64 // Nodes flushed since last commit62 flushsize common.StorageSize // Data storage flushed since last commit63 nodesSize common.StorageSize // Storage size of the nodes cache (exc. 
flushlist)64 preimagesSize common.StorageSize // Storage size of the preimages cache65 lock sync.RWMutex66}67// cachedNode is all the information we know about a single cached node in the68// memory database write layer.69type cachedNode struct {70 blob []byte // Cached data block of the trie node71 parents int // Number of live nodes referencing this one72 children map[common.Hash]int // Children referenced by this nodes73 flushPrev common.Hash // Previous node in the flush-list74 flushNext common.Hash // Next node in the flush-list75}76// NewDatabase creates a new trie database to store ephemeral trie content before77// its written out to disk or garbage collected.78func NewDatabase(diskdb ethdb.Database) *Database {79 return &Database{80 diskdb: diskdb,81 nodes: map[common.Hash]*cachedNode{82 {}: {children: make(map[common.Hash]int)},83 },84 preimages: make(map[common.Hash][]byte),85 }86}87// DiskDB retrieves the persistent storage backing the trie database.88func (db *Database) DiskDB() DatabaseReader {89 return db.diskdb90}91// Insert writes a new trie node to the memory database if it's yet unknown. The92// method will make a copy of the slice.93func (db *Database) Insert(hash common.Hash, blob []byte) {94 db.lock.Lock()95 defer db.lock.Unlock()96 db.insert(hash, blob)97}98// insert is the private locked version of Insert.99func (db *Database) insert(hash common.Hash, blob []byte) {100 // If the node's already cached, skip101 if _, ok := db.nodes[hash]; ok {102 return103 }104 db.nodes[hash] = &cachedNode{105 blob: common.CopyBytes(blob),106 children: make(map[common.Hash]int),107 flushPrev: db.newest,108 }109 // Update the flush-list endpoints110 if db.oldest == (common.Hash{}) {111 db.oldest, db.newest = hash, hash112 } else {113 db.nodes[db.newest].flushNext, db.newest = hash, hash114 }115 db.nodesSize += common.StorageSize(common.HashLength + len(blob))116}117// insertPreimage writes a new trie node pre-image to the memory database if it's118// yet unknown. The method will make a copy of the slice.119//120// Note, this method assumes that the database's lock is held!121func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {122 if _, ok := db.preimages[hash]; ok {123 return124 }125 db.preimages[hash] = common.CopyBytes(preimage)126 db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))127}128// Node retrieves a cached trie node from memory. If it cannot be found cached,129// the method queries the persistent database for the content.130func (db *Database) Node(hash common.Hash) ([]byte, error) {131 // Retrieve the node from cache if available132 db.lock.RLock()133 node := db.nodes[hash]134 db.lock.RUnlock()135 if node != nil {136 return node.blob, nil137 }138 // Content unavailable in memory, attempt to retrieve from disk139 return db.diskdb.Get(hash[:])140}141// preimage retrieves a cached trie node pre-image from memory. If it cannot be142// found cached, the method queries the persistent database for the content.143func (db *Database) preimage(hash common.Hash) ([]byte, error) {144 // Retrieve the node from cache if available145 db.lock.RLock()146 preimage := db.preimages[hash]147 db.lock.RUnlock()148 if preimage != nil {149 return preimage, nil150 }151 // Content unavailable in memory, attempt to retrieve from disk152 return db.diskdb.Get(db.secureKey(hash[:]))153}154// secureKey returns the database key for the preimage of key, as an ephemeral155// buffer. 
The caller must not hold onto the return value because it will become156// invalid on the next call.157func (db *Database) secureKey(key []byte) []byte {158 buf := append(db.seckeybuf[:0], secureKeyPrefix...)159 buf = append(buf, key...)160 return buf161}162// Nodes retrieves the hashes of all the nodes cached within the memory database.163// This method is extremely expensive and should only be used to validate internal164// states in test code.165func (db *Database) Nodes() []common.Hash {166 db.lock.RLock()167 defer db.lock.RUnlock()168 var hashes = make([]common.Hash, 0, len(db.nodes))169 for hash := range db.nodes {170 if hash != (common.Hash{}) { // Special case for "root" references/nodes171 hashes = append(hashes, hash)172 }173 }174 return hashes175}176// Reference adds a new reference from a parent node to a child node.177func (db *Database) Reference(child common.Hash, parent common.Hash) {178 db.lock.RLock()179 defer db.lock.RUnlock()180 db.reference(child, parent)181}182// reference is the private locked version of Reference.183func (db *Database) reference(child common.Hash, parent common.Hash) {184 // If the node does not exist, it's a node pulled from disk, skip185 node, ok := db.nodes[child]186 if !ok {187 return188 }189 // If the reference already exists, only duplicate for roots190 if _, ok = db.nodes[parent].children[child]; ok && parent != (common.Hash{}) {191 return192 }193 node.parents++194 db.nodes[parent].children[child]++195}196// Dereference removes an existing reference from a parent node to a child node.197func (db *Database) Dereference(child common.Hash, parent common.Hash) {198 db.lock.Lock()199 defer db.lock.Unlock()200 nodes, storage, start := len(db.nodes), db.nodesSize, time.Now()201 db.dereference(child, parent)202 db.gcnodes += uint64(nodes - len(db.nodes))203 db.gcsize += storage - db.nodesSize204 db.gctime += time.Since(start)205 memcacheGCTimeTimer.Update(time.Since(start))206 memcacheGCSizeMeter.Mark(int64(storage - db.nodesSize))207 memcacheGCNodesMeter.Mark(int64(nodes - len(db.nodes)))208 log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start),209 "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize)210}211// dereference is the private locked version of Dereference.212func (db *Database) dereference(child common.Hash, parent common.Hash) {213 // Dereference the parent-child214 node := db.nodes[parent]215 node.children[child]--216 if node.children[child] == 0 {217 delete(node.children, child)218 }219 // If the child does not exist, it's a previously committed node.220 node, ok := db.nodes[child]221 if !ok {222 return223 }224 // If there are no more references to the child, delete it and cascade225 node.parents--226 if node.parents == 0 {227 // Remove the node from the flush-list228 if child == db.oldest {229 db.oldest = node.flushNext230 } else {231 db.nodes[node.flushPrev].flushNext = node.flushNext232 db.nodes[node.flushNext].flushPrev = node.flushPrev233 }234 // Dereference all children and delete the node235 for hash := range node.children {236 db.dereference(hash, child)237 }238 delete(db.nodes, child)239 db.nodesSize -= common.StorageSize(common.HashLength + len(node.blob))240 }241}242// Cap iteratively flushes old but still referenced trie nodes until the total243// memory usage goes below the given threshold.244func (db *Database) Cap(limit common.StorageSize) error {245 // Create a database 
batch to flush persistent data out. It is important that246 // outside code doesn't see an inconsistent state (referenced data removed from247 // memory cache during commit but not yet in persistent storage). This is ensured248 // by only uncaching existing data when the database write finalizes.249 db.lock.RLock()250 nodes, storage, start := len(db.nodes), db.nodesSize, time.Now()251 batch := db.diskdb.NewBatch()252 // db.nodesSize only contains the useful data in the cache, but when reporting253 // the total memory consumption, the maintenance metadata is also needed to be254 // counted. For every useful node, we track 2 extra hashes as the flushlist.255 size := db.nodesSize + common.StorageSize((len(db.nodes)-1)*2*common.HashLength)256 // If the preimage cache got large enough, push to disk. If it's still small257 // leave for later to deduplicate writes.258 flushPreimages := db.preimagesSize > 4*1024*1024259 if flushPreimages {260 for hash, preimage := range db.preimages {261 if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {262 log.Error("Failed to commit preimage from trie database", "err", err)263 db.lock.RUnlock()264 return err265 }266 if batch.ValueSize() > ethdb.IdealBatchSize {267 if err := batch.Write(); err != nil {268 db.lock.RUnlock()269 return err270 }271 batch.Reset()272 }273 }274 }275 // Keep committing nodes from the flush-list until we're below allowance276 oldest := db.oldest277 for size > limit && oldest != (common.Hash{}) {278 // Fetch the oldest referenced node and push into the batch279 node := db.nodes[oldest]280 if err := batch.Put(oldest[:], node.blob); err != nil {281 db.lock.RUnlock()282 return err283 }284 // If we exceeded the ideal batch size, commit and reset285 if batch.ValueSize() >= ethdb.IdealBatchSize {286 if err := batch.Write(); err != nil {287 log.Error("Failed to write flush list to disk", "err", err)288 db.lock.RUnlock()289 return err290 }291 batch.Reset()292 }293 // Iterate to the next flush item, or abort if the size cap was achieved. Size294 // is the total size, including both the useful cached data (hash -> blob), as295 // well as the flushlist metadata (2*hash). 
When flushing items from the cache,296 // we need to reduce both.297 size -= common.StorageSize(3*common.HashLength + len(node.blob))298 oldest = node.flushNext299 }300 // Flush out any remainder data from the last batch301 if err := batch.Write(); err != nil {302 log.Error("Failed to write flush list to disk", "err", err)303 db.lock.RUnlock()304 return err305 }306 db.lock.RUnlock()307 // Write successful, clear out the flushed data308 db.lock.Lock()309 defer db.lock.Unlock()310 if flushPreimages {311 db.preimages = make(map[common.Hash][]byte)312 db.preimagesSize = 0313 }314 for db.oldest != oldest {315 node := db.nodes[db.oldest]316 delete(db.nodes, db.oldest)317 db.oldest = node.flushNext318 db.nodesSize -= common.StorageSize(common.HashLength + len(node.blob))319 }320 if db.oldest != (common.Hash{}) {321 db.nodes[db.oldest].flushPrev = common.Hash{}322 }323 db.flushnodes += uint64(nodes - len(db.nodes))324 db.flushsize += storage - db.nodesSize325 db.flushtime += time.Since(start)326 memcacheFlushTimeTimer.Update(time.Since(start))327 memcacheFlushSizeMeter.Mark(int64(storage - db.nodesSize))328 memcacheFlushNodesMeter.Mark(int64(nodes - len(db.nodes)))329 log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start),330 "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.nodes), "livesize", db.nodesSize)331 return nil332}333// Commit iterates over all the children of a particular node, writes them out334// to disk, forcefully tearing down all references in both directions.335//336// As a side effect, all pre-images accumulated up to this point are also written.337func (db *Database) Commit(node common.Hash, report bool) error {338 // Create a database batch to flush persistent data out. It is important that339 // outside code doesn't see an inconsistent state (referenced data removed from340 // memory cache during commit but not yet in persistent storage). This is ensured341 // by only uncaching existing data when the database write finalizes.342 db.lock.RLock()...


batch_test.go

Source: batch_test.go (GitHub)


...
        for i := 0; i < M; i++ {
            version++
            require.NoError(t, wb.DeleteAt(key(i), version))
        }
        require.NoError(t, wb.Flush())
        t.Logf("Time taken for %d writes (w/ test options): %s\n", N+M, time.Since(start))
        err := db.View(func(txn *Txn) error {
            itr := txn.NewIterator(DefaultIteratorOptions)
            defer itr.Close()
            i := M
            for itr.Rewind(); itr.Valid(); itr.Next() {
                item := itr.Item()
                require.Equal(t, string(key(i)), string(item.Key()))
                valcopy, err := item.ValueCopy(nil)
                require.NoError(t, err)
                require.Equal(t, val(i), valcopy)
                i++
            }
            require.Equal(t, N, i)
            return nil
        })
        require.NoError(t, err)
    }
    t.Run("disk mode", func(t *testing.T) {
        opt := getTestOptions("")
        runBadgerTest(t, &opt, func(t *testing.T, db *DB) {
            test(t, db)
        })
        t.Logf("Disk mode done\n")
    })
    t.Run("InMemory mode", func(t *testing.T) {
        opt := getTestOptions("")
        opt.InMemory = true
        db, err := Open(opt)
        require.NoError(t, err)
        test(t, db)
        t.Logf("InMemory mode done\n")
        require.NoError(t, db.Close())
    })
}

// This test ensures we don't end up in deadlock in case of empty writebatch.
func TestEmptyWriteBatch(t *testing.T) {
    runBadgerTest(t, nil, func(t *testing.T, db *DB) {
        wb := db.NewWriteBatch()
        require.NoError(t, wb.Flush())
        wb = db.NewWriteBatch()
        require.NoError(t, wb.Flush())
        wb = db.NewWriteBatch()
        require.NoError(t, wb.Flush())
    })
}

// This test ensures we don't panic during flush.
// See issue: https://github.com/dgraph-io/badger/issues/1394
func TestFlushPanic(t *testing.T) {
    t.Run("flush after flush", func(t *testing.T) {
        runBadgerTest(t, nil, func(t *testing.T, db *DB) {
            wb := db.NewWriteBatch()
            wb.Flush()
            require.Error(t, y.ErrCommitAfterFinish, wb.Flush())
        })
    })
    t.Run("flush after cancel", func(t *testing.T) {
        runBadgerTest(t, nil, func(t *testing.T, db *DB) {
            wb := db.NewWriteBatch()
            wb.Cancel()
            require.Error(t, y.ErrCommitAfterFinish, wb.Flush())
        })
    })
}
...
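
Outside of badger's test helpers, the same WriteBatch flow looks roughly like this (a sketch assuming badger v2, where WriteBatch.Set takes just a key and a value):

import (
    "log"

    badger "github.com/dgraph-io/badger/v2"
)

func main() {
    db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    wb := db.NewWriteBatch()
    defer wb.Cancel() // releases the batch if we bail out before Flush
    if err := wb.Set([]byte("key"), []byte("value")); err != nil {
        log.Fatal(err)
    }
    // Flush commits everything buffered so far; as TestFlushPanic shows,
    // flushing the same batch again afterwards is an error.
    if err := wb.Flush(); err != nil {
        log.Fatal(err)
    }
}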


Flush

Using AI Code Generation


import (
    "fmt"

    bolt "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("test.db", 0600, nil)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer db.Close()

    err = db.Update(func(tx *bolt.Tx) error {
        bucket, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
        if err != nil {
            return fmt.Errorf("create bucket: %s", err)
        }
        if err := bucket.Put([]byte("answer"), []byte("42")); err != nil {
            return fmt.Errorf("put: %s", err)
        }
        return nil // the original closure never returned
    })
    if err != nil {
        fmt.Println(err)
    }
    // Bolt's DB type has no Flush (nor a Compact) method; Sync is the real
    // API and the closest equivalent, fsync'ing the data file to disk.
    if err := db.Sync(); err != nil {
        fmt.Println(err)
    }
}
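
By default Bolt fsyncs the data file as part of every committed write transaction, so an explicit Sync only buys you something when the database was opened with the NoSync option enabled.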


Flush

Using AI Code Generation


import (
    "fmt"

    bolt "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("my.db", 0600, nil)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer db.Close()

    err = db.Update(func(tx *bolt.Tx) error {
        b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
        if err != nil {
            return fmt.Errorf("create bucket: %s", err)
        }
        return b.Put([]byte("Answer"), []byte("42"))
    })
    if err != nil {
        fmt.Println(err)
    }
    // The first variant of this snippet called db.Flush(), which does not
    // exist on Bolt's DB; db.Sync() is the method that forces the file
    // contents out to stable storage.
    if err := db.Sync(); err != nil {
        fmt.Println(err)
    }
}


Flush

Using AI Code Generation


import (
    "fmt"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    db, err := leveldb.OpenFile("db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    // goleveldb's DB has no Flush method; passing WriteOptions{Sync: true}
    // is the closest equivalent, forcing the write through to stable storage.
    err = db.Put([]byte("key"), []byte("value"), &opt.WriteOptions{Sync: true})
    if err != nil {
        log.Fatal(err)
    }
    data, err := db.Get([]byte("key"), nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("value: %s\n", data)
}

import (
    "fmt"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    err = db.Put([]byte("key"), []byte("value"), nil)
    if err != nil {
        log.Fatal(err)
    }
    data, err := db.Get([]byte("key"), nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("value: %s\n", data)
}

import (
    "fmt"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    err = db.Put([]byte("key"), []byte("value"), nil)
    if err != nil {
        log.Fatal(err)
    }
    has, err := db.Has([]byte("key"), nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("has key: %v\n", has)
}
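
goleveldb acknowledges a write once it is in the memtable and write-ahead log; WriteOptions{Sync: true} additionally syncs the log through the OS buffer cache before returning, which is the durability knob this library offers in place of an explicit Flush.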


Flush

Using AI Code Generation


import (
    "fmt"
    "log"
    "os"
    "time"

    bolt "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("my.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    err = db.Update(func(tx *bolt.Tx) error {
        b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
        if err != nil {
            return fmt.Errorf("create bucket: %s", err)
        }
        if err := b.Put([]byte("answer"), []byte("42")); err != nil {
            return err // the original swallowed this error in an empty block
        }
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
    time.Sleep(1 * time.Second)
    // Read the raw database file back to observe the flushed bytes.
    f, err := os.OpenFile("my.db", os.O_RDONLY, 0600)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    buf := make([]byte, 100)
    if _, err := f.Read(buf); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", buf)
}
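
Because the Update above commits, and by default fsyncs, before it returns, the raw bytes read back from my.db already contain the flushed page data; the one-second sleep is belt-and-braces rather than a required flush delay.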


Flush

Using AI Code Generation


import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/mattn/go-sqlite3"
)

func main() {
    db, err := sql.Open("sqlite3", "./foo.db")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    stmt, err := db.Prepare("INSERT INTO userinfo(username, departname, created) values(?,?,?)")
    if err != nil {
        log.Fatal(err)
    }
    defer stmt.Close()
    res, err := stmt.Exec("astaxie", "研发部门", "2012-12-09") // "研发部门" = "R&D department"
    if err != nil {
        log.Fatal(err)
    }
    id, err := res.LastInsertId()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(id)
    stmt, err = db.Prepare("update userinfo set username=? where uid=?")
    if err != nil {
        log.Fatal(err)
    }
    defer stmt.Close()
    res, err = stmt.Exec("astaxieupdate", id)
    if err != nil {
        log.Fatal(err)
    }
    affect, err := res.RowsAffected()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(affect)
    rows, err := db.Query("SELECT * FROM userinfo")
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()
    // The original snippet never declared the scan targets.
    var (
        uid                           int
        username, department, created string
    )
    for rows.Next() {
        err = rows.Scan(&uid, &username, &department, &created)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(uid, username, department, created)
    }
    // Select the three columns explicitly to match the three scan targets.
    err = db.QueryRow("SELECT username, departname, created FROM userinfo").Scan(&username, &department, &created)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(username, department, created)
    stmt, err = db.Prepare("delete from userinfo where uid=?")
    if err != nil {
        log.Fatal(err)
    }
    defer stmt.Close()
    res, err = stmt.Exec(id)
    if err != nil {
        log.Fatal(err)
    }
    affect, err = res.RowsAffected()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(affect)
}
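
database/sql itself exposes no Flush: each Exec is handed straight to the driver, and durability is SQLite's job (tunable via pragmas such as synchronous). The closest thing to a flush at this layer is committing a transaction.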


Flush

Using AI Code Generation


import (
    "fmt"

    "github.com/ethereum/go-ethereum/ethdb/leveldb"
    "github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
    // LevelDB-backed database (the 4-argument New matches older go-ethereum
    // releases; newer ones add a readonly flag).
    db, err := leveldb.New("/tmp/leveldb", 128, 1024, "")
    if err != nil {
        fmt.Println("error in creating db")
    }
    batch := db.NewBatch()
    batch.Put([]byte("key1"), []byte("value1"))
    batch.Put([]byte("key2"), []byte("value2"))
    batch.Delete([]byte("key1"))
    if err := batch.Write(); err != nil {
        fmt.Println("error in writing to db")
    }
    db.Close()

    // The original snippet repeated this with a rocksdb.New of the same
    // shape; go-ethereum ships no such ethdb backend, so it is omitted here.

    // Pure in-memory database, no disk involved.
    mdb := memorydb.New()
    batch = mdb.NewBatch()
    batch.Put([]byte("key1"), []byte("value1"))
    batch.Put([]byte("key2"), []byte("value2"))
    batch.Delete([]byte("key1"))
    if err := batch.Write(); err != nil {
        fmt.Println("error in writing to db")
    }
}
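
An ethdb batch is purely an in-memory staging area until Write is called; that is exactly the mechanism the trie database.go snippet above leans on, chunking its flushes whenever batch.ValueSize() crosses ethdb.IdealBatchSize.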


Flush

Using AI Code Generation


import (
    "fmt"
    "log"

    bolt "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("my.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    err = db.Update(func(tx *bolt.Tx) error {
        _, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
        if err != nil {
            return fmt.Errorf("create bucket: %s", err)
        }
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
    // Bolt's DB.Batch does not hand back a batch object with a Flush method,
    // as the original snippet assumed; it takes a function, coalesces it with
    // concurrent callers, and commits (flushes) when the transaction closes.
    err = db.Batch(func(tx *bolt.Tx) error {
        bucket, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
        if err != nil {
            return err
        }
        return bucket.Put([]byte("answer"), []byte("42"))
    })
    if err != nil {
        log.Fatal(err)
    }
}

import (
    "fmt"
    "log"

    bolt "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("my.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    err = db.View(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("MyBucket"))
        if b == nil {
            return fmt.Errorf("bucket %q not found", "MyBucket")
        }
        v := b.Get([]byte("answer"))
        fmt.Printf("The answer is: %s\n", v)
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}
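
Unlike Update, Batch may merge your function with calls from other goroutines into a single commit and may retry it on conflict; it only pays off when many goroutines write concurrently, which is why Bolt's documentation steers single-writer code toward Update.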


Flush

Using AI Code Generation


import (
    "fmt"

    bolt "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("my.db", 0600, nil)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer db.Close()
    err = db.Update(func(tx *bolt.Tx) error {
        _, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
        if err != nil {
            return fmt.Errorf("create bucket: %s", err)
        }
        return nil
    })
    if err != nil {
        fmt.Println(err)
    }
    err = db.Update(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("MyBucket"))
        return b.Put([]byte("answer"), []byte("42"))
    })
    if err != nil {
        fmt.Println(err)
    }
    err = db.View(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("MyBucket"))
        v := b.Get([]byte("answer"))
        fmt.Printf("The answer is: %s\n", v)
        return nil
    })
    if err != nil {
        fmt.Println(err)
    }
    // Force the file to disk; only meaningful if the DB was opened with NoSync.
    if err := db.Sync(); err != nil {
        fmt.Println(err)
    }
}


