How to use the Sync method of the state package

Best code snippets using state.Sync, taken from go-ethereum's state trie sync implementation and its tests

statesync.go

Source: statesync.go (GitHub)


...
// timedOut returns if this request timed out.
func (req *stateReq) timedOut() bool {
	return req.response == nil
}

// stateSyncStats is a collection of progress stats to report during a state trie
// sync to RPC requests as well as to display in user logs.
type stateSyncStats struct {
	processed  uint64 // Number of state entries processed
	duplicate  uint64 // Number of state entries downloaded twice
	unexpected uint64 // Number of non-requested state entries received
	pending    uint64 // Number of still pending state entries
}

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
	// Create the state sync
	s := newStateSync(d, root)
	select {
	case d.stateSyncStart <- s:
	case <-d.quitCh:
		s.err = errCancelStateFetch
		close(s.done)
	}
	return s
}

// stateFetcher manages the active state sync and accepts requests
// on its behalf.
func (d *Downloader) stateFetcher() {
	for {
		select {
		case s := <-d.stateSyncStart:
			for next := s; next != nil; {
				next = d.runStateSync(next)
			}
		case <-d.stateCh:
			// Ignore state responses while no sync is running.
		case <-d.quitCh:
			return
		}
	}
}

// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
	var (
		active   = make(map[string]*stateReq) // Currently in-flight requests
		finished []*stateReq                  // Completed or failed requests
		timeout  = make(chan *stateReq)       // Timed out active requests
	)
	defer func() {
		// Cancel active request timers on exit. Also set peers to idle so they're
		// available for the next sync.
		for _, req := range active {
			req.timer.Stop()
			req.peer.SetNodeDataIdle(len(req.items))
		}
	}()
	// Run the state sync.
	go s.run()
	defer s.Cancel()

	// Listen for peer departure events to cancel assigned tasks
	peerDrop := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
	defer peerSub.Unsubscribe()

	for {
		// Enable sending of the first buffered element if there is one.
		var (
			deliverReq   *stateReq
			deliverReqCh chan *stateReq
		)
		if len(finished) > 0 {
			deliverReq = finished[0]
			deliverReqCh = s.deliver
		}
		select {
		// The stateSync lifecycle:
		case next := <-d.stateSyncStart:
			return next

		case <-s.done:
			return nil

		// Send the next finished request to the current sync:
		case deliverReqCh <- deliverReq:
			// Shift out the first request, but also set the emptied slot to nil for GC
			copy(finished, finished[1:])
			finished[len(finished)-1] = nil
			finished = finished[:len(finished)-1]

		// Handle incoming state packs:
		case pack := <-d.stateCh:
			// Discard any data not requested (or previously timed out)
			req := active[pack.PeerId()]
			if req == nil {
				log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.response = pack.(*statePack).states
			finished = append(finished, req)
			delete(active, pack.PeerId())

		// Handle dropped peer connections:
		case p := <-peerDrop:
			// Skip if no request is currently pending
			req := active[p.id]
			if req == nil {
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.dropped = true
			finished = append(finished, req)
			delete(active, p.id)

		// Handle timed-out requests:
		case req := <-timeout:
			// If the peer is already requesting something else, ignore the stale timeout.
			// This can happen when the timeout and the delivery happens simultaneously,
			// causing both pathways to trigger.
			if active[req.peer.id] != req {
				continue
			}
			// Move the timed out data back into the download queue
			finished = append(finished, req)
			delete(active, req.peer.id)

		// Track outgoing state requests:
		case req := <-d.trackStateReq:
			// If an active request already exists for this peer, we have a problem. In
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous times out. In this case the first
			// request is never honored, alas we must not silently overwrite it, as that
			// causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)

				// Make sure the previous one doesn't get silently lost
				old.timer.Stop()
				old.dropped = true
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
			req.timer = time.AfterFunc(req.timeout, func() {
				select {
				case timeout <- req:
				case <-s.done:
					// Prevent leaking of timer goroutines in the unlikely case where a
					// timer is fired just before exiting runStateSync.
				}
			})
			active[req.peer.id] = req
		}
	}
}

// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	sched  *trie.Sync                 // State trie sync scheduler defining the tasks
	keccak hash.Hash                  // Keccak256 hasher to verify deliveries with
	tasks  map[common.Hash]*stateTask // Set of tasks currently queued for retrieval

	numUncommitted   int
	bytesUncommitted int

	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)
}

// stateTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type stateTask struct {
	attempts map[string]struct{}
}

// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	return &stateSync{
		d:       d,
		sched:   state.NewStateSync(root, d.stateDB, d.stateBloom),
		keccak:  sha3.NewLegacyKeccak256(),
		tasks:   make(map[common.Hash]*stateTask),
		deliver: make(chan *stateReq),
		cancel:  make(chan struct{}),
		done:    make(chan struct{}),
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	s.err = s.loop()
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() { close(s.cancel) })
	return s.Wait()
}

// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending it to them) as well as
// for the processing of inbound data. Note, that the loop does not directly
// receive data from peers, rather those are buffered up in the downloader and
// pushed here async. The reason is to decouple processing from data receipt
// and timeouts.
func (s *stateSync) loop() (err error) {
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if len(req.items) <= 2 && !req.dropped && req.timedOut() {
				// 2 items are the minimum requested, if even that times out, we've no use of
				// this peer at the moment.
				log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					// The dropPeer method is nil when `--copydb` is used for a local copy.
					// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
					req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()
					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			if err != nil {
				log.Warn("Node data write error", "err", err)
				return err
			}
			req.peer.SetNodeDataIdle(delivered)
		}
	}
	return nil
}

func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.d.stateDB.NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}

// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.requestRTT())
		req := &stateReq{peer: p, timeout: s.d.requestTTL()}
		s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(req.items) > 0 {
			req.peer.log.Trace("Requesting new batch of data", "type", "state", "count", len(req.items))
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(req.items)
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) {
	// Refill available tasks from the scheduler.
	if len(s.tasks) < n {
		new := s.sched.Missing(n - len(s.tasks))
		for _, hash := range new {
			s.tasks[hash] = &stateTask{make(map[string]struct{})}
		}
	}
	// Find tasks that haven't been tried with the request's peer.
	req.items = make([]common.Hash, 0, n)
	req.tasks = make(map[common.Hash]*stateTask, n)
	for hash, t := range s.tasks {
		// Stop when we've gathered enough requests
		if len(req.items) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}
		req.items = append(req.items, hash)
		req.tasks[hash] = t
		delete(s.tasks, hash)
	}
}

// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered. Returns whether the peer actually managed to deliver anything of
// value, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	defer func(start time.Time) {
		if duplicate > 0 || unexpected > 0 {
			s.updateStats(0, duplicate, unexpected, time.Since(start))
		}
	}(time.Now())

	// Iterate over all the delivered data and inject one-by-one into the trie
	for _, blob := range req.response {
		_, hash, err := s.processNodeData(blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case trie.ErrNotRequested:
			unexpected++
		case trie.ErrAlreadyProcessed:
			duplicate++
		default:
			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
		delete(req.tasks, hash)
	}
	// Put unfulfilled tasks back into the retry queue
	npeers := s.d.peers.Len()
	for hash, task := range req.tasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("state node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.tasks[hash] = task
	}
	return successful, nil
}

// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
func (s *stateSync) processNodeData(blob []byte) (bool, common.Hash, error) {
	res := trie.SyncResult{Data: blob}
	s.keccak.Reset()
	s.keccak.Write(blob)
	s.keccak.Sum(res.Hash[:0])
	committed, _, err := s.sched.Process([]trie.SyncResult{res})
	return committed, res.Hash, err
}

// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	s.d.syncStatsLock.Lock()
	defer s.d.syncStatsLock.Unlock()

	s.d.syncStatsState.pending = uint64(s.sched.Pending())
	s.d.syncStatsState.processed += uint64(written)
	s.d.syncStatsState.duplicate += uint64(duplicate)
	s.d.syncStatsState.unexpected += uint64(unexpected)

	if written > 0 || duplicate > 0 || unexpected > 0 {
		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "retry", len(s.tasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
	}
	if written > 0 {
		rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
	}
}
...
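Taken together, the surface exposed here is small: syncState launches a sync for a root hash, and the returned *stateSync is controlled solely through Wait and Cancel (Cancel is idempotent thanks to cancelOnce). A minimal sketch of consuming that API, using a hypothetical fetchState helper that does not exist in the source above:

// fetchState is a hypothetical illustration only: start a sync for the
// given root, make sure it is torn down, and block until it finishes.
func (d *Downloader) fetchState(root common.Hash) error {
	s := d.syncState(root) // hands the sync to the stateFetcher loop
	defer s.Cancel()       // idempotent; also waits for shutdown

	return s.Wait() // returns the error recorded by run()
}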


sync_test.go

Source: sync_test.go (GitHub)


...
	}
	return it.Error
}

// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	if req := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New())).Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}

// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := append([]common.Hash{}, sched.Missing(count)...)
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[:0], sched.Missing(count)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, count int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(count) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(count) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even in random order), with others sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, even those in random order
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot)

	// Create a destination state and sync with the scheduler
	dstDb := rawdb.NewMemoryDatabase()
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

	added := []common.Hash{}

	queue := append([]common.Hash{}, sched.Missing(1)...)
	for len(queue) > 0 {
		// Fetch a batch of state nodes
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x", hash)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process each of the state nodes
		if _, index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		batch := dstDb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries added so far are complete or missing entirely.
...
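Every test above drives the scheduler the same way, and that loop is the essential recipe for using the sync scheduler returned by NewStateSync: ask Missing for outstanding node hashes, obtain those nodes from some source, feed them back through Process, then Commit the write batch, and repeat until nothing is pending. A distilled sketch, assuming the same package-internal types the tests use, with the local source database standing in for a network fetch:

// driveStateSync is a sketch of the drive loop shared by the tests above.
// srcDb plays the role of the remote peers; dstDb receives the synced state.
func driveStateSync(srcDb Database, dstDb ethdb.Database, srcRoot common.Hash) error {
	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
	for queue := sched.Missing(100); len(queue) > 0; queue = sched.Missing(100) {
		results := make([]trie.SyncResult, 0, len(queue))
		for _, hash := range queue {
			data, err := srcDb.TrieDB().Node(hash) // stand-in for a network fetch
			if err != nil {
				return err
			}
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		if _, _, err := sched.Process(results); err != nil { // inject fetched nodes
			return err
		}
		batch := dstDb.NewBatch() // persist completed trie nodes
		if err := sched.Commit(batch); err != nil {
			return err
		}
		if err := batch.Write(); err != nil {
			return err
		}
	}
	return nil
}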


Sync

Using AI Code Generation


A minimal sync.WaitGroup example: two goroutines print concurrently and main waits for both to finish.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	wg.Add(2) // expect two goroutines to finish
	go func() {
		fmt.Println("Hello")
		wg.Done()
	}()
	go func() {
		fmt.Println("World")
		wg.Done()
	}()
	wg.Wait() // block until both goroutines have called Done
}
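One invariant to keep in mind with sync.WaitGroup: the count passed to Add must be matched by exactly that many Done calls, and Add should run before the goroutines are spawned; otherwise Wait can return before the work has finished.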


Sync

Using AI Code Generation


The same pattern, extended to show that Wait acts as a barrier: the final print in main can only run after both goroutines have finished.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		fmt.Println("first go routine")
		wg.Done()
	}()
	go func() {
		fmt.Println("second go routine")
		wg.Done()
	}()
	wg.Wait()                      // main blocks here until both are done
	fmt.Println("main go routine") // always printed last
}


Sync

Using AI Code Generation


A classic scheduler-interleaving demo: two goroutines print the lower- and upper-case alphabets three times each. The interleaving of their output is nondeterministic, but Wait guarantees both finish before the program terminates.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(2)

	fmt.Println("Start Goroutines")
	go func() {
		defer wg.Done()
		for count := 0; count < 3; count++ {
			for char := 'a'; char < 'a'+26; char++ {
				fmt.Printf("%c ", char)
			}
		}
	}()
	go func() {
		defer wg.Done()
		for count := 0; count < 3; count++ {
			for char := 'A'; char < 'A'+26; char++ {
				fmt.Printf("%c ", char)
			}
		}
	}()
	fmt.Println("Waiting to finish")
	wg.Wait()
	fmt.Println("\nTerminating Program")
}


Sync

Using AI Code Generation


The same WaitGroup example once more, this time using defer wg.Done() so the counter is decremented even if the goroutine panics or returns early.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		defer wg.Done() // runs even on early return or panic
		fmt.Println("Hello")
	}()
	go func() {
		defer wg.Done()
		fmt.Println("World")
	}()
	wg.Wait()
}


Sync

Using AI Code Generation


Two ways to make shared state concurrency-safe: a struct that embeds sync.Mutex around a plain map, and sync.Map, which handles locking internally.

package main

import (
	"fmt"
	"sync"
)

// state guards a map with an embedded mutex so it can be shared
// safely between goroutines.
type state struct {
	sync.Mutex
	m map[string]int
}

func (s *state) inc(key string) {
	s.Lock()
	defer s.Unlock()
	s.m[key]++
}

func (s *state) value(key string) int {
	s.Lock()
	defer s.Unlock()
	return s.m[key]
}

func main() {
	s := state{m: make(map[string]int)}
	s.inc("foo")
	fmt.Println(s.value("foo")) // 1

	// sync.Map offers the same safety without explicit locking.
	var m sync.Map
	m.Store("foo", 42)
	v, _ := m.Load("foo")
	fmt.Println(v) // 42
}
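As a rule of thumb, the Go documentation recommends the plain mutex-plus-map approach for most code; sync.Map mainly pays off for caches where keys are written once and read many times, or where goroutines work on disjoint sets of keys.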


Sync

Using AI Code Generation


1import "fmt"2type State struct {3}4func (s *State) Sync() {5}6func main() {7 s := &State{sync: make(chan bool)}8 go func() {9 fmt.Println("Hello")10 }()11 s.Sync()12 fmt.Println("World")13}


Sync

Using AI Code Generation


A sketch of constructing a state handle with functional options. Note that these option names come from the original AI-generated snippet and may not match any real state package API; verify them against the package you are actually using.

func main() {
	// Illustrative only: defaults, then overriding the directory,
	// the file name, or the full path.
	st := state.NewState()
	st = state.NewState(state.WithStateDir("/path/to/state"))
	st = state.NewState(state.WithStateFileName("terraform.tfstate"))
	st = state.NewState(state.WithStateFilePath("/path/to/custom/terraform.tfstate"))
	st = state.NewState(state.WithStateFilePath("/path/to/custom"), state.WithStateFileName("terraform.tfstate"))
	_ = st
}

