How to use the Nil method of the td package

Best Go-testdeep code snippets using td.Nil
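Before the longer snippets below, here is a minimal sketch of how td.Nil is typically used in a go-testdeep test. It assumes the standard import path github.com/maxatome/go-testdeep/td; the variable names are illustrative only.

package example_test

import (
    "testing"

    "github.com/maxatome/go-testdeep/td"
)

func TestNil(t *testing.T) {
    var err error
    var ptr *int

    // td.Nil succeeds when the compared value is nil,
    // whatever its nilable kind (interface, pointer, slice, map, chan, func).
    td.Cmp(t, err, td.Nil(), "err is nil")
    td.Cmp(t, ptr, td.Nil(), "ptr is nil")

    // CmpNil is the shortcut form of Cmp(t, got, Nil()).
    td.CmpNil(t, err)
}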

fetcher.go

Source: fetcher.go (GitHub)


1// Copyright 2016 The go-ethereum Authors2// This file is part of the go-ethereum library.3//4// The go-ethereum library is free software: you can redistribute it and/or modify5// it under the terms of the GNU Lesser General Public License as published by6// the Free Software Foundation, either version 3 of the License, or7// (at your option) any later version.8//9// The go-ethereum library is distributed in the hope that it will be useful,10// but WITHOUT ANY WARRANTY; without even the implied warranty of11// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the12// GNU Lesser General Public License for more details.13//14// You should have received a copy of the GNU Lesser General Public License15// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.16// Package les implements the Light Ethereum Subprotocol.17package les18import (19 "math/big"20 "sync"21 "time"22 "github.com/ethereum/go-ethereum/common"23 "github.com/ethereum/go-ethereum/common/mclock"24 "github.com/ethereum/go-ethereum/consensus"25 "github.com/ethereum/go-ethereum/core/rawdb"26 "github.com/ethereum/go-ethereum/core/types"27 "github.com/ethereum/go-ethereum/light"28 "github.com/ethereum/go-ethereum/log"29)30const (31 blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others32 maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer33 serverStateAvailable = 100 // number of recent blocks where state availability is assumed34)35// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the36// ODR system to ensure that we only request data related to a certain block from peers who have already processed37// and announced that block.38type lightFetcher struct {39 pm *ProtocolManager40 odr *LesOdr41 chain *light.LightChain42 lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests43 maxConfirmedTd *big.Int44 peers map[*peer]*fetcherPeerInfo45 lastUpdateStats *updateStatsEntry46 syncing bool47 syncDone chan *peer48 reqMu sync.RWMutex // reqMu protects access to sent header fetch requests49 requested map[uint64]fetchRequest50 deliverChn chan fetchResponse51 timeoutChn chan uint6452 requestChn chan bool // true if initiated from outside53}54// fetcherPeerInfo holds fetcher-specific information about each active peer55type fetcherPeerInfo struct {56 root, lastAnnounced *fetcherTreeNode57 nodeCnt int58 confirmedTd *big.Int59 bestConfirmed *fetcherTreeNode60 nodeByHash map[common.Hash]*fetcherTreeNode61 firstUpdateStats *updateStatsEntry62}63// fetcherTreeNode is a node of a tree that holds information about blocks recently64// announced and confirmed by a certain peer. Each new announce message from a peer65// adds nodes to the tree, based on the previous announced head and the reorg depth.66// There are three possible states for a tree node:67// - announced: not downloaded (known) yet, but we know its head, number and td68// - intermediate: not known, hash and td are empty, they are filled out when it becomes known69// - known: both announced by this peer and downloaded (from any peer).70// This structure makes it possible to always know which peer has a certain block,71// which is necessary for selecting a suitable peer for ODR requests and also for72// canonizing new heads. 
It also helps to always download the minimum necessary73// amount of headers with a single request.74type fetcherTreeNode struct {75 hash common.Hash76 number uint6477 td *big.Int78 known, requested bool79 parent *fetcherTreeNode80 children []*fetcherTreeNode81}82// fetchRequest represents a header download request83type fetchRequest struct {84 hash common.Hash85 amount uint6486 peer *peer87 sent mclock.AbsTime88 timeout bool89}90// fetchResponse represents a header download response91type fetchResponse struct {92 reqID uint6493 headers []*types.Header94 peer *peer95}96// newLightFetcher creates a new light fetcher97func newLightFetcher(pm *ProtocolManager) *lightFetcher {98 f := &lightFetcher{99 pm: pm,100 chain: pm.blockchain.(*light.LightChain),101 odr: pm.odr,102 peers: make(map[*peer]*fetcherPeerInfo),103 deliverChn: make(chan fetchResponse, 100),104 requested: make(map[uint64]fetchRequest),105 timeoutChn: make(chan uint64),106 requestChn: make(chan bool, 100),107 syncDone: make(chan *peer),108 maxConfirmedTd: big.NewInt(0),109 }110 pm.peers.notify(f)111 f.pm.wg.Add(1)112 go f.syncLoop()113 return f114}115// syncLoop is the main event loop of the light fetcher116func (f *lightFetcher) syncLoop() {117 requesting := false118 defer f.pm.wg.Done()119 for {120 select {121 case <-f.pm.quitSync:122 return123 // when a new announce is received, request loop keeps running until124 // no further requests are necessary or possible125 case newAnnounce := <-f.requestChn:126 f.lock.Lock()127 s := requesting128 requesting = false129 var (130 rq *distReq131 reqID uint64132 syncing bool133 )134 if !f.syncing && !(newAnnounce && s) {135 rq, reqID, syncing = f.nextRequest()136 }137 f.lock.Unlock()138 if rq != nil {139 requesting = true140 if _, ok := <-f.pm.reqDist.queue(rq); ok {141 if syncing {142 f.lock.Lock()143 f.syncing = true144 f.lock.Unlock()145 } else {146 go func() {147 time.Sleep(softRequestTimeout)148 f.reqMu.Lock()149 req, ok := f.requested[reqID]150 if ok {151 req.timeout = true152 f.requested[reqID] = req153 }154 f.reqMu.Unlock()155 // keep starting new requests while possible156 f.requestChn <- false157 }()158 }159 } else {160 f.requestChn <- false161 }162 }163 case reqID := <-f.timeoutChn:164 f.reqMu.Lock()165 req, ok := f.requested[reqID]166 if ok {167 delete(f.requested, reqID)168 }169 f.reqMu.Unlock()170 if ok {171 f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)172 req.peer.Log().Debug("Fetching data timed out hard")173 go f.pm.removePeer(req.peer.id)174 }175 case resp := <-f.deliverChn:176 f.reqMu.Lock()177 req, ok := f.requested[resp.reqID]178 if ok && req.peer != resp.peer {179 ok = false180 }181 if ok {182 delete(f.requested, resp.reqID)183 }184 f.reqMu.Unlock()185 if ok {186 f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)187 }188 f.lock.Lock()189 if !ok || !(f.syncing || f.processResponse(req, resp)) {190 resp.peer.Log().Debug("Failed processing response")191 go f.pm.removePeer(resp.peer.id)192 }193 f.lock.Unlock()194 case p := <-f.syncDone:195 f.lock.Lock()196 p.Log().Debug("Done synchronising with peer")197 f.checkSyncedHeaders(p)198 f.syncing = false199 f.lock.Unlock()200 f.requestChn <- false201 }202 }203}204// registerPeer adds a new peer to the fetcher's peer set205func (f *lightFetcher) registerPeer(p *peer) {206 p.lock.Lock()207 p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {208 return f.peerHasBlock(p, hash, number, hasState)209 }210 
p.lock.Unlock()211 f.lock.Lock()212 defer f.lock.Unlock()213 f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}214}215// unregisterPeer removes a new peer from the fetcher's peer set216func (f *lightFetcher) unregisterPeer(p *peer) {217 p.lock.Lock()218 p.hasBlock = nil219 p.lock.Unlock()220 f.lock.Lock()221 defer f.lock.Unlock()222 // check for potential timed out block delay statistics223 f.checkUpdateStats(p, nil)224 delete(f.peers, p)225}226// announce processes a new announcement message received from a peer, adding new227// nodes to the peer's block tree and removing old nodes if necessary228func (f *lightFetcher) announce(p *peer, head *announceData) {229 f.lock.Lock()230 defer f.lock.Unlock()231 p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)232 fp := f.peers[p]233 if fp == nil {234 p.Log().Debug("Announcement from unknown peer")235 return236 }237 if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {238 // announced tds should be strictly monotonic239 p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)240 go f.pm.removePeer(p.id)241 return242 }243 n := fp.lastAnnounced244 for i := uint64(0); i < head.ReorgDepth; i++ {245 if n == nil {246 break247 }248 n = n.parent249 }250 // n is now the reorg common ancestor, add a new branch of nodes251 if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {252 // if announced head block height is lower or same as n or too far from it to add253 // intermediate nodes then discard previous announcement info and trigger a resync254 n = nil255 fp.nodeCnt = 0256 fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)257 }258 if n != nil {259 // check if the node count is too high to add new nodes, discard oldest ones if necessary260 locked := false261 for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {262 if !locked {263 f.chain.LockChain()264 defer f.chain.UnlockChain()265 locked = true266 }267 // if one of root's children is canonical, keep it, delete other branches and root itself268 var newRoot *fetcherTreeNode269 for i, nn := range fp.root.children {270 if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {271 fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)272 nn.parent = nil273 newRoot = nn274 break275 }276 }277 fp.deleteNode(fp.root)278 if n == fp.root {279 n = newRoot280 }281 fp.root = newRoot282 if newRoot == nil || !f.checkKnownNode(p, newRoot) {283 fp.bestConfirmed = nil284 fp.confirmedTd = nil285 }286 if n == nil {287 break288 }289 }290 if n != nil {291 for n.number < head.Number {292 nn := &fetcherTreeNode{number: n.number + 1, parent: n}293 n.children = append(n.children, nn)294 n = nn295 fp.nodeCnt++296 }297 n.hash = head.Hash298 n.td = head.Td299 fp.nodeByHash[n.hash] = n300 }301 }302 if n == nil {303 // could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed304 if fp.root != nil {305 fp.deleteNode(fp.root)306 }307 n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}308 fp.root = n309 fp.nodeCnt++310 fp.nodeByHash[n.hash] = n311 fp.bestConfirmed = nil312 fp.confirmedTd = nil313 }314 f.checkKnownNode(p, n)315 p.lock.Lock()316 p.headInfo = head317 fp.lastAnnounced = n318 p.lock.Unlock()319 f.checkUpdateStats(p, nil)320 f.requestChn <- true321}322// peerHasBlock returns true if we can assume the peer knows the given block323// based on 
its announcements324func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {325 f.lock.Lock()326 defer f.lock.Unlock()327 fp := f.peers[p]328 if fp == nil || fp.root == nil {329 return false330 }331 if hasState {332 if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {333 return false334 }335 }336 if f.syncing {337 // always return true when syncing338 // false positives are acceptable, a more sophisticated condition can be implemented later339 return true340 }341 if number >= fp.root.number {342 // it is recent enough that if it is known, is should be in the peer's block tree343 return fp.nodeByHash[hash] != nil344 }345 f.chain.LockChain()346 defer f.chain.UnlockChain()347 // if it's older than the peer's block tree root but it's in the same canonical chain348 // as the root, we can still be sure the peer knows it349 //350 // when syncing, just check if it is part of the known chain, there is nothing better we351 // can do since we do not know the most recent block hash yet352 return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash353}354// requestAmount calculates the amount of headers to be downloaded starting355// from a certain head backwards356func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {357 amount := uint64(0)358 nn := n359 for nn != nil && !f.checkKnownNode(p, nn) {360 nn = nn.parent361 amount++362 }363 if nn == nil {364 amount = n.number365 }366 return amount367}368// requestedID tells if a certain reqID has been requested by the fetcher369func (f *lightFetcher) requestedID(reqID uint64) bool {370 f.reqMu.RLock()371 _, ok := f.requested[reqID]372 f.reqMu.RUnlock()373 return ok374}375// nextRequest selects the peer and announced head to be requested next, amount376// to be downloaded starting from the head backwards is also returned377func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {378 var (379 bestHash common.Hash380 bestAmount uint64381 )382 bestTd := f.maxConfirmedTd383 bestSyncing := false384 for p, fp := range f.peers {385 for hash, n := range fp.nodeByHash {386 if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {387 amount := f.requestAmount(p, n)388 if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {389 bestHash = hash390 bestAmount = amount391 bestTd = n.td392 bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)393 }394 }395 }396 }397 if bestTd == f.maxConfirmedTd {398 return nil, 0, false399 }400 var rq *distReq401 reqID := genReqID()402 if bestSyncing {403 rq = &distReq{404 getCost: func(dp distPeer) uint64 {405 return 0406 },407 canSend: func(dp distPeer) bool {408 p := dp.(*peer)409 f.lock.Lock()410 defer f.lock.Unlock()411 fp := f.peers[p]412 return fp != nil && fp.nodeByHash[bestHash] != nil413 },414 request: func(dp distPeer) func() {415 go func() {416 p := dp.(*peer)417 p.Log().Debug("Synchronisation started")418 f.pm.synchronise(p)419 f.syncDone <- p420 }()421 return nil422 },423 }424 } else {425 rq = &distReq{426 getCost: func(dp distPeer) uint64 {427 p := dp.(*peer)428 return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))429 },430 canSend: func(dp distPeer) bool {431 p := dp.(*peer)432 f.lock.Lock()433 defer f.lock.Unlock()434 fp := f.peers[p]435 if fp == nil {436 return false437 }438 n := fp.nodeByHash[bestHash]439 return n != nil && !n.requested440 },441 request: func(dp 
distPeer) func() {442 p := dp.(*peer)443 f.lock.Lock()444 fp := f.peers[p]445 if fp != nil {446 n := fp.nodeByHash[bestHash]447 if n != nil {448 n.requested = true449 }450 }451 f.lock.Unlock()452 cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))453 p.fcServer.QueueRequest(reqID, cost)454 f.reqMu.Lock()455 f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}456 f.reqMu.Unlock()457 go func() {458 time.Sleep(hardRequestTimeout)459 f.timeoutChn <- reqID460 }()461 return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }462 },463 }464 }465 return rq, reqID, bestSyncing466}467// deliverHeaders delivers header download request responses for processing468func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {469 f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}470}471// processResponse processes header download request responses, returns true if successful472func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {473 if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {474 req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)475 return false476 }477 headers := make([]*types.Header, req.amount)478 for i, header := range resp.headers {479 headers[int(req.amount)-1-i] = header480 }481 if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {482 if err == consensus.ErrFutureBlock {483 return true484 }485 log.Debug("Failed to insert header chain", "err", err)486 return false487 }488 tds := make([]*big.Int, len(headers))489 for i, header := range headers {490 td := f.chain.GetTd(header.Hash(), header.Number.Uint64())491 if td == nil {492 log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())493 return false494 }495 tds[i] = td496 }497 f.newHeaders(headers, tds)498 return true499}500// newHeaders updates the block trees of all active peers according to a newly501// downloaded and validated batch or headers502func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {503 var maxTd *big.Int504 for p, fp := range f.peers {505 if !f.checkAnnouncedHeaders(fp, headers, tds) {506 p.Log().Debug("Inconsistent announcement")507 go f.pm.removePeer(p.id)508 }509 if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {510 maxTd = fp.confirmedTd511 }512 }513 if maxTd != nil {514 f.updateMaxConfirmedTd(maxTd)515 }516}517// checkAnnouncedHeaders updates peer's block tree if necessary after validating518// a batch of headers. It searches for the latest header in the batch that has a519// matching tree node (if any), and if it has not been marked as known already,520// sets it and its parents to known (even those which are older than the currently521// validated ones). 
Return value shows if all hashes, numbers and Tds matched522// correctly to the announced values (otherwise the peer should be dropped).523func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {524 var (525 n *fetcherTreeNode526 header *types.Header527 td *big.Int528 )529 for i := len(headers) - 1; ; i-- {530 if i < 0 {531 if n == nil {532 // no more headers and nothing to match533 return true534 }535 // we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching536 hash, number := header.ParentHash, header.Number.Uint64()-1537 td = f.chain.GetTd(hash, number)538 header = f.chain.GetHeader(hash, number)539 if header == nil || td == nil {540 log.Error("Missing parent of validated header", "hash", hash, "number", number)541 return false542 }543 } else {544 header = headers[i]545 td = tds[i]546 }547 hash := header.Hash()548 number := header.Number.Uint64()549 if n == nil {550 n = fp.nodeByHash[hash]551 }552 if n != nil {553 if n.td == nil {554 // node was unannounced555 if nn := fp.nodeByHash[hash]; nn != nil {556 // if there was already a node with the same hash, continue there and drop this one557 nn.children = append(nn.children, n.children...)558 n.children = nil559 fp.deleteNode(n)560 n = nn561 } else {562 n.hash = hash563 n.td = td564 fp.nodeByHash[hash] = n565 }566 }567 // check if it matches the header568 if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {569 // peer has previously made an invalid announcement570 return false571 }572 if n.known {573 // we reached a known node that matched our expectations, return with success574 return true575 }576 n.known = true577 if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {578 fp.confirmedTd = td579 fp.bestConfirmed = n580 }581 n = n.parent582 if n == nil {583 return true584 }585 }586 }587}588// checkSyncedHeaders updates peer's block tree after synchronisation by marking589// downloaded headers as known. 
If none of the announced headers are found after590// syncing, the peer is dropped.591func (f *lightFetcher) checkSyncedHeaders(p *peer) {592 fp := f.peers[p]593 if fp == nil {594 p.Log().Debug("Unknown peer to check sync headers")595 return596 }597 n := fp.lastAnnounced598 var td *big.Int599 for n != nil {600 if td = f.chain.GetTd(n.hash, n.number); td != nil {601 break602 }603 n = n.parent604 }605 // now n is the latest downloaded header after syncing606 if n == nil {607 p.Log().Debug("Synchronisation failed")608 go f.pm.removePeer(p.id)609 } else {610 header := f.chain.GetHeader(n.hash, n.number)611 f.newHeaders([]*types.Header{header}, []*big.Int{td})612 }613}614// checkKnownNode checks if a block tree node is known (downloaded and validated)615// If it was not known previously but found in the database, sets its known flag616func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {617 if n.known {618 return true619 }620 td := f.chain.GetTd(n.hash, n.number)621 if td == nil {622 return false623 }624 header := f.chain.GetHeader(n.hash, n.number)625 // check the availability of both header and td because reads are not protected by chain db mutex626 // Note: returning false is always safe here627 if header == nil {628 return false629 }630 fp := f.peers[p]631 if fp == nil {632 p.Log().Debug("Unknown peer to check known nodes")633 return false634 }635 if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {636 p.Log().Debug("Inconsistent announcement")637 go f.pm.removePeer(p.id)638 }639 if fp.confirmedTd != nil {640 f.updateMaxConfirmedTd(fp.confirmedTd)641 }642 return n.known643}644// deleteNode deletes a node and its child subtrees from a peer's block tree645func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {646 if n.parent != nil {647 for i, nn := range n.parent.children {648 if nn == n {649 n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)650 break651 }652 }653 }654 for {655 if n.td != nil {656 delete(fp.nodeByHash, n.hash)657 }658 fp.nodeCnt--659 if len(n.children) == 0 {660 return661 }662 for i, nn := range n.children {663 if i == 0 {664 n = nn665 } else {666 fp.deleteNode(nn)667 }668 }669 }670}671// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td672// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values673// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated674// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head675// and it has also been downloaded from any peer, either before or after the given announcement).676// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,677// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed678// the current global head).679type updateStatsEntry struct {680 time mclock.AbsTime681 td *big.Int682 next *updateStatsEntry683}684// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,685// adds it to the end of a linked list together with the time it has been confirmed. 
Then checks which peers have686// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.687// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a688// positive block delay value.689func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {690 if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {691 f.maxConfirmedTd = td692 newEntry := &updateStatsEntry{693 time: mclock.Now(),694 td: td,695 }696 if f.lastUpdateStats != nil {697 f.lastUpdateStats.next = newEntry698 }699 f.lastUpdateStats = newEntry700 for p := range f.peers {701 f.checkUpdateStats(p, newEntry)702 }703 }704}705// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it706// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the707// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,708// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry709// items are removed from the head of the linked list.710// If a new entry has been added to the global tail, it is passed as a parameter here even though this function711// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),712// it can set the new head to newEntry.713func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {714 now := mclock.Now()715 fp := f.peers[p]716 if fp == nil {717 p.Log().Debug("Unknown peer to check update stats")718 return719 }720 if newEntry != nil && fp.firstUpdateStats == nil {721 fp.firstUpdateStats = newEntry722 }723 for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {724 f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)725 fp.firstUpdateStats = fp.firstUpdateStats.next726 }727 if fp.confirmedTd != nil {728 for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {729 f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))730 fp.firstUpdateStats = fp.firstUpdateStats.next731 }732 }733}...
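Note that in fetcher.go above, td is go-ethereum's total-difficulty value (a *big.Int), not the go-testdeep td package. To assert on such a value with go-testdeep, a sketch could look like this; getTd below is a hypothetical stand-in for a lookup such as LightChain.GetTd, which returns nil when the total difficulty of a block is not known.

package example_test

import (
    "math/big"
    "testing"

    "github.com/maxatome/go-testdeep/td"
)

// getTd stands in for a real total-difficulty lookup.
func getTd(known bool) *big.Int {
    if !known {
        return nil
    }
    return big.NewInt(131072)
}

func TestTotalDifficulty(t *testing.T) {
    // A missing total difficulty comes back as a nil *big.Int.
    td.Cmp(t, getTd(false), td.Nil())

    // A known one should be non-nil.
    td.Cmp(t, getTd(true), td.NotNil())
}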


check.go

Source: check.go (GitHub)


// +build linux

package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2"

import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "syscall"

    "github.com/docker/docker/pkg/system"
    "github.com/pkg/errors"
    "golang.org/x/sys/unix"
)

// doesSupportNativeDiff checks whether the filesystem has a bug
// which copies up the opaque flag when copying up an opaque
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d string) error {
    td, err := ioutil.TempDir(d, "opaque-bug-check")
    if err != nil {
        return err
    }
    defer func() {
        if err := os.RemoveAll(td); err != nil {
            logger.Warnf("Failed to remove check directory %v: %v", td, err)
        }
    }()

    // Make directories l1/d, l1/d1, l2/d, l3, work, merged
    if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
        return err
    }
    if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
        return err
    }
    if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
        return err
    }
    if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
        return err
    }
    if err := os.Mkdir(filepath.Join(td, workDirName), 0755); err != nil {
        return err
    }
    if err := os.Mkdir(filepath.Join(td, mergedDirName), 0755); err != nil {
        return err
    }

    // Mark l2/d as opaque
    if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil {
        return errors.Wrap(err, "failed to set opaque flag on middle layer")
    }

    opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, workDirName))
    if err := unix.Mount("overlay", filepath.Join(td, mergedDirName), "overlay", 0, opts); err != nil {
        return errors.Wrap(err, "failed to mount overlay")
    }
    defer func() {
        if err := unix.Unmount(filepath.Join(td, mergedDirName), 0); err != nil {
            logger.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, mergedDirName), err)
        }
    }()

    // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
    if err := ioutil.WriteFile(filepath.Join(td, mergedDirName, "d", "f"), []byte{}, 0644); err != nil {
        return errors.Wrap(err, "failed to write to merged directory")
    }

    // Check l3/d does not have opaque flag
    xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque")
    if err != nil {
        return errors.Wrap(err, "failed to read opaque flag on upper layer")
    }
    if string(xattrOpaque) == "y" {
        return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
    }

    // rename "d1" to "d2"
    if err := os.Rename(filepath.Join(td, mergedDirName, "d1"), filepath.Join(td, mergedDirName, "d2")); err != nil {
        // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
        if err.(*os.LinkError).Err == syscall.EXDEV {
            return nil
        }
        return errors.Wrap(err, "failed to rename dir in merged directory")
    }

    // get the xattr of "d2"
    xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
    if err != nil {
        return errors.Wrap(err, "failed to read redirect flag on upper layer")
    }
    if string(xattrRedirect) == "d1" {
        return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
    }
    return nil
}
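In check.go, td is simply the temporary check directory, so again this is not the go-testdeep package. A test around such a helper would more likely assert on its error result; here is a small sketch assuming go-testdeep, with checkNativeDiff as a hypothetical stand-in for doesSupportNativeDiff (the real function is Linux/overlayfs specific).

package overlay2_test

import (
    "errors"
    "testing"

    "github.com/maxatome/go-testdeep/td"
)

// checkNativeDiff stands in for doesSupportNativeDiff: it returns nil
// when native diff is usable and a non-nil error otherwise.
func checkNativeDiff(supported bool) error {
    if supported {
        return nil
    }
    return errors.New("opaque flag erroneously copied up")
}

func TestNativeDiff(t *testing.T) {
    // A nil error means native diff can be used.
    td.CmpNoError(t, checkNativeDiff(true))

    // td.Nil also works on error values, since a nil error is a nil interface.
    td.Cmp(t, checkNativeDiff(true), td.Nil())
}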


Nil

Using AI Code Generation


1import "fmt"2func main() {3 if a == nil {4 fmt.Println("a is nil")5 } else {6 fmt.Println("a is not nil")7 }8}9import "fmt"10func main() {11 if a == nil {12 fmt.Println("a is nil")13 } else {14 fmt.Println("a is not nil")15 }16 a = new(int)17 if a == nil {18 fmt.Println("a is nil")19 } else {20 fmt.Println("a is not nil")21 }22}23import "fmt"24func main() {25 if a == nil {26 fmt.Println("a is nil")27 } else {28 fmt.Println("a is not nil")29 }30 a = new(int)31 if a == nil {32 fmt.Println("a is nil")33 } else {34 fmt.Println("a is not nil")35 }36 fmt.Println("value of a:", *a)37}38import "fmt"39func main() {40 if a == nil {41 fmt.Println("a is nil")42 } else {43 fmt.Println("a is not nil")44 }45 a = new(int)46 if a == nil {47 fmt.Println("a is nil")48 } else {49 fmt.Println("a is not nil")50 }51 fmt.Println("value of a:", *a)52 fmt.Println("value of a:", a)53}54import "fmt"55func main() {56 if a == nil {


Nil

Using AI Code Generation


package main

import (
    "fmt"
    "math"
)

// td here is just an example struct name, unrelated to go-testdeep.
type td struct {
    x, y float64
}

func (v td) abs() float64 {
    return math.Sqrt(v.x*v.x + v.y*v.y)
}

func main() {
    v := td{3, 4}
    fmt.Println(v.abs()) // 5
}


Nil

Using AI Code Generation


1import "fmt"2type td struct {3}4func (t td) Nil() bool {5}6func main() {7 t := td{}8 fmt.Println(t.Nil())9 fmt.Println(t.Nil())10}11import "fmt"12type td struct {13}14func (t *td) Nil() bool {15}16func main() {17 t := &td{}18 fmt.Println(t.Nil())19 fmt.Println(t.Nil())20}


Nil

Using AI Code Generation


package main

import "fmt"

type td struct {
    x, y int
}

// Nil reports whether both fields hold their zero value.
func (t td) Nil() bool {
    return t.x == 0 && t.y == 0
}

// IsZero is equivalent to Nil and follows the usual Go naming.
func (t td) IsZero() bool {
    return t.x == 0 && t.y == 0
}

// String implements fmt.Stringer.
func (t td) String() string {
    return fmt.Sprintf("x:%d,y:%d", t.x, t.y)
}

func main() {
    t := td{1, 2}
    fmt.Println(t.Nil())    // false
    fmt.Println(t.IsZero()) // false
    fmt.Println(t.String()) // x:1,y:2
}


Nil

Using AI Code Generation


1import "fmt"2type td struct {3}4func (t *td) Nil() {5}6func main() {7 t := new(td)8 fmt.Println(t)9 t.Nil()10 fmt.Println(t)11}12import "fmt"13type td struct {14}15func (t td) Nil() {16}17func main() {18 t := new(td)19 fmt.Println(t)20 t.Nil()21 fmt.Println(t)22}23import "fmt"24type td struct {25}26func (t td) Nil() {27}28func main() {29 t := td{}30 fmt.Println(t)31 t.Nil()32 fmt.Println(t)33}34import "fmt"35type td struct {36}37func (t *td) Nil() {38}39func main() {40 t := td{}41 fmt.Println(t)42 t.Nil()43 fmt.Println(t)44}45import "fmt"46type td struct {


Nil

Using AI Code Generation


package main

import (
    "fmt"

    "github.com/maxatome/go-testdeep/td"
)

func main() {
    // td.Nil returns a go-testdeep operator that matches nil values.
    t := td.Nil()
    fmt.Println(t)
}
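One useful property of this operator, per the go-testdeep documentation, is that td.Nil also matches a non-nil interface containing a nil pointer, which a plain == nil comparison on the interface misses. A sketch:

package example_test

import (
    "testing"

    "github.com/maxatome/go-testdeep/td"
)

func TestTypedNil(t *testing.T) {
    var p *int
    var i interface{} = p // non-nil interface holding a nil *int

    // A direct comparison says the interface is not nil...
    td.CmpFalse(t, i == nil)

    // ...but td.Nil still matches, because the contained pointer is nil.
    td.Cmp(t, i, td.Nil())
}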


Nil

Using AI Code Generation


1import "fmt"2func main() {3 fmt.Println(t.Nil())4}5Golang Program to use String() method of td class6import "fmt"7func main() {8 fmt.Println(t.String())9}10Golang Program to use String() method of td class11import "fmt"12func main() {13 fmt.Println(t.String())14}15Golang Program to use String() method of td class16import "fmt"17func main() {18 fmt.Println(t.String())19}20Golang Program to use String() method of td class21import "fmt"22func main() {23 fmt.Println(t.String())24}25Golang Program to use String() method of td class26import "fmt"27func main() {28 fmt.Println(t.String())29}30Golang Program to use String() method of td class31import "fmt"32func main() {33 fmt.Println(t.String())34}35Golang Program to use String() method of td class36import "fmt"37func main() {38 fmt.Println(t.String())39}40Golang Program to use String() method of td class41import "fmt"42func main() {43 fmt.Println(t.String())44}


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Go-testdeep automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

