Best Testcontainers-go code snippet using wait.Timeout
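None of the snippets collected below actually call into testcontainers-go itself, so for context, here is a minimal sketch of how a startup timeout is typically attached to a wait strategy in testcontainers-go. The image name, log line, and durations are illustrative assumptions, not taken from the snippets on this page:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/wait"
)

func main() {
    ctx := context.Background()
    req := testcontainers.ContainerRequest{
        Image:        "nginx:alpine",
        ExposedPorts: []string{"80/tcp"},
        // Fail container startup if this log line does not appear within 30 seconds.
        WaitingFor: wait.ForLog("start worker processes").WithStartupTimeout(30 * time.Second),
    }
    container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
        ContainerRequest: req,
        Started:          true,
    })
    if err != nil {
        fmt.Println("container failed to start:", err)
        return
    }
    defer container.Terminate(ctx)
    fmt.Println("container is up")
}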
serverpool.go
Source:serverpool.go
...
    "github.com/ethereum/go-ethereum/p2p/nodestate"
    "github.com/ethereum/go-ethereum/rlp"
)

const (
    minTimeout          = time.Millisecond * 500 // minimum request timeout suggested by the server pool
    timeoutRefresh      = time.Second * 5        // recalculate timeout if older than this
    dialCost            = 10000                  // cost of a TCP dial (used for known node selection weight calculation)
    dialWaitStep        = 1.5                    // exponential multiplier of redial wait time when no value was provided by the server
    queryCost           = 500                    // cost of a UDP pre-negotiation query
    queryWaitStep       = 1.02                   // exponential multiplier of redial wait time when no value was provided by the server
    waitThreshold       = time.Hour * 2000       // drop node if waiting time is over the threshold
    nodeWeightMul       = 1000000                // multiplier constant for node weight calculation
    nodeWeightThreshold = 100                    // minimum weight for keeping a node in the known (valuable) set
    minRedialWait       = 10                     // minimum redial wait time in seconds
    preNegLimit         = 5                      // maximum number of simultaneous pre-negotiation queries
    maxQueryFails       = 100                    // number of consecutive UDP query failures before we print a warning
)

// serverPool provides a node iterator for dial candidates. The output is a mix of newly discovered
// nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes.
type serverPool struct {
    clock    mclock.Clock
    unixTime func() int64
    db       ethdb.KeyValueStore

    ns           *nodestate.NodeStateMachine
    vt           *lpc.ValueTracker
    mixer        *enode.FairMix
    mixSources   []enode.Iterator
    dialIterator enode.Iterator
    validSchemes enr.IdentityScheme
    trustedURLs  []string
    fillSet      *lpc.FillSet
    queryFails   uint32

    timeoutLock      sync.RWMutex
    timeout          time.Duration
    timeWeights      lpc.ResponseTimeWeights
    timeoutRefreshed mclock.AbsTime
}

// nodeHistory keeps track of dial costs which determine node weight together with the
// service value calculated by lpc.ValueTracker.
type nodeHistory struct {
    dialCost                       utils.ExpiredValue
    redialWaitStart, redialWaitEnd int64 // unix time (seconds)
}

type nodeHistoryEnc struct {
    DialCost                       utils.ExpiredValue
    RedialWaitStart, RedialWaitEnd uint64
}

// queryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs.
// It returns 1 if the remote node has confirmed that connection is possible, 0 if not
// possible and -1 if no response arrived (timeout).
type queryFunc func(*enode.Node) int

var (
    serverPoolSetup    = &nodestate.Setup{Version: 1}
    sfHasValue         = serverPoolSetup.NewPersistentFlag("hasValue")
    sfQueried          = serverPoolSetup.NewFlag("queried")
    sfCanDial          = serverPoolSetup.NewFlag("canDial")
    sfDialing          = serverPoolSetup.NewFlag("dialed")
    sfWaitDialTimeout  = serverPoolSetup.NewFlag("dialTimeout")
    sfConnected        = serverPoolSetup.NewFlag("connected")
    sfRedialWait       = serverPoolSetup.NewFlag("redialWait")
    sfAlwaysConnect    = serverPoolSetup.NewFlag("alwaysConnect")
    sfDisableSelection = nodestate.MergeFlags(sfQueried, sfCanDial, sfDialing, sfConnected, sfRedialWait)

    sfiNodeHistory = serverPoolSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}),
        func(field interface{}) ([]byte, error) {
            if n, ok := field.(nodeHistory); ok {
                ne := nodeHistoryEnc{
                    DialCost:        n.dialCost,
                    RedialWaitStart: uint64(n.redialWaitStart),
                    RedialWaitEnd:   uint64(n.redialWaitEnd),
                }
                enc, err := rlp.EncodeToBytes(&ne)
                return enc, err
            } else {
                return nil, errors.New("invalid field type")
            }
        },
        func(enc []byte) (interface{}, error) {
            var ne nodeHistoryEnc
            err := rlp.DecodeBytes(enc, &ne)
            n := nodeHistory{
                dialCost:        ne.DialCost,
                redialWaitStart: int64(ne.RedialWaitStart),
                redialWaitEnd:   int64(ne.RedialWaitEnd),
            }
            return n, err
        },
    )
    sfiNodeWeight     = serverPoolSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
    sfiConnectedStats = serverPoolSetup.NewField("connectedStats", reflect.TypeOf(lpc.ResponseTimeStats{}))
)

// newServerPool creates a new server pool
func newServerPool(db ethdb.KeyValueStore, dbKey []byte, vt *lpc.ValueTracker, discovery enode.Iterator, mixTimeout time.Duration, query queryFunc, clock mclock.Clock, trustedURLs []string) *serverPool {
    s := &serverPool{
        db:           db,
        clock:        clock,
        unixTime:     func() int64 { return time.Now().Unix() },
        validSchemes: enode.ValidSchemes,
        trustedURLs:  trustedURLs,
        vt:           vt,
        ns:           nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, serverPoolSetup),
    }
    s.recalTimeout()
    s.mixer = enode.NewFairMix(mixTimeout)
    knownSelector := lpc.NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight)
    alwaysConnect := lpc.NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil)
    s.mixSources = append(s.mixSources, knownSelector)
    s.mixSources = append(s.mixSources, alwaysConnect)
    if discovery != nil {
        s.mixSources = append(s.mixSources, discovery)
    }

    iter := enode.Iterator(s.mixer)
    if query != nil {
        iter = s.addPreNegFilter(iter, query)
    }
    s.dialIterator = enode.Filter(iter, func(node *enode.Node) bool {
        s.ns.SetState(node, sfDialing, sfCanDial, 0)
        s.ns.SetState(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10)
        return true
    })

    s.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) {
        if oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() {
            // dial timeout, no connection
            s.setRedialWait(n, dialCost, dialWaitStep)
            s.ns.SetState(n, nodestate.Flags{}, sfDialing, 0)
        }
    })

    s.ns.AddLogMetrics(sfHasValue, sfDisableSelection, "selectable", nil, nil, serverSelectableGauge)
    s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil)
    s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge)
    return s
}

// addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query.
// Nodes that are filtered out and do not appear on the output iterator are put back
// into redialWait state.
func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enode.Iterator {
    s.fillSet = lpc.NewFillSet(s.ns, input, sfQueried)
    s.ns.SubscribeState(sfQueried, func(n *enode.Node, oldState, newState nodestate.Flags) {
        if newState.Equals(sfQueried) {
            fails := atomic.LoadUint32(&s.queryFails)
            if fails == maxQueryFails {
                log.Warn("UDP pre-negotiation query does not seem to work")
            }
            if fails > maxQueryFails {
                fails = maxQueryFails
            }
            if rand.Intn(maxQueryFails*2) < int(fails) {
                // skip pre-negotiation with increasing chance, max 50%
                // this ensures that the client can operate even if UDP is not working at all
                s.ns.SetState(n, sfCanDial, nodestate.Flags{}, time.Second*10)
                // set canDial before resetting queried so that FillSet will not read more
                // candidates unnecessarily
                s.ns.SetState(n, nodestate.Flags{}, sfQueried, 0)
                return
            }
            go func() {
                q := query(n)
                if q == -1 {
                    atomic.AddUint32(&s.queryFails, 1)
                } else {
                    atomic.StoreUint32(&s.queryFails, 0)
                }
                if q == 1 {
                    s.ns.SetState(n, sfCanDial, nodestate.Flags{}, time.Second*10)
                } else {
                    s.setRedialWait(n, queryCost, queryWaitStep)
                }
                s.ns.SetState(n, nodestate.Flags{}, sfQueried, 0)
            }()
        }
    })
    return lpc.NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
        if waiting {
            s.fillSet.SetTarget(preNegLimit)
        } else {
            s.fillSet.SetTarget(0)
        }
    })
}

// start starts the server pool. Note that NodeStateMachine should be started first.
func (s *serverPool) start() {
    s.ns.Start()
    for _, iter := range s.mixSources {
        // add sources to mixer at startup because the mixer instantly tries to read them
        // which should only happen after NodeStateMachine has been started
        s.mixer.AddSource(iter)
    }
    for _, url := range s.trustedURLs {
        if node, err := enode.Parse(s.validSchemes, url); err == nil {
            s.ns.SetState(node, sfAlwaysConnect, nodestate.Flags{}, 0)
        } else {
            log.Error("Invalid trusted server URL", "url", url, "error", err)
        }
    }
    unixTime := s.unixTime()
    s.ns.ForEach(sfHasValue, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
        s.calculateWeight(node)
        if n, ok := s.ns.GetField(node, sfiNodeHistory).(nodeHistory); ok && n.redialWaitEnd > unixTime {
            wait := n.redialWaitEnd - unixTime
            lastWait := n.redialWaitEnd - n.redialWaitStart
            if wait > lastWait {
                // if the time until expiration is larger than the last suggested
                // waiting time then the system clock was probably adjusted
                wait = lastWait
            }
            s.ns.SetState(node, sfRedialWait, nodestate.Flags{}, time.Duration(wait)*time.Second)
        }
    })
}

// stop stops the server pool
func (s *serverPool) stop() {
    s.dialIterator.Close()
    if s.fillSet != nil {
        s.fillSet.Close()
    }
    s.ns.ForEach(sfConnected, nodestate.Flags{}, func(n *enode.Node, state nodestate.Flags) {
        // recalculate weight of connected nodes in order to update hasValue flag if necessary
        s.calculateWeight(n)
    })
    s.ns.Stop()
}

// registerPeer implements serverPeerSubscriber
func (s *serverPool) registerPeer(p *serverPeer) {
    s.ns.SetState(p.Node(), sfConnected, sfDialing.Or(sfWaitDialTimeout), 0)
    nvt := s.vt.Register(p.ID())
    s.ns.SetField(p.Node(), sfiConnectedStats, nvt.RtStats())
    p.setValueTracker(s.vt, nvt)
    p.updateVtParams()
}

// unregisterPeer implements serverPeerSubscriber
func (s *serverPool) unregisterPeer(p *serverPeer) {
    s.setRedialWait(p.Node(), dialCost, dialWaitStep)
    s.ns.SetState(p.Node(), nodestate.Flags{}, sfConnected, 0)
    s.ns.SetField(p.Node(), sfiConnectedStats, nil)
    s.vt.Unregister(p.ID())
    p.setValueTracker(nil, nil)
}

// recalTimeout calculates the current recommended timeout. This value is used by
// the client as a "soft timeout" value. It also affects the service value calculation
// of individual nodes.
func (s *serverPool) recalTimeout() {
    // Use cached result if possible, avoid recalculating too frequently.
    s.timeoutLock.RLock()
    refreshed := s.timeoutRefreshed
    s.timeoutLock.RUnlock()
    now := s.clock.Now()
    if refreshed != 0 && time.Duration(now-refreshed) < timeoutRefresh {
        return
    }
    // Cached result is stale, recalculate a new one.
    rts := s.vt.RtStats()

    // Add a fake statistic here. It is an easy way to initialize with some
    // conservative values when the database is new. As soon as we have a
    // considerable amount of real stats this small value won't matter.
    rts.Add(time.Second*2, 10, s.vt.StatsExpFactor())

    // Use either 10% failure rate timeout or twice the median response time
    // as the recommended timeout.
    timeout := minTimeout
    if t := rts.Timeout(0.1); t > timeout {
        timeout = t
    }
    if t := rts.Timeout(0.5) * 2; t > timeout {
        timeout = t
    }
    s.timeoutLock.Lock()
    if s.timeout != timeout {
        s.timeout = timeout
        s.timeWeights = lpc.TimeoutWeights(s.timeout)

        suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))
        totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))
    }
    s.timeoutRefreshed = now
    s.timeoutLock.Unlock()
}

// getTimeout returns the recommended request timeout.
func (s *serverPool) getTimeout() time.Duration {
    s.recalTimeout()
    s.timeoutLock.RLock()
    defer s.timeoutLock.RUnlock()
    return s.timeout
}

// getTimeoutAndWeight returns the recommended request timeout as well as the
// response time weight which is necessary to calculate service value.
func (s *serverPool) getTimeoutAndWeight() (time.Duration, lpc.ResponseTimeWeights) {
    s.recalTimeout()
    s.timeoutLock.RLock()
    defer s.timeoutLock.RUnlock()
    return s.timeout, s.timeWeights
}

// addDialCost adds the given amount of dial cost to the node history and returns the current
// amount of total dial cost
func (s *serverPool) addDialCost(n *nodeHistory, amount int64) uint64 {
    logOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now())
    if amount > 0 {
        n.dialCost.Add(amount, logOffset)
    }
    totalDialCost := n.dialCost.Value(logOffset)
    if totalDialCost < dialCost {
        totalDialCost = dialCost
    }
    return totalDialCost
}

// serviceValue returns the service value accumulated in this session and in total
func (s *serverPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) {
    nvt := s.vt.GetNode(node.ID())
    if nvt == nil {
        return 0, 0
    }
    currentStats := nvt.RtStats()
    _, timeWeights := s.getTimeoutAndWeight()
    expFactor := s.vt.StatsExpFactor()

    totalValue = currentStats.Value(timeWeights, expFactor)
    if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(lpc.ResponseTimeStats); ok {
        diff := currentStats
        diff.SubStats(&connStats)
        sessionValue = diff.Value(timeWeights, expFactor)
        sessionValueMeter.Mark(int64(sessionValue))
    }
    return
}

// updateWeight calculates the node weight and updates the nodeWeight field and the
// hasValue flag. It also saves the node state if necessary.
func (s *serverPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) {
    weight := uint64(totalValue * nodeWeightMul / float64(totalDialCost))
    if weight >= nodeWeightThreshold {
        s.ns.SetState(node, sfHasValue, nodestate.Flags{}, 0)
        s.ns.SetField(node, sfiNodeWeight, weight)
    } else {
        s.ns.SetState(node, nodestate.Flags{}, sfHasValue, 0)
        s.ns.SetField(node, sfiNodeWeight, nil)
    }
    s.ns.Persist(node) // saved if node history or hasValue changed
}

// setRedialWait calculates and sets the redialWait timeout based on the service value
// and dial cost accumulated during the last session/attempt and in total.
// The waiting time is raised exponentially if no service value has been received in order
// to prevent dialing an unresponsive node frequently for a very long time just because it
// was useful in the past. It can still be occasionally dialed though and once it provides
// a significant amount of service value again its waiting time is quickly reduced or reset
// to the minimum.
// Note: node weight is also recalculated and updated by this function.
func (s *serverPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) {
    n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
    sessionValue, totalValue := s.serviceValue(node)
    totalDialCost := s.addDialCost(&n, addDialCost)

    // if the current dial session has yielded at least the average value/dial cost ratio
    // then the waiting time should be reset to the minimum. If the session value
    // is below average but still positive then timeout is limited to the ratio of
    // average / current service value multiplied by the minimum timeout. If the attempt
    // was unsuccessful then timeout is raised exponentially without limitation.
    // Note: dialCost is used in the formula below even if dial was not attempted at all
    // because the pre-negotiation query did not return a positive result. In this case
    // the ratio has no meaning anyway and waitFactor is always raised, though in smaller
    // steps because queries are cheaper and therefore we can allow more failed attempts.
    unixTime := s.unixTime()
    plannedTimeout := float64(n.redialWaitEnd - n.redialWaitStart) // last planned redialWait timeout
    var actualWait float64                                         // actual waiting time elapsed
    if unixTime > n.redialWaitEnd {
        // the planned timeout has elapsed
        actualWait = plannedTimeout
    } else {
        // if the node was redialed earlier then we do not raise the planned timeout
        // exponentially because that could lead to the timeout rising very high in
        // a short amount of time
        // Note that in case of an early redial actualWait also includes the dial
        // timeout or connection time of the last attempt but it still serves its
        // purpose of preventing the timeout rising quicker than linearly as a function
        // of total time elapsed without a successful connection.
        actualWait = float64(unixTime - n.redialWaitStart)
    }
    // raise timeout exponentially if the last planned timeout has elapsed
    // (use at least the last planned timeout otherwise)
    nextTimeout := actualWait * waitStep
    if plannedTimeout > nextTimeout {
        nextTimeout = plannedTimeout
    }
    // we reduce the waiting time if the server has provided service value during the
    // connection (but never under the minimum)
    a := totalValue * dialCost * float64(minRedialWait)
    b := float64(totalDialCost) * sessionValue
    if a < b*nextTimeout {
        nextTimeout = a / b
    }
    if nextTimeout < minRedialWait {
        nextTimeout = minRedialWait
    }
    wait := time.Duration(float64(time.Second) * nextTimeout)
    if wait < waitThreshold {
        n.redialWaitStart = unixTime
        n.redialWaitEnd = unixTime + int64(nextTimeout)
        s.ns.SetField(node, sfiNodeHistory, n)
        s.ns.SetState(node, sfRedialWait, nodestate.Flags{}, wait)
        s.updateWeight(node, totalValue, totalDialCost)
    } else {
        // discard known node statistics if waiting time is very long because the node
        // hasn't been responsive for a very long time
        s.ns.SetField(node, sfiNodeHistory, nil)
        s.ns.SetField(node, sfiNodeWeight, nil)
        s.ns.SetState(node, nodestate.Flags{}, sfHasValue, 0)
    }
}

// calculateWeight calculates and sets the node weight without altering the node history.
// This function should be called during startup and shutdown only, otherwise setRedialWait
// will keep the weights updated as the underlying statistics are adjusted.
...
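The comment block on setRedialWait describes the redial backoff rule in prose. Stripped of the service-value reduction and persistence, the core of that rule fits in a few lines. A simplified sketch; the function name and parameters are mine, not from the file:

package main

import (
    "fmt"
    "time"
)

// nextRedialWait mirrors the core of setRedialWait: the wait grows
// exponentially once the previously planned wait has fully elapsed, never
// shrinks below the last plan on an early redial, and is clamped to a minimum.
func nextRedialWait(plannedTimeout, actualWait, waitStep, minWait float64) time.Duration {
    next := actualWait * waitStep
    if plannedTimeout > next {
        next = plannedTimeout
    }
    if next < minWait {
        next = minWait
    }
    return time.Duration(next * float64(time.Second))
}

func main() {
    // Simulate repeated failed dials where each planned wait fully elapses,
    // using the dialWaitStep (1.5) and minRedialWait (10s) constants above.
    wait := 0.0
    for i := 0; i < 5; i++ {
        d := nextRedialWait(wait, wait, 1.5, 10)
        wait = d.Seconds()
        fmt.Printf("attempt %d: wait %v\n", i+1, d) // 10s, 15s, 22.5s, ...
    }
}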
cond_test.go
Source:cond_test.go
...
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestTimeoutCondWait(t *testing.T) {
    var wait sync.WaitGroup
    cond := NewCond()
    wait.Add(2)
    go func() {
        cond.Wait()
        wait.Done()
    }()
    time.Sleep(time.Duration(50) * time.Millisecond)
    go func() {
        cond.Signal()
        wait.Done()
    }()
    wait.Wait()
}

func TestTimeoutCondWaitTimeout(t *testing.T) {
    var wait sync.WaitGroup
    cond := NewCond()
    wait.Add(1)
    go func() {
        cond.WaitWithTimeout(time.Duration(500) * time.Millisecond)
        wait.Done()
    }()
    wait.Wait()
}

func TestTimeoutCondWaitTimeoutRemain(t *testing.T) {
    var wait sync.WaitGroup
    cond := NewCond()
    wait.Add(2)
    ch := make(chan time.Duration, 1)
    defer close(ch)
    timeout := time.Duration(2000) * time.Millisecond
    go func() {
        remainTimeout, _ := cond.WaitWithTimeout(timeout)
        ch <- remainTimeout
        wait.Done()
    }()
    sleep(200)
    go func() {
        cond.Signal()
        wait.Done()
    }()
    wait.Wait()
    remainTimeout := <-ch
    assert.True(t, remainTimeout < timeout, "expect remainTimeout %v < %v", remainTimeout, timeout)
    assert.True(t, remainTimeout >= time.Duration(200)*time.Millisecond,
        "expect remainTimeout %v >= 200 millisecond", remainTimeout)
}

func TestSignalNoWait(t *testing.T) {
    cond := NewCond()
    cond.Signal()
}

func sleep(millisecond int) {
    time.Sleep(time.Duration(millisecond) * time.Millisecond)
}
...
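The tests above only show the condition variable's API (NewCond, Wait, WaitWithTimeout, Signal), not its implementation. A minimal channel-based sketch that would satisfy these tests, assuming broadcast-style Signal semantics (unlike sync.Cond.Signal, this wakes every current waiter):

package cond

import (
    "sync"
    "time"
)

// TimeoutCond is a hypothetical condition variable whose waiters can give up
// after a timeout. Each Signal closes the current channel and replaces it.
type TimeoutCond struct {
    mu sync.Mutex
    ch chan struct{}
}

func NewCond() *TimeoutCond {
    return &TimeoutCond{ch: make(chan struct{})}
}

// Wait blocks until the next Signal.
func (c *TimeoutCond) Wait() {
    c.mu.Lock()
    ch := c.ch
    c.mu.Unlock()
    <-ch
}

// WaitWithTimeout blocks until Signal or until the timeout elapses. It returns
// the unused portion of the timeout and whether a signal was received.
func (c *TimeoutCond) WaitWithTimeout(timeout time.Duration) (time.Duration, bool) {
    c.mu.Lock()
    ch := c.ch
    c.mu.Unlock()
    start := time.Now()
    select {
    case <-ch:
        remain := timeout - time.Since(start)
        if remain < 0 {
            remain = 0
        }
        return remain, true
    case <-time.After(timeout):
        return 0, false
    }
}

// Signal wakes all current waiters; it is a no-op if there are none.
func (c *TimeoutCond) Signal() {
    c.mu.Lock()
    close(c.ch)
    c.ch = make(chan struct{})
    c.mu.Unlock()
}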
Timeout
Using AI Code Generation
package main

import (
    "fmt"
    "time"
)

func main() {
    c1 := make(chan string, 1)
    go func() {
        time.Sleep(time.Second * 2)
        c1 <- "result 1"
    }()
    select {
    case res := <-c1:
        fmt.Println(res)
    case <-time.After(time.Second * 1):
        // the 1-second timeout fires before the 2-second worker delivers
        fmt.Println("timeout 1")
    }

    c2 := make(chan string, 1)
    go func() {
        time.Sleep(time.Second * 2)
        c2 <- "result 2"
    }()
    select {
    case res := <-c2:
        // the 3-second timeout is long enough, so the result arrives first
        fmt.Println(res)
    case <-time.After(time.Second * 3):
        fmt.Println("timeout 2")
    }
}
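One detail worth noting: both channels are buffered with capacity 1, so the goroutine's send succeeds even when the timeout branch wins and the result is never received. With an unbuffered channel, the first sender would block forever and leak.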
Timeout
Using AI Code Generation
The same timeout behavior, expressed with the context package:

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    d := time.Now().Add(50 * time.Millisecond)
    ctx, cancel := context.WithDeadline(context.Background(), d)
    defer cancel()
    select {
    case <-time.After(1 * time.Second):
        fmt.Println("overslept")
    case <-ctx.Done():
        // the 50ms deadline expires long before the 1s timer fires
        fmt.Println(ctx.Err())
    }
}

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
    defer cancel()
    select {
    case <-time.After(1 * time.Second):
        fmt.Println("overslept")
    case <-ctx.Done():
        fmt.Println(ctx.Err())
    }
}

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
    defer cancel()
    select {
    case <-time.After(5 * time.Second):
        fmt.Println("overslept")
    case <-ctx.Done():
        fmt.Println(ctx.Err())
    }
}
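context.WithTimeout(parent, d) is simply shorthand for context.WithDeadline(parent, time.Now().Add(d)), which is why the last two programs behave like the first apart from the durations: in all three, ctx.Done() closes after 50ms and ctx.Err() reports context.DeadlineExceeded.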
Timeout
Using AI Code Generation
Two variants of the same pattern: waiting on a sync.WaitGroup with a timeout by closing a channel once Wait returns. In the first program the 1-second timeout fires before the 2-second worker finishes; in the second, 3 seconds is long enough for the wait to complete.

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        time.Sleep(2 * time.Second)
        wg.Done()
    }()
    ch := make(chan struct{})
    go func() {
        wg.Wait()
        close(ch)
    }()
    select {
    case <-ch:
        fmt.Println("Wait completed")
    case <-time.After(1 * time.Second):
        fmt.Println("Wait timed out")
    }
}

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        time.Sleep(2 * time.Second)
        wg.Done()
    }()
    ch := make(chan struct{})
    go func() {
        wg.Wait()
        close(ch)
    }()
    select {
    case <-ch:
        fmt.Println("Wait completed")
    case <-time.After(3 * time.Second):
        fmt.Println("Wait timed out")
    }
}
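Both programs inline the same select; it can be factored into a reusable helper. A sketch, where the helper name waitTimeout is my own and not from the snippets above:

package main

import (
    "fmt"
    "sync"
    "time"
)

// waitTimeout waits on the WaitGroup with a timeout and reports whether the
// wait completed before the timeout fired. Note the helper goroutine lives
// until wg.Wait returns even when the timeout wins the race.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()
    select {
    case <-done:
        return true
    case <-time.After(timeout):
        return false
    }
}

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        time.Sleep(2 * time.Second)
        wg.Done()
    }()
    if waitTimeout(&wg, 1*time.Second) {
        fmt.Println("Wait completed")
    } else {
        fmt.Println("Wait timed out")
    }
}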