How to use the Apply method of the cmd package

Best K6 code snippets using cmd.Apply

applier.go

Source: applier.go (GitHub)

...
    rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb"
    "github.com/pingcap/errors"
    "github.com/pingcap/log"
)

type MsgApplyProposal struct {
    Id uint64
    RegionId uint64
    Props []*proposal
}

type MsgApplyCommitted struct {
    regionId uint64
    term uint64
    entries []eraftpb.Entry
}

type proposal struct {
    isConfChange bool
    index uint64
    term uint64
    cb *message.Callback
}

type MsgApplyRefresh struct {
    id uint64
    term uint64
    region *metapb.Region
}

type MsgApplyRes struct {
    regionID uint64
    execResults []execResult
    sizeDiffHint uint64
}

type execResult = interface{}

type execResultChangePeer struct {
    confChange *eraftpb.ConfChange
    peer *metapb.Peer
    region *metapb.Region
}

type execResultCompactLog struct {
    truncatedIndex uint64
    firstIndex uint64
}

type execResultSplitRegion struct {
    regions []*metapb.Region
    derived *metapb.Region
}

/// Calls the callback of `cmd` when the Region is removed.
func notifyRegionRemoved(regionID, peerID uint64, cmd pendingCmd) {
    log.Debug(fmt.Sprintf("region %d is removed, peerID %d, index %d, term %d", regionID, peerID, cmd.index, cmd.term))
    cmd.cb.Done(ErrRespRegionNotFound(regionID))
}

/// Calls the callback of `cmd` when it can not be processed further.
func notifyStaleCommand(regionID, peerID, term uint64, cmd pendingCmd) {
    log.Info(fmt.Sprintf("command is stale, skip. regionID %d, peerID %d, index %d, term %d",
        regionID, peerID, cmd.index, cmd.term))
    cmd.cb.Done(ErrRespStaleCommand(term))
}

/// The applier of a Region, responsible for handling the committed
/// raft log entries of that Region.
///
/// `Apply` is a Raft term that means executing the actual commands.
/// In Raft, once some log entries are committed, every peer of the Raft
/// group applies the logs one by one. For write commands, it writes to or
/// deletes from the local engine; for admin commands, it makes some meta
/// change to the Raft group.
///
/// The raft worker receives all the apply tasks of different Regions
/// located at this store, and it will get the corresponding applier to
/// handle the apply worker.Task to make the code logic more clear.
type applier struct {
    id uint64
    term uint64
    region *metapb.Region
    tag string
    /// Set to true when the peer is removing itself because of `ConfChangeType::RemoveNode`;
    /// any following committed logs in the same Ready should then fail to apply.
    pendingRemove bool
    /// The commands waiting to be committed and applied.
    pendingCmds pendingCmdQueue
    /// We write apply_state to the KV DB, in one write batch together with the kv data.
    ///
    /// If we wrote it to the Raft DB instead, apply_state and kv data (Put, Delete) would be in
    /// separate WAL files. On a power failure, apply_index for the current raft log might be
    /// synced to file while the KV data is not, so we would lose data.
    applyState rspb.RaftApplyState
    sizeDiffHint uint64
}

func newApplierFromPeer(peer *peer) *applier {
    return &applier{
        tag: fmt.Sprintf("[region %d] %d", peer.Region().GetId(), peer.PeerId()),
        id: peer.PeerId(),
        term: peer.Term(),
        region: peer.Region(),
    }
}

func (a *applier) destroy() {
    log.Info(fmt.Sprintf("%s remove applier", a.tag))
    for _, cmd := range a.pendingCmds.normals {
        notifyRegionRemoved(a.region.Id, a.id, cmd)
    }
    a.pendingCmds.normals = nil
    if cmd := a.pendingCmds.takeConfChange(); cmd != nil {
        notifyRegionRemoved(a.region.Id, a.id, *cmd)
    }
}

func (a *applier) handleTask(aCtx *applyContext, msg message.Msg) {
    switch msg.Type {
    case message.MsgTypeApplyProposal:
        a.handleProposal(msg.Data.(*MsgApplyProposal))
    case message.MsgTypeApplyCommitted:
        a.handleApply(aCtx, msg.Data.(*MsgApplyCommitted))
    case message.MsgTypeApplyRefresh:
        a.handleRefresh(msg.Data.(*MsgApplyRefresh))
    }
}

// When applying a snapshot, the apply state needs to be refreshed.
/// Handles peer registration. When a peer is created, it registers an applier.
func (a *applier) handleRefresh(reg *MsgApplyRefresh) {
    log.Info(fmt.Sprintf("%s refresh the applier, term %d", a.tag, reg.term))
    y.Assert(a.id == reg.id)
    a.term = reg.term
    for _, cmd := range a.pendingCmds.normals {
        notifyStaleCommand(a.region.Id, a.id, a.term, cmd)
    }
    a.pendingCmds.normals = a.pendingCmds.normals[:0]
    if cmd := a.pendingCmds.takeConfChange(); cmd != nil {
        notifyStaleCommand(a.region.Id, a.id, a.term, *cmd)
    }
    *a = applier{
        tag: fmt.Sprintf("[region %d] %d", reg.region.Id, reg.id),
        id: reg.id,
        term: reg.term,
        region: reg.region,
    }
}

/// Handles apply tasks: uses the applier to handle the committed entries.
func (a *applier) handleApply(aCtx *applyContext, apply *MsgApplyCommitted) {
    if len(apply.entries) == 0 || a.pendingRemove {
        return
    }
    a.term = apply.term
    a.handleRaftCommittedEntries(aCtx, apply.entries)
    apply.entries = apply.entries[:0]
}

/// Handles proposals: appends the commands to the applier.
func (a *applier) handleProposal(regionProposal *MsgApplyProposal) {
    regionID, peerID := a.region.Id, a.id
    y.Assert(a.id == regionProposal.Id)
    if a.pendingRemove {
        for _, p := range regionProposal.Props {
            cmd := pendingCmd{index: p.index, term: p.term, cb: p.cb}
            notifyStaleCommand(regionID, peerID, a.term, cmd)
        }
        return
    }
    for _, p := range regionProposal.Props {
        cmd := pendingCmd{index: p.index, term: p.term, cb: p.cb}
        if p.isConfChange {
            if confCmd := a.pendingCmds.takeConfChange(); confCmd != nil {
                // If the peer loses leadership before a conf change is replicated, there may be
                // a stale pending conf change before the next conf change is applied. If it
                // becomes leader again with the stale pending conf change, it will enter
                // this block, so we notify that leadership may have changed.
                notifyStaleCommand(regionID, peerID, a.term, *confCmd)
            }
            a.pendingCmds.setConfChange(&cmd)
        } else {
            a.pendingCmds.appendNormal(cmd)
        }
    }
}

type pendingCmd struct {
    index uint64
    term uint64
    cb *message.Callback
}

type pendingCmdQueue struct {
    normals []pendingCmd
    confChange *pendingCmd
}

func (q *pendingCmdQueue) popNormal(term uint64) *pendingCmd {
    if len(q.normals) == 0 {
        return nil
    }
    cmd := &q.normals[0]
    if cmd.term > term {
        return nil
    }
    q.normals = q.normals[1:]
    return cmd
}

func (q *pendingCmdQueue) appendNormal(cmd pendingCmd) {
    q.normals = append(q.normals, cmd)
}

func (q *pendingCmdQueue) takeConfChange() *pendingCmd {
    // A conf change is not affected when changing between follower and leader,
    // so there is no need to check the term.
    cmd := q.confChange
    q.confChange = nil
    return cmd
}

// TODO: it seems we don't need to separate conf changes from normal entries.
func (q *pendingCmdQueue) setConfChange(cmd *pendingCmd) {
    q.confChange = cmd
}

type applyResultType int

const (
    applyResultTypeNone applyResultType = 0
    applyResultTypeExecResult applyResultType = 1
)

type applyResult struct {
    tp applyResultType
    data interface{}
}

type applyExecContext struct {
    index uint64
    term uint64
    applyState rspb.RaftApplyState
}

type applyCallback struct {
    region *metapb.Region
    cbs []*message.Callback
}

func (c *applyCallback) invokeAll() {
    for _, cb := range c.cbs {
        if cb != nil {
            cb.Done(nil)
        }
    }
}

func (c *applyCallback) push(cb *message.Callback, resp *raft_cmdpb.RaftCmdResponse, txn *badger.Txn) {
    if cb != nil {
        cb.Resp = resp
        cb.Txn = txn
    }
    c.cbs = append(c.cbs, cb)
}

type applyContext struct {
    tag string
    notifier chan<- message.Msg
    engines *engine_util.Engines
    cbs []applyCallback
    applyTaskResList []*MsgApplyRes
    execCtx *applyExecContext
    wb *engine_util.WriteBatch
    lastAppliedIndex uint64
    committedCount int
}

func newApplyContext(tag string, engines *engine_util.Engines,
    notifier chan<- message.Msg, cfg *config.Config) *applyContext {
    return &applyContext{
        tag: tag,
        engines: engines,
        notifier: notifier,
        wb: new(engine_util.WriteBatch),
    }
}

/// Prepares to apply entries for `applier`.
///
/// The general apply progress of an applier is:
/// `prepare_for` -> `commit` [-> `commit` ...] -> `finish_for`.
/// After all appliers are handled, the `write_to_db` method should be called.
func (ac *applyContext) prepareFor(d *applier) {
    if ac.wb == nil {
        ac.wb = new(engine_util.WriteBatch)
    }
    ac.cbs = append(ac.cbs, applyCallback{region: d.region})
    applyState, _ := meta.GetApplyState(ac.engines.Kv, d.region.GetId())
    d.applyState = *applyState
    ac.lastAppliedIndex = d.applyState.AppliedIndex
}

/// Commits all changes made so far for the applier. `persistent` indicates
/// whether to write the changes into badger.
///
/// This call is valid only between a `prepare_for` and a `finish_for`.
func (ac *applyContext) commit(d *applier) {
    if ac.lastAppliedIndex < d.applyState.AppliedIndex {
        d.writeApplyState(ac.wb)
    }
    // last_applied_index doesn't need to be updated; setting persistent to true
    // will force a `prepare_for` call automatically.
    ac.commitOpt(d, true)
}

func (ac *applyContext) commitOpt(d *applier, persistent bool) {
    if persistent {
        ac.writeToDB()
        ac.prepareFor(d)
    }
}

/// Writes all the changes into badger.
func (ac *applyContext) writeToDB() {
    if err := ac.wb.WriteToDB(ac.engines.Kv); err != nil {
        panic(err)
    }
    ac.wb.Reset()
    for _, cb := range ac.cbs {
        cb.invokeAll()
    }
    ac.cbs = ac.cbs[:0]
}

/// Finishes the `Apply`s for the applier.
func (ac *applyContext) finishFor(d *applier, results []execResult) {
    if !d.pendingRemove {
        d.writeApplyState(ac.wb)
    }
    ac.commitOpt(d, false)
    res := &MsgApplyRes{
        regionID: d.region.Id,
        execResults: results,
    }
    ac.applyTaskResList = append(ac.applyTaskResList, res)
}

func (ac *applyContext) flush() {
    // Write to the engine.
    ac.writeToDB()
    if len(ac.applyTaskResList) > 0 {
        for _, res := range ac.applyTaskResList {
            ac.notifier <- message.NewPeerMsg(message.MsgTypeApplyRes, res.regionID, res)
        }
        ac.applyTaskResList = ac.applyTaskResList[:0]
    }
    ac.committedCount = 0
}

/// Handles all the committed entries, i.e. applies them.
func (a *applier) handleRaftCommittedEntries(aCtx *applyContext, committedEntries []eraftpb.Entry) {
    if len(committedEntries) == 0 {
        return
    }
    aCtx.prepareFor(a)
    aCtx.committedCount += len(committedEntries)
    // If we send multiple ConfChange commands, only the first one will be proposed
    // correctly; the others will be saved as normal entries with no data, so we
    // must re-propose these commands again.
    var results []execResult
    for i := range committedEntries {
        entry := &committedEntries[i]
        if a.pendingRemove {
            // This peer is about to be destroyed, skip everything.
            break
        }
        expectedIndex := a.applyState.AppliedIndex + 1
        if expectedIndex != entry.Index {
            panic(fmt.Sprintf("%s expect index %d, but got %d", a.tag, expectedIndex, entry.Index))
        }
        var res applyResult
        switch entry.EntryType {
        case eraftpb.EntryType_EntryNormal:
            res = a.handleRaftEntryNormal(aCtx, entry)
        case eraftpb.EntryType_EntryConfChange:
            res = a.handleRaftEntryConfChange(aCtx, entry)
        }
        switch res.tp {
        case applyResultTypeNone:
        case applyResultTypeExecResult:
            results = append(results, res.data)
        }
        aCtx.commit(a)
    }
    aCtx.finishFor(a, results)
}

func (a *applier) writeApplyState(wb *engine_util.WriteBatch) {
    wb.SetMeta(meta.ApplyStateKey(a.region.Id), &a.applyState)
}

func (a *applier) handleRaftEntryNormal(aCtx *applyContext, entry *eraftpb.Entry) applyResult {
    index := entry.Index
    term := entry.Term
    if len(entry.Data) > 0 {
        cmd := new(raft_cmdpb.RaftCmdRequest)
        err := cmd.Unmarshal(entry.Data)
        if err != nil {
            panic(err)
        }
        return a.processRaftCmd(aCtx, index, term, cmd)
    }
    // When a peer becomes leader, it sends an empty entry.
    a.applyState.AppliedIndex = index
    y.Assert(term > 0)
    for {
        cmd := a.pendingCmds.popNormal(term - 1)
        if cmd == nil {
            break
        }
        // Apparently, all the callbacks whose term is less than the entry's term are stale.
        aCtx.cbs[len(aCtx.cbs)-1].push(cmd.cb, ErrRespStaleCommand(term), nil)
    }
    return applyResult{}
}

func (a *applier) handleRaftEntryConfChange(aCtx *applyContext, entry *eraftpb.Entry) applyResult {
    index := entry.Index
    term := entry.Term
    confChange := new(eraftpb.ConfChange)
    if err := confChange.Unmarshal(entry.Data); err != nil {
        panic(err)
    }
    cmd := new(raft_cmdpb.RaftCmdRequest)
    if err := cmd.Unmarshal(confChange.Context); err != nil {
        panic(err)
    }
    result := a.processRaftCmd(aCtx, index, term, cmd)
    switch result.tp {
    case applyResultTypeNone:
        // If it failed, tell Raft that the `ConfChange` was aborted.
        return applyResult{tp: applyResultTypeExecResult, data: &execResultChangePeer{
            confChange: new(eraftpb.ConfChange),
        }}
    case applyResultTypeExecResult:
        cp := result.data.(*execResultChangePeer)
        cp.confChange = confChange
        return applyResult{tp: applyResultTypeExecResult, data: result.data}
    default:
        panic("unreachable")
    }
}

func (a *applier) findCallback(index, term uint64, isConfChange bool) *message.Callback {
    regionID := a.region.Id
    peerID := a.id
    if isConfChange {
        cmd := a.pendingCmds.takeConfChange()
        if cmd == nil {
            return nil
        }
        if cmd.index == index && cmd.term == term {
            return cmd.cb
        }
        notifyStaleCommand(regionID, peerID, term, *cmd)
        return nil
    }
    for {
        head := a.pendingCmds.popNormal(term)
        if head == nil {
            break
        }
        if head.index == index && head.term == term {
            return head.cb
        }
        // Because the original RaftCmdRequest is missing, we skip calling
        // the coprocessor here.
        notifyStaleCommand(regionID, peerID, term, *head)
    }
    return nil
}

func (a *applier) processRaftCmd(aCtx *applyContext, index, term uint64, cmd *raft_cmdpb.RaftCmdRequest) applyResult {
    if index == 0 {
        panic(fmt.Sprintf("%s process raft cmd needs a non-zero index", a.tag))
    }
    isConfChange := GetChangePeerCmd(cmd) != nil
    resp, txn, result := a.applyRaftCmd(aCtx, index, term, cmd)
    log.Debug(fmt.Sprintf("applied command. region_id %d, peer_id %d, index %d", a.region.Id, a.id, index))
    // TODO: if we have an exec_result, maybe we should return this callback too.
    // The outer store will call it after handling the exec result.
    BindRespTerm(resp, term)
    cmdCB := a.findCallback(index, term, isConfChange)
    aCtx.cbs[len(aCtx.cbs)-1].push(cmdCB, resp, txn)
    return result
}

/// Applies a raft command.
///
/// An apply operation can fail in the following situations:
/// 1. it encounters an error that will occur on all stores; it can then continue
///    applying the next entry safely, like an epoch-not-match error, for example;
/// 2. it encounters an error that may not occur on all stores; in this case we
///    should try to apply the entry again or panic. Considering that this is
///    usually due to a disk operation failing, which is rare, just panicking is ok.
func (a *applier) applyRaftCmd(aCtx *applyContext, index, term uint64,
    req *raft_cmdpb.RaftCmdRequest) (*raft_cmdpb.RaftCmdResponse, *badger.Txn, applyResult) {
    // If pendingRemove is set, the apply should already have been aborted.
    y.Assert(!a.pendingRemove)
    aCtx.execCtx = a.newCtx(aCtx.engines, index, term)
    aCtx.wb.SetSafePoint()
    resp, txn, applyResult, err := a.execRaftCmd(aCtx, req)
    if err != nil {
        // Clear dirty values.
        aCtx.wb.RollbackToSafePoint()
        if _, ok := err.(*util.ErrEpochNotMatch); ok {
            log.Debug(fmt.Sprintf("epoch not match region_id %d, peer_id %d, err %v", a.region.Id, a.id, err))
        } else {
            log.Error(fmt.Sprintf("execute raft command region_id %d, peer_id %d, err %v", a.region.Id, a.id, err))
        }
        if txn != nil {
            txn.Discard()
            txn = nil
        }
        resp = ErrResp(err)
        applyResult.tp = applyResultTypeNone
    }
    a.applyState = aCtx.execCtx.applyState
    aCtx.execCtx = nil
    a.applyState.AppliedIndex = index
    if applyResult.tp == applyResultTypeExecResult {
        switch x := applyResult.data.(type) {
        case *execResultChangePeer:
            a.region = x.region
        case *execResultSplitRegion:
            a.region = x.derived
        default:
        }
    }
    return resp, txn, applyResult
}

func (a *applier) newCtx(engines *engine_util.Engines, index, term uint64) *applyExecContext {
    return &applyExecContext{
        index: index,
        term: term,
        applyState: a.applyState,
    }
}

// Only errors that will also occur on all other stores should be returned.
func (a *applier) execRaftCmd(aCtx *applyContext, req *raft_cmdpb.RaftCmdRequest) (
    resp *raft_cmdpb.RaftCmdResponse, txn *badger.Txn, result applyResult, err error) {
    // Include the region, as an epoch-not-match after a merge may cause key-not-in-range errors.
    err = util.CheckRegionEpoch(req, a.region, false)
    if err != nil {
        return
    }
    if req.AdminRequest != nil {
        return a.execAdminCmd(aCtx, req)
    }
    return a.execNormalCmd(aCtx, req)
}

func (a *applier) execAdminCmd(aCtx *applyContext, req *raft_cmdpb.RaftCmdRequest) (
    resp *raft_cmdpb.RaftCmdResponse, txn *badger.Txn, result applyResult, err error) {
    adminReq := req.AdminRequest
    cmdType := adminReq.CmdType
    if cmdType != raft_cmdpb.AdminCmdType_CompactLog {
        log.Info(fmt.Sprintf("%s execute admin command. term %d, index %d, command %s",
            a.tag, aCtx.execCtx.term, aCtx.execCtx.index, adminReq))
    }
    var adminResp *raft_cmdpb.AdminResponse
    switch cmdType {
    case raft_cmdpb.AdminCmdType_ChangePeer:
        adminResp, result, err = a.execChangePeer(aCtx, adminReq)
    case raft_cmdpb.AdminCmdType_Split:
        adminResp, result, err = a.execSplit(aCtx, adminReq)
    case raft_cmdpb.AdminCmdType_CompactLog:
        adminResp, result, err = a.execCompactLog(aCtx, adminReq)
    case raft_cmdpb.AdminCmdType_TransferLeader:
        err = errors.New("transfer leader won't execute")
    case raft_cmdpb.AdminCmdType_InvalidAdmin:
        err = errors.New("unsupported command type")
    }
    if err != nil {
        return
    }
    adminResp.CmdType = cmdType
    resp = newCmdResp()
    resp.AdminResponse = adminResp
    return
}

func (a *applier) execNormalCmd(aCtx *applyContext, req *raft_cmdpb.RaftCmdRequest) (
    resp *raft_cmdpb.RaftCmdResponse, txn *badger.Txn, result applyResult, err error) {
    requests := req.GetRequests()
    resps := make([]*raft_cmdpb.Response, 0, len(requests))
    hasWrite, hasRead := false, false
    for _, req := range requests {
        switch req.CmdType {
        case raft_cmdpb.CmdType_Put:
            var r *raft_cmdpb.Response
            r, err = a.handlePut(aCtx, req.GetPut())
            resps = append(resps, r)
            hasWrite = true
        case raft_cmdpb.CmdType_Delete:
            var r *raft_cmdpb.Response
            r, err = a.handleDelete(aCtx, req.GetDelete())
            resps = append(resps, r)
            hasWrite = true
        case raft_cmdpb.CmdType_Get:
            var r *raft_cmdpb.Response
            r, err = a.handleGet(aCtx, req.GetGet())
            resps = append(resps, r)
            hasRead = true
        case raft_cmdpb.CmdType_Snap:
            resps = append(resps, &raft_cmdpb.Response{
                CmdType: raft_cmdpb.CmdType_Snap,
                Snap: &raft_cmdpb.SnapResponse{Region: a.region},
            })
            txn = aCtx.engines.Kv.NewTransaction(false)
            hasRead = true
        default:
            log.Fatal(fmt.Sprintf("invalid cmd type=%v", req.CmdType))
        }
    }
    if hasWrite && hasRead {
        panic("mixed write and read in one request")
    }
    resp = newCmdResp()
    resp.Responses = resps
    return
}

func (a *applier) handlePut(aCtx *applyContext, req *raft_cmdpb.PutRequest) (*raft_cmdpb.Response, error) {
    key, value := req.GetKey(), req.GetValue()
    if err := util.CheckKeyInRegion(key, a.region); err != nil {
        return nil, err
    }
    if cf := req.GetCf(); len(cf) != 0 {
        aCtx.wb.SetCF(cf, key, value)
    } else {
        aCtx.wb.SetCF(engine_util.CfDefault, key, value)
    }
    return &raft_cmdpb.Response{
        CmdType: raft_cmdpb.CmdType_Put,
    }, nil
}

func (a *applier) handleDelete(aCtx *applyContext, req *raft_cmdpb.DeleteRequest) (*raft_cmdpb.Response, error) {
    key := req.GetKey()
    if err := util.CheckKeyInRegion(key, a.region); err != nil {
        return nil, err
    }
    if cf := req.GetCf(); len(cf) != 0 {
        aCtx.wb.DeleteCF(cf, key)
    } else {
        aCtx.wb.DeleteCF(engine_util.CfDefault, key)
    }
    return &raft_cmdpb.Response{
        CmdType: raft_cmdpb.CmdType_Delete,
    }, nil
}

func (a *applier) handleGet(aCtx *applyContext, req *raft_cmdpb.GetRequest) (*raft_cmdpb.Response, error) {
    key := req.GetKey()
    if err := util.CheckKeyInRegion(key, a.region); err != nil {
        return nil, err
    }
    var val []byte
    var err error
    if cf := req.GetCf(); len(cf) != 0 {
        val, err = engine_util.GetCF(aCtx.engines.Kv, cf, key)
    } else {
        val, err = engine_util.GetCF(aCtx.engines.Kv, engine_util.CfDefault, key)
    }
    if err == badger.ErrKeyNotFound {
        err = nil
        val = nil
    }
    return &raft_cmdpb.Response{
        CmdType: raft_cmdpb.CmdType_Get,
        Get: &raft_cmdpb.GetResponse{Value: val},
    }, err
}

func (a *applier) execChangePeer(aCtx *applyContext, req *raft_cmdpb.AdminRequest) (
    resp *raft_cmdpb.AdminResponse, result applyResult, err error) {
    request := req.ChangePeer
    peer := request.Peer
    storeID := peer.StoreId
    changeType := request.ChangeType
    region := new(metapb.Region)
    err = util.CloneMsg(a.region, region)
    if err != nil {
        return
    }
    log.Info(fmt.Sprintf("%s exec ConfChange, peer_id %d, type %s, epoch %s",
        a.tag, peer.Id, changeType, region.RegionEpoch))
    // TODO: we need more checks, like peer validation, duplicated id, etc.
    region.RegionEpoch.ConfVer++
    switch changeType {
    case eraftpb.ConfChangeType_AddNode:
        if p := util.FindPeer(region, storeID); p != nil {
            errMsg := fmt.Sprintf("%s can't add duplicated peer, peer %s, region %s",
                a.tag, p, a.region)
            log.Error(errMsg)
            err = errors.New(errMsg)
            return
        }
        region.Peers = append(region.Peers, peer)
        log.Info(fmt.Sprintf("%s add peer successfully, peer %s, region %s", a.tag, peer, a.region))
    case eraftpb.ConfChangeType_RemoveNode:
        if p := util.RemovePeer(region, storeID); p != nil {
            if !util.PeerEqual(p, peer) {
                errMsg := fmt.Sprintf("%s ignore remove unmatched peer, expected_peer %s, got_peer %s",
                    a.tag, peer, p)
                log.Error(errMsg)
                err = errors.New(errMsg)
                return
            }
            if a.id == peer.Id {
                // Removing ourself: we will destroy all region data later,
                // so we need not apply the following logs.
                a.pendingRemove = true
            }
        } else {
            errMsg := fmt.Sprintf("%s removing missing peers, peer %s, region %s",
                a.tag, peer, a.region)
            log.Error(errMsg)
            err = errors.New(errMsg)
            return
        }
        log.Info(fmt.Sprintf("%s remove peer successfully, peer %s, region %s", a.tag, peer, a.region))
    }
    state := rspb.PeerState_Normal
    if a.pendingRemove {
        state = rspb.PeerState_Tombstone
    }
    meta.WriteRegionState(aCtx.wb, region, state)
    resp = &raft_cmdpb.AdminResponse{
        ChangePeer: &raft_cmdpb.ChangePeerResponse{
            Region: region,
        },
    }
    result = applyResult{
        tp: applyResultTypeExecResult,
        data: &execResultChangePeer{
            confChange: new(eraftpb.ConfChange),
            region: region,
            peer: peer,
        },
    }
    return
}

func (a *applier) execSplit(aCtx *applyContext, req *raft_cmdpb.AdminRequest) (
    resp *raft_cmdpb.AdminResponse, result applyResult, err error) {
    splitReq := req.Split
    derived := new(metapb.Region)
    if err := util.CloneMsg(a.region, derived); err != nil {
        panic(err)
    }
    newRegionCnt := 1
    regions := make([]*metapb.Region, 0, newRegionCnt+1)
    keys := make([][]byte, 0, newRegionCnt+1)
    keys = append(keys, derived.StartKey)
    splitKey := splitReq.SplitKey
    if len(splitKey) == 0 {
        err = errors.New("missing split key")
        return
    }
    if bytes.Compare(splitKey, keys[len(keys)-1]) <= 0 {
        err = errors.Errorf("invalid split request:%s", splitReq)
        return
    }
    if len(splitReq.NewPeerIds) != len(derived.Peers) {
        err = errors.Errorf("invalid new peer id count, need %d but got %d",
            len(derived.Peers), len(splitReq.NewPeerIds))
        return
    }
    keys = append(keys, splitKey)
    err = util.CheckKeyInRegion(keys[len(keys)-1], a.region)
    if err != nil {
        return
    }
    if len(keys) < 2 {
        err = errors.New("losing the startKey or splitKey")
        return
    }
    log.Info(fmt.Sprintf("%s split region %s, keys %v", a.tag, a.region, keys))
    derived.RegionEpoch.Version += uint64(newRegionCnt)
    newRegion := &metapb.Region{
        Id: splitReq.NewRegionId,
        RegionEpoch: derived.RegionEpoch,
        StartKey: keys[0],
        EndKey: keys[1],
    }
    newRegion.Peers = make([]*metapb.Peer, len(derived.Peers))
    for j := range newRegion.Peers {
        newRegion.Peers[j] = &metapb.Peer{
            Id: splitReq.NewPeerIds[j],
            StoreId: derived.Peers[j].StoreId,
        }
    }
    meta.WriteRegionState(aCtx.wb, newRegion, rspb.PeerState_Normal)
    writeInitialApplyState(aCtx.wb, newRegion.Id)
    regions = append(regions, newRegion)
    derived.StartKey = keys[len(keys)-1]
    regions = append(regions, derived)
    meta.WriteRegionState(aCtx.wb, derived, rspb.PeerState_Normal)
    resp = &raft_cmdpb.AdminResponse{
        Split: &raft_cmdpb.SplitResponse{
            Regions: regions,
        },
    }
    result = applyResult{tp: applyResultTypeExecResult, data: &execResultSplitRegion{
        regions: regions,
        derived: derived,
    }}
    return
...
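
The core bookkeeping in this listing is the `pendingCmdQueue`: proposed commands wait in the queue until their log entry is applied, and anything proposed at an older term is answered with a stale-command error. Below is a minimal, self-contained sketch of that pattern; the `callback` type and the string response are stand-ins for tinykv's `message.Callback` and error responses, not part of the real codebase.

package main

import "fmt"

// callback is a stand-in for tinykv's message.Callback.
type callback func(resp string)

// pendingCmd mirrors the queue entry from the listing above.
type pendingCmd struct {
    index uint64
    term  uint64
    cb    callback
}

// pendingCmdQueue holds proposals until their entries are applied.
type pendingCmdQueue struct {
    normals []pendingCmd
}

func (q *pendingCmdQueue) appendNormal(cmd pendingCmd) {
    q.normals = append(q.normals, cmd)
}

// popNormal pops the head command only if its term is <= the given term,
// exactly like the listing: newer proposals stay queued.
func (q *pendingCmdQueue) popNormal(term uint64) *pendingCmd {
    if len(q.normals) == 0 {
        return nil
    }
    cmd := &q.normals[0]
    if cmd.term > term {
        return nil
    }
    q.normals = q.normals[1:]
    return cmd
}

func main() {
    q := &pendingCmdQueue{}
    q.appendNormal(pendingCmd{index: 5, term: 2, cb: func(r string) { fmt.Println("cmd@5:", r) }})
    q.appendNormal(pendingCmd{index: 6, term: 3, cb: func(r string) { fmt.Println("cmd@6:", r) }})

    // An empty entry at term 3 flushes every callback proposed before term 3
    // with a stale-command response, as handleRaftEntryNormal does above.
    entryTerm := uint64(3)
    for {
        cmd := q.popNormal(entryTerm - 1)
        if cmd == nil {
            break
        }
        cmd.cb("ErrStaleCommand")
    }
    fmt.Println("still pending:", len(q.normals)) // the term-3 proposal remains queued
}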

team.go

Source: team.go (GitHub)

...
            Description: "Plans/applies/describes/bootstraps team stacks",
            Commands: map[string]res.Command{
                "init": NewTeamInit(),
                "plan": NewTeamPlan(),
                "apply": NewTeamApply(),
                "auth": NewTeamAuth(),
                "bootstrap": NewTeamBootstrap(),
            },
        },
    }
    return f, nil
    }
}

func impersonate(username string, req interface{}) (v map[string]interface{}) {
    p, err := json.Marshal(req)
    if err != nil {
        panic(err)
    }
    if err := json.Unmarshal(p, &v); err != nil {
        panic(err)
    }
    v["impersonate"] = username
    return v
}

// Action is an entry point for the "team" subcommand.
func (t *Team) Action(args []string) error {
    k, err := kloudClient()
    if err != nil {
        return err
    }
    ctx := context.Background()
    ctx = context.WithValue(ctx, kiteKey, k)
    t.Resource.ContextFunc = func([]string) context.Context { return ctx }
    return t.Resource.Main(args)
}

// TEAM DB

type UserOptions struct {
    Username string
    Groupname string
    Region string
    Provider string
    Template string
    KlientID string
}

type User struct {
    MachineIDs []bson.ObjectId
    MachineLabels []string
    StackID string
    StackTemplateID string
    CredID string
    CredDataID string
    AccountID bson.ObjectId
    PrivateKey string
    PublicKey string
    Identifiers []string
}

type TeamInit struct {
    Provider string
    Team string
    KlientID string
    Username string
    StackTemplate string
}

func NewTeamInit() *TeamInit {
    return &TeamInit{}
}

func (cmd *TeamInit) Valid() error {
    if cmd.KlientID == "" {
        return errors.New("empty value for -klient flag")
    }
    if cmd.Provider == "" {
        cmd.Provider = "vagrant"
    }
    if cmd.Provider != "vagrant" {
        return errors.New("currently only vagrant is supported")
    }
    if cmd.Team == "" {
        cmd.Team = utils.RandString(12)
    }
    if cmd.StackTemplate == "-" {
        p, err := ioutil.ReadAll(os.Stdin)
        if err != nil {
            return err
        }
        cmd.StackTemplate = string(p)
    } else {
        p, err := ioutil.ReadFile(cmd.StackTemplate)
        if err != nil {
            return err
        }
        cmd.StackTemplate = string(p)
    }
    if cmd.StackTemplate == "" {
        return errors.New("empty value for -stack flag")
    }
    return nil
}

func (cmd *TeamInit) Name() string {
    return "init"
}

func (cmd *TeamInit) RegisterFlags(f *flag.FlagSet) {
    f.StringVar(&cmd.Provider, "p", "vagrant", "Team provider name.")
    f.StringVar(&cmd.KlientID, "klient", "", "ID of the klient kite.")
    f.StringVar(&cmd.Team, "team", "koding", "Team name. If empty it will be autogenerated.")
    f.StringVar(&cmd.StackTemplate, "stack", "-", "Stack template content.")
    f.StringVar(&cmd.Username, "u", defaultUsername, "Username for the kloud request.")
}

func (cmd *TeamInit) Run(ctx context.Context) error {
    opts := &UserOptions{
        Username: cmd.Username,
        Groupname: cmd.Team,
        Provider: cmd.Provider,
        Template: cmd.StackTemplate,
        KlientID: cmd.KlientID,
    }
    user, err := CreateUser(opts)
    if err != nil {
        return err
    }
    creds := strings.Join(user.Identifiers, ",")
    var dbg string
    if flagDebug {
        dbg = " -debug"
    }
    resp := &struct {
        TeamDetails *User `json:"teamDetails,omitempty"`
        Kloudctl map[string]string `json:"kloudctl,omitempty"`
    }{
        TeamDetails: user,
        Kloudctl: map[string]string{
            "auth": fmt.Sprintf("%s team%s auth -p %s -team %s -u %s -creds %s", os.Args[0], dbg, cmd.Provider, cmd.Team, cmd.Username, creds),
            "bootstrap": fmt.Sprintf("%s team%s bootstrap -p %s -team %s -u %s -creds %s", os.Args[0], dbg, cmd.Provider, cmd.Username, cmd.Team, creds),
            "plan": fmt.Sprintf("%s team%s plan -p %s -team %s -u %s -tid %s", os.Args[0], dbg, cmd.Provider, cmd.Team, cmd.Username, user.StackTemplateID),
            "apply": fmt.Sprintf("%s team%s apply -p %s -team %s -u %s -sid %s", os.Args[0], dbg, cmd.Provider, cmd.Team, cmd.Username, user.StackID),
        },
    }
    p, err := json.MarshalIndent(resp, "", "\t")
    if err != nil {
        return err
    }
    fmt.Println(string(p))
    return nil
}

/// TEAM PLAN

// TeamPlan provides an implementation for the "team plan" subcommand.
type TeamPlan struct {
    Provider string
    Team string
    StackTemplateID string
    Username string
}

// NewTeamPlan gives a new TeamPlan value.
func NewTeamPlan() *TeamPlan {
    return &TeamPlan{}
}

// Valid implements the kloud.Validator interface.
func (cmd *TeamPlan) Valid() error {
    if cmd.Provider == "" {
        return errors.New("empty value for -p flag")
    }
    if cmd.StackTemplateID == "" {
        return errors.New("empty value for -tid flag")
    }
    if cmd.Team == "" {
        return errors.New("empty value for -team flag")
    }
    return nil
}

// Name gives the name of the command; implements the res.Command interface.
func (cmd *TeamPlan) Name() string {
    return "plan"
}

// RegisterFlags sets the flags for the command - "team plan <flags>".
func (cmd *TeamPlan) RegisterFlags(f *flag.FlagSet) {
    f.StringVar(&cmd.Provider, "p", "aws", "Team provider name.")
    f.StringVar(&cmd.Team, "team", "koding", "Team name.")
    f.StringVar(&cmd.StackTemplateID, "tid", "", "Stack template ID.")
    f.StringVar(&cmd.Username, "u", defaultUsername, "Username for the kloud request.")
}

// Run executes the "team plan" subcommand.
func (cmd *TeamPlan) Run(ctx context.Context) error {
    k := kiteFromContext(ctx)
    req := impersonate(cmd.Username,
        &stack.PlanRequest{
            Provider: cmd.Provider,
            GroupName: cmd.Team,
            StackTemplateID: cmd.StackTemplateID,
        },
    )
    resp, err := k.TellWithTimeout("plan", defaultTellTimeout, req)
    if err != nil {
        return fmt.Errorf("%v %s kloud error: %s", k.Kite, k.Hostname, err)
    }
    DefaultUi.Info("plan raw response: " + string(resp.Raw))
    return nil
}

/// TEAM APPLY

// TeamApply provides an implementation for the "team apply" subcommand.
type TeamApply struct {
    Provider string
    Team string
    StackID string
    Destroy bool
    Username string
}

// NewTeamApply gives a new TeamApply value.
func NewTeamApply() *TeamApply {
    return &TeamApply{}
}

// Valid implements the kloud.Validator interface.
func (cmd *TeamApply) Valid() error {
    if cmd.Provider == "" {
        return errors.New("empty value for -p flag")
    }
    if cmd.Team == "" {
        return errors.New("empty value for -team flag")
    }
    if cmd.StackID == "" {
        return errors.New("empty value for -sid flag")
    }
    return nil
}

// Name gives the name of the command; implements the res.Command interface.
func (cmd *TeamApply) Name() string {
    return "apply"
}

// RegisterFlags sets the flags for the command - "team apply <flags>".
func (cmd *TeamApply) RegisterFlags(f *flag.FlagSet) {
    f.StringVar(&cmd.Provider, "p", "aws", "Team provider name.")
    f.StringVar(&cmd.Team, "team", "koding", "Team name.")
    f.StringVar(&cmd.StackID, "sid", "", "Compute stack ID.")
    f.BoolVar(&cmd.Destroy, "del", false, "Destroy resources.")
    f.StringVar(&cmd.Username, "u", defaultUsername, "Username for the kloud request.")
}

// Run executes the "team apply" command.
func (cmd *TeamApply) Run(ctx context.Context) error {
    k := kiteFromContext(ctx)
    req := impersonate(cmd.Username,
        &stack.ApplyRequest{
            Provider: cmd.Provider,
            StackID: cmd.StackID,
            GroupName: cmd.Team,
            Destroy: cmd.Destroy,
        },
    )
    resp, err := k.TellWithTimeout("apply", defaultTellTimeout, req)
    if err != nil {
        return err
    }
    var result stack.ControlResult
    err = resp.Unmarshal(&result)
    if err != nil {
        return err
...
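
The `impersonate` helper above is what turns a typed request into the generic payload kloud expects. Here is a minimal, self-contained sketch of that round-trip; the `applyRequest` struct is a stand-in that only mimics the `stack.ApplyRequest` fields used in the listing, not the real type.

package main

import (
    "encoding/json"
    "fmt"
)

// applyRequest is a hypothetical stand-in for stack.ApplyRequest.
type applyRequest struct {
    Provider  string `json:"provider"`
    StackID   string `json:"stackId"`
    GroupName string `json:"groupName"`
    Destroy   bool   `json:"destroy"`
}

// impersonate is copied from the listing: it round-trips the request through
// JSON to get a generic map, then tags it with the impersonated username.
func impersonate(username string, req interface{}) (v map[string]interface{}) {
    p, err := json.Marshal(req)
    if err != nil {
        panic(err)
    }
    if err := json.Unmarshal(p, &v); err != nil {
        panic(err)
    }
    v["impersonate"] = username
    return v
}

func main() {
    req := impersonate("alice", &applyRequest{
        Provider:  "aws",
        StackID:   "stack-123",
        GroupName: "koding",
    })
    out, _ := json.MarshalIndent(req, "", "  ")
    fmt.Println(string(out)) // the map now carries "impersonate": "alice"
}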

yamlproc.go

Source: yamlproc.go (GitHub)

...
// ===== [ Types ] =====

// ===== [ Implementations ] =====

// ===== [ Private Functions ] =====

// ===== [ Public Functions ] =====

// NewYamlApplyCmd : "cbadm apply" (create/update objects according to a YAML description)
func NewYamlApplyCmd() *cobra.Command {
    yamlApplyCmd := &cobra.Command{
        Use: "apply",
        Short: "This is an apply command for yaml",
        Long: "This is an apply command for yaml",
        Run: func(cmd *cobra.Command, args []string) {
            SetupAndRun(cmd, args)
        },
    }
    return yamlApplyCmd
}

// NewYamlGetCmd : "cbadm get" (get objects according to a YAML description)
func NewYamlGetCmd() *cobra.Command {
    yamlGetCmd := &cobra.Command{
        Use: "get",
        Short: "This is a get command for yaml",
        Long: "This is a get command for yaml",
        Run: func(cmd *cobra.Command, args []string) {
            SetupAndRun(cmd, args)
        },
    }
    return yamlGetCmd
}

// NewYamlListCmd : "cbadm list" (list objects according to a YAML description)
...
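
These constructors only build `*cobra.Command` values; something still has to attach them to a root command and execute it. Below is a minimal sketch of that wiring, assuming the usual cobra pattern; the `SetupAndRun` stub here is a stand-in for the helper the listing delegates to.

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// SetupAndRun is a stand-in for the helper used in the listing above.
func SetupAndRun(cmd *cobra.Command, args []string) {
    fmt.Printf("running %q with args %v\n", cmd.Use, args)
}

// NewYamlApplyCmd builds the "apply" subcommand, as in the listing.
func NewYamlApplyCmd() *cobra.Command {
    return &cobra.Command{
        Use:   "apply",
        Short: "This is an apply command for yaml",
        Run: func(cmd *cobra.Command, args []string) {
            SetupAndRun(cmd, args)
        },
    }
}

func main() {
    // Wire the subcommand into a root command, then dispatch on os.Args.
    rootCmd := &cobra.Command{Use: "cbadm"}
    rootCmd.AddCommand(NewYamlApplyCmd())
    if err := rootCmd.Execute(); err != nil {
        fmt.Println(err)
    }
}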

Apply

Using AI Code Generation

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    // Run "ls -l" and wait for it to finish.
    cmd := exec.Command("ls", "-l")
    err := cmd.Run()
    if err != nil {
        fmt.Println(err)
    }
}

Apply

Using AI Code Generation

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    cmd := exec.Command("ls", "-l")
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        fmt.Println(err)
    }
    if err := cmd.Start(); err != nil {
        fmt.Println(err)
    }
    // Read up to 1KB of the command's output from the pipe.
    output := make([]byte, 1024)
    n, err := stdout.Read(output)
    if err != nil {
        fmt.Println("Read failed:", err)
    }
    fmt.Println(string(output[:n]))
    // Wait releases resources associated with the command.
    if err := cmd.Wait(); err != nil {
        fmt.Println(err)
    }
}

Apply

Using AI Code Generation

// Note: each func main below is a separate example program demonstrating one
// method of exec.Cmd; they are grouped here the way the original snippet was.

// Run: start the command and wait for completion, capturing stdout in a buffer.
package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "log"
    "os/exec"
)

func main() {
    cmd := exec.Command("ls", "-ltr")
    var out bytes.Buffer
    cmd.Stdout = &out
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("output: %q\n", out.String())
}

// Output: run the command and collect its standard output.
func main() {
    cmd := exec.Command("ls", "-ltr")
    out, err := cmd.Output()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("output: %q\n", out)
}

// CombinedOutput: collect stdout and stderr together.
func main() {
    cmd := exec.Command("ls", "-ltr")
    out, err := cmd.CombinedOutput()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("output: %q\n", out)
}

// Start and Wait: start the command asynchronously, then wait for it.
// (Wait is only valid after a successful Start.)
func main() {
    cmd := exec.Command("ls", "-ltr")
    if err := cmd.Start(); err != nil {
        log.Fatal(err)
    }
    if err := cmd.Wait(); err != nil {
        log.Fatal(err)
    }
}

// StdoutPipe: stream the command's standard output.
func main() {
    cmd := exec.Command("ls", "-ltr")
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        log.Fatal(err)
    }
    if err = cmd.Start(); err != nil {
        log.Fatal(err)
    }
    out, err := ioutil.ReadAll(stdout)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("output: %q\n", out)
}

// StderrPipe: stream the command's standard error.
func main() {
    cmd := exec.Command("ls", "-ltr")
    stderr, err := cmd.StderrPipe()
    if err != nil {
...

Apply

Using AI Code Generation

// Note: each func main below is a separate example program.

// Run: execute the command, discarding its output.
package main

import (
    "fmt"
    "io"
    "os/exec"
)

func main() {
    cmd := exec.Command("ls", "-l")
    cmd.Run()
}

// CombinedOutput: collect stdout and stderr together.
func main() {
    cmd := exec.Command("ls", "-l")
    output, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(output))
}

// LookPath: resolve the binary's location in $PATH.
func main() {
    path, err := exec.LookPath("ls")
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(path)
}

// Output: run the command and collect its standard output.
func main() {
    cmd := exec.Command("ls", "-l")
    output, err := cmd.Output()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(output))
}

// Start and Wait: run the command asynchronously.
func main() {
    cmd := exec.Command("ls", "-l")
    err := cmd.Start()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("Waiting for command to finish...")
    err = cmd.Wait()
    fmt.Println("Command finished with error: ", err)
}

// StderrPipe: stream the command's standard error.
func main() {
    cmd := exec.Command("ls", "-l")
    stderr, err := cmd.StderrPipe()
    if err != nil {
        fmt.Println(err)
    }
    err = cmd.Start()
    if err != nil {
        fmt.Println(err)
    }
    output, err := io.ReadAll(stderr)
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(output))
    err = cmd.Wait()
    if err != nil {
...

Apply

Using AI Code Generation

// Note: each func main below is a separate example program.

// Run: execute the command and wait for it.
package main

import (
    "fmt"
    "io"
    "os"
    "os/exec"
)

func main() {
    cmd := exec.Command("ls", "-l", "/tmp")
    err := cmd.Run()
    if err != nil {
        fmt.Println(err)
    }
}

// CombinedOutput: collect stdout and stderr together.
func main() {
    cmd := exec.Command("ls", "-l", "/tmp")
    output, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(output))
}

// Output: run the command and collect its standard output.
func main() {
    cmd := exec.Command("ls", "-l", "/tmp")
    output, err := cmd.Output()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(output))
}

// Start and Wait: run the command asynchronously.
func main() {
    cmd := exec.Command("ls", "-l", "/tmp")
    err := cmd.Start()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("Waiting for command to finish...")
    err = cmd.Wait()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("Command finished successfully")
}

// StdoutPipe: copy the command's output directly to this process's stdout.
func main() {
    cmd := exec.Command("ls", "-l", "/tmp")
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        fmt.Println(err)
    }
    err = cmd.Start()
    if err != nil {
        fmt.Println(err)
    }
    io.Copy(os.Stdout, stdout)
    err = cmd.Wait()
    if err != nil {
        fmt.Println(err)
    }
}
...

Apply

Using AI Code Generation

// Note: each func main below is a separate example program.

// Output: run the command and collect its standard output.
package main

import (
    "fmt"
    "os"
    "os/exec"
)

func main() {
    cmd := exec.Command("ls", "-l", "-a")
    output, err := cmd.Output()
    if err != nil {
        fmt.Println(err.Error())
        os.Exit(1)
    }
    fmt.Println(string(output))
}

// CombinedOutput: collect stdout and stderr together.
func main() {
    cmd := exec.Command("ls", "-l", "-a")
    output, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(err.Error())
        os.Exit(1)
    }
    fmt.Println(string(output))
}

Apply

Using AI Code Generation

// Note: each func main below is a separate example program.

// Output: run the command and collect its standard output.
package main

import (
    "fmt"
    "os/exec"
)

func main() {
    cmd := exec.Command("ls", "-l")
    out, err := cmd.Output()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(out))
}

// Run: execute the command and wait for it, discarding output.
func main() {
    cmd := exec.Command("ls", "-l")
    err := cmd.Run()
    if err != nil {
        fmt.Println(err)
    }
}
