How to use the token method in storybook-root

Best JavaScript code snippet using storybook-root

token-assert.js

Source: token-assert.js (GitHub)


var expect = require('chai').expect;
var sinon = require('sinon');
var JsFile = require('../../lib/js-file');
var TokenAssert = require('../../lib/token-assert');
var getPosition = require('../../lib/errors').getPosition;

describe('token-assert', function() {
    function createJsFile(sources) {
        return new JsFile({
            filename: 'example.js',
            source: sources
        });
    }

    describe('whitespaceBetween', function() {
        it('should trigger error on missing whitespace between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var first = file.getTree().getFirstToken();
            tokenAssert.whitespaceBetween({
                token: first,
                nextToken: file.getNextToken(first)
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('Missing space between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should accept message for missing whitespace between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(!!onError.getCall(0).args[0].message).to.equal(true);
        });

        it('should not trigger error on existing whitespace between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });
    });

    describe('spacesBetween', function() {
        it('should do nothing if either token or nextToken is not specified', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            tokenAssert.spacesBetween({
                token: token,
                exactly: 10
            });
            tokenAssert.spacesBetween({
                nextToken: token,
                exactly: 10
            });
            expect(onError).to.have.callCount(0);
        });

        describe('exactly', function() {
            it('should trigger error on invalid space count between tokens', function() {
                var file = createJsFile('x = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('2 spaces required between x and =');
                expect(getPosition(error).line).to.equal(1);
                expect(getPosition(error).column).to.equal(1);
            });

            it('should not trigger error on newline between tokens', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on valid space count between tokens', function() {
                var file = createJsFile('x   =   y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 3
                });
                expect(onError).to.have.callCount(0);
            });

            it('should accept message for invalid space count between tokens', function() {
                var file = createJsFile('x = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2,
                    message: 'Custom message'
                });
                expect(onError.getCall(0).args[0].message).to.equal('Custom message');
            });

            it('should error, but not fix, when a comment exists between the two tokens', function() {
                var file = createJsFile('x/*blockcomment*/=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                var token = file.getTree().getFirstToken();
                var yToken = file.findNextToken(token, 'Identifier', 'y');
                tokenAssert.on('error', onError);
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: yToken,
                    exactly: 5
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.fix).to.equal(undefined);
                expect(file.getWhitespaceBefore(yToken)).to.equal('');
            });
        });

        describe('atMost', function() {
            it('should trigger error on invalid space count between tokens', function() {
                var file = createJsFile('x  =  y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 1
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('at most 1 spaces required between x and =');
                expect(getPosition(error).line).to.equal(1);
                expect(getPosition(error).column).to.equal(1);
            });

            it('should not trigger error on valid space count between tokens', function() {
                var file = createJsFile('x = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 3
                });
                expect(onError).to.have.callCount(0);
            });

            it('should accept message for invalid space count between tokens', function() {
                var file = createJsFile('x  =  y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 1,
                    message: 'Custom message'
                });
                expect(onError.getCall(0).args[0].message).to.equal('Custom message');
            });
        });

        it('should trigger error on invalid maximum space count between tokens', function() {
            var file = createJsFile('x  =  y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 1
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('at most 1 spaces required between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should trigger plural error on invalid maximum space count between tokens', function() {
            var file = createJsFile('x   =   y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 2
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('at most 2 spaces required between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on newline between tokens for maximum spaces', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 1
            });
            expect(onError).to.have.not.callCount(1);
        });

        it('should not trigger error on valid maximum space count between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 3
            });
            expect(onError).to.have.not.callCount(1);
        });

        it('should accept message for invalid maximum space count between tokens', function() {
            var file = createJsFile('x  =  y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 1,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });
    });

    describe('noWhitespaceBetween', function() {
        it('should trigger error on existing whitespace between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('Unexpected whitespace between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should trigger error on newline between tokens with disallowNewLine option', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken,
                disallowNewLine: true
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('Unexpected whitespace between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on missing whitespace between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should accept message for existing space count between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });
    });

    describe('sameLine', function() {
        it('should trigger error on unexpected newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('x and = should be on the same line');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on missing newline between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should accept message for unexpected newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });

        it('should not throw if token or nextToken properties are undefined', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.sameLine({
                token: undefined,
                nextToken: undefined
            });
        });

        it('should move tokens instead of collapsing lines when asked', function() {
            var file = createJsFile('x\n + y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.on('error', function(errorInfo) {
                errorInfo.fix();
            });
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '+');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken,
                stickToPreviousToken: true
            });
            expect(file.render()).to.equal('x +\n y;');
        });
    });

    describe('differentLine', function() {
        it('should trigger error on missing newline between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('x and = should be on different lines');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on existing newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should not trigger on additional newlines between tokens', function() {
            var file = createJsFile('x\n\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should accept message for missing newline between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });

        it('should not throw if token or nextToken properties are undefined', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.differentLine({
                token: undefined,
                nextToken: undefined
            });
        });
    });

    describe('linesBetween', function() {
        describe('error messages', function() {
            beforeEach(function() {
                var file = createJsFile('x=y;');
                this.tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                this.tokenAssert.on('error', onError);
                this.firstToken = file.getTree().getFirstToken();
                this.secondToken = this.firstToken.getNextCodeToken();
            });

            it('should throw if no options are specified', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken
                    });
                }).bind(this)).to.throw(/You must specify at least one option/);
            });

            it('should throw if atLeast and exactly are specified', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken,
                        atLeast: 2,
                        exactly: 1
                    });
                }).bind(this)).to.throw(/You cannot specify atLeast or atMost with exactly/);
            });

            it('should throw if atMost and exactly are specified', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken,
                        atMost: 2,
                        exactly: 1
                    });
                }).bind(this)).to.throw(/You cannot specify atLeast or atMost with exactly/);
            });

            it('should throw if atLeast and atMost are in conflict', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken,
                        atLeast: 3,
                        atMost: 2
                    });
                }).bind(this)).to.throw(/atLeast and atMost are in conflict/);
            });

            it('should throw if token and nextToken are the same', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.firstToken,
                        atLeast: 1
                    });
                }).bind(this))
                    .to.throw(/You cannot specify the same token as both token and nextToken/);
            });
        });

        it('should not throw if token or nextToken properties are undefined', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.linesBetween({
                token: undefined,
                nextToken: undefined,
                exactly: 1
            });
        });

        describe('exactly', function() {
            it('should trigger error on too few newlines', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have exactly 2 line(s) between them');
            });

            it('should trigger error on too many specified newlines', function() {
                var file = createJsFile('x\n\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have exactly 2 line(s) between them');
            });

            it('should not trigger error on correct specified newlines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on exactly 0 blank lines', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 1
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on multiple specified newlines negative', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var lastToken = file.getLastToken();
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: lastToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should edit the whitespaceBefore with too few lines between', function() {
                var file = createJsFile(' x\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.findNextToken(
                    file.getTree().getFirstToken(),
                    'Identifier'
                );
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n\n ');
            });

            it('should edit the whitespaceBefore with too many lines between', function() {
                var file = createJsFile(' x\n\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n\n ');
            });

            it('should not edit the whitespaceBefore with correct lines between', function() {
                var file = createJsFile(' x\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n\n ');
            });

            it('should error, but not fix, when a comment exists between the two tokens (with newline)', function() {
                var file = createJsFile('x\n//linecomment\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 5
                });
                expect(onError).to.have.callCount(1);
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n');
            });
        });

        describe('atLeast', function() {
            it('should trigger on too few lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 3
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have at least 3 line(s) between them');
            });

            it('should not trigger with exact lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on too many lines', function() {
                var file = createJsFile('x\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should edit the whitespaceBefore with too few lines between', function() {
                var file = createJsFile('x\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });

            it('should edit the whitespaceBefore with too few lines (spaced then non spaced) between', function() {
                var file = createJsFile('x \n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 4
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal(' \n\n\n\n ');
            });

            it('should edit the whitespaceBefore with too few lines (non spaced then spaced) between', function() {
                var file = createJsFile('x\n \n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 4
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n \n\n\n ');
            });

            it('should not edit the whitespaceBefore with too many lines between', function() {
                var file = createJsFile('x\n\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n\n ');
            });

            it('should not edit the whitespaceBefore with correct lines between', function() {
                var file = createJsFile('x\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });
        });

        describe('atMost', function() {
            it('should not trigger on too few lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger with exact lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should trigger error on too many lines', function() {
                var file = createJsFile('x\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 1
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have at most 1 line(s) between them');
            });

            it('should not edit the whitespaceBefore with too few lines between', function() {
                var file = createJsFile('x\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n ');
            });

            it('should edit the whitespaceBefore with too many lines between', function() {
                var file = createJsFile('x\n\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });

            it('should not edit the whitespaceBefore with correct lines between', function() {
                var file = createJsFile('x\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });
        });

        describe('between', function() {
            it('should not trigger if within range', function() {
                var file = createJsFile('x\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 1,
                    atMost: 3
                });
                expect(onError).to.have.callCount(0);
            });

            it('should trigger if below range', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2,
                    atMost: 3
                });
                expect(onError).to.have.callCount(1);
            });

            it('should trigger if above range', function() {
                var file = createJsFile('x\n\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 1,
                    atMost: 2
                });
                expect(onError).to.have.callCount(1);
            });
        });
    });

    describe('indentation', function() {
        it('should not trigger on correct indentation', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            tokenAssert.indentation({
                token: file.getProgram().getFirstToken(),
                actual: 0,
                expected: 0,
                indentChar: ' '
            });
            expect(onError).to.have.callCount(0);
        });

        it('should trigger on incorrect indentation', function() {
            var file = createJsFile('  x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            tokenAssert.indentation({
                token: file.getProgram().getFirstToken().getNextCodeToken(),
                actual: 2,
                expected: 0,
                indentChar: ' '
            });
            expect(onError).to.have.not.callCount(0);
        });

        it('should fix whitespace on incorrect indentation for the first token', function() {
            var file = createJsFile('  x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            tokenAssert.indentation({
                token: file.getProgram().getFirstToken().getNextCodeToken(),
                actual: 2,
                expected: 0,
                indentChar: ' '
            });
            expect(file.getWhitespaceBefore(file.getFirstToken())).to.equal('');
        });

        it('should fix docblock on incorrect overindentation', function() {
            var file = createJsFile('  /*\n   *\n   */\nx=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.on('error', function(errorInfo) {
                errorInfo.fix();
            });
            var comment = file.getProgram().getFirstToken().getNextNonWhitespaceToken();
            tokenAssert.indentation({
                token: comment,
                actual: 2,
                expected: 0,
                indentChar: ' '
            });
            comment = file.getProgram().getFirstToken();
            expect(file.getWhitespaceBefore(comment)).to.equal('');
            expect(comment.value).to.equal('\n *\n ');
        });

        it('should fix docblock on incorrect underindentation', function() {
            var file = createJsFile('  /*\n   *\n   */\nx=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.on('error', function(errorInfo) {
                errorInfo.fix();
            });
            var comment = file.getProgram().getFirstToken().getNextNonWhitespaceToken();
            tokenAssert.indentation({
                token: comment,
                actual: 2,
                expected: 4,
                indentChar: ' '
            });
            comment = file.getProgram().getFirstToken().getNextNonWhitespaceToken();
            expect(file.getWhitespaceBefore(comment)).to.equal('    ');
            expect(comment.value).to.equal('\n     *\n     ');
        });
    });
});
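
The tests above drive TokenAssert entirely through its public surface: build a JsFile, attach a listener to the 'error' event, call one of the assertion methods, and optionally apply the attached fix. Below is a minimal sketch of that flow, using only calls that appear in the tests; the ../../lib paths assume the same source tree as the test file, and the sample source string is illustrative:

var JsFile = require('../../lib/js-file');
var TokenAssert = require('../../lib/token-assert');

// Build a file and an assertion helper over it, as createJsFile() does above.
var file = new JsFile({filename: 'example.js', source: 'x=y;'});
var tokenAssert = new TokenAssert(file);

// Every failed assertion arrives as an 'error' event; errorInfo.fix(),
// when present, rewrites the offending whitespace in place.
tokenAssert.on('error', function(errorInfo) {
    console.log(errorInfo.message);
    if (errorInfo.fix) {
        errorInfo.fix();
    }
});

var token = file.getTree().getFirstToken();
tokenAssert.whitespaceBetween({
    token: token,
    nextToken: file.getNextToken(token),
    message: 'Missing space around assignment'
});

console.log(file.render()); // the source after any applied fixes

Guarding on errorInfo.fix before calling it mirrors the "should error, but not fix" cases above, where fix stays undefined because a comment sits between the two tokens.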


cimple.py

Source: cimple.py (GitHub)


1import sys2import string34#############################################################################5# LEXICAL ANALYZER #6#############################################################################7line = 189family = ''10lexical = ''11tokenType = ''1213def lexicalAnalyzer():1415 # counting variables16 global line #Current line17 global family18 global lexical19 global tokenType2021 family = ''22 tokenType = ''23 lexical = ''24 character = 0 #counting number of letter2526 token_char = file.read(1)2728 # TAB or SPACE or newline29 while token_char == '\t' or token_char == ' ' or token_char == '\r':30 token_char = file.read(1)3132 if token_char == '\n':33 line += 134 return lexicalAnalyzer()3536 # Letter37 elif token_char.isalpha():38 lexical = token_char39 token_char = file.read(1)40 character += 141 while token_char.isalpha() or token_char.isdigit():42 if character > 30:43 print(('Error in line %d: Word lenght surpassed limit of 30.', line))44 lexical = lexical + token_char45 character += 146 token_char = file.read(1)47 #print('\t( %s )' % (token_char))48 file.seek(file.tell() - 1)49 family = 'Keyword'5051 if lexical == 'program':52 tokenType = 'program_token'5354 elif lexical == 'declare':55 tokenType = 'declare_token'5657 elif lexical == 'if':58 tokenType = 'if_token'5960 elif lexical == 'else':61 tokenType = 'else_token'6263 elif lexical == 'while':64 tokenType = 'while_token'6566 elif lexical == 'switchcase':67 tokenType = 'switchcase_token'6869 elif lexical == 'forcase':70 tokenType = 'forcase_token'7172 elif lexical == 'incase':73 tokenType = 'incase_token'7475 elif lexical == 'case':76 tokenType = 'case_token'7778 elif lexical == 'default':79 tokenType = 'default_token'8081 elif lexical == 'not':82 tokenType = 'not_token'8384 elif lexical == 'and':85 tokenType = 'and_token'8687 elif lexical == 'or':88 tokenType = 'or_token'8990 elif lexical == 'function':91 tokenType = 'function_token'9293 elif lexical == 'procedure':94 tokenType = 'procedure_token'9596 elif lexical == 'call':97 tokenType = 'call_token'9899 elif lexical == 'return':100 tokenType = 'return_token'101102 elif lexical == 'in':103 tokenType = 'in_token'104105 elif lexical == 'inout':106 tokenType = 'inout_token'107108 elif lexical == 'input':109 tokenType = 'input_token'110111 elif lexical == 'print':112 tokenType = 'print_token'113 else:114 tokenType = 'id_token'115 family = 'Identifier'116117 # Digit118 elif token_char.isdigit():119 lexical = token_char120 token_char = file.read(1)121122 while token_char.isdigit():123 lexical = lexical + token_char124 token_char = file.read(1)125 num = int(lexical)126 if (num < -4294967297 or num > 4294967295):127 print('Error in line %d: Invalid range of number %s ( -2^32+1 > number > 2^32-1).' 
% (line, lexical))128 sys.exit(0)129 file.seek(file.tell() - 1)130 tokenType = 'INTEGER_token'131132 family = 'Number'133134 # '+' or '-'135 elif token_char == '+' or token_char == '-':136 lexical = token_char137 if lexical == '+':138 tokenType = 'plus_token'139 elif lexical == '-':140 tokenType = 'minus_token'141142 family = 'Add_Operator'143144 # '*' or '/'145 elif token_char == '*' or token_char == '/':146 lexical = token_char147 if lexical == '*':148 tokenType = 'multiply_token'149 elif lexical == '/':150 tokenType = 'division_token'151152 family = 'Mul_Operator'153154 # ':='155 elif token_char == ':':156 lexical = lexical + token_char157 token_char = file.read(1)158 if token_char == '=':159 tokenType = 'assign_token'160 lexical = lexical + token_char161 token_char = file.read(1)162 file.seek(file.tell() - 1)163164 family = 'Assignment'165166 # ',' or ';'167 elif token_char == ',' or token_char == ';':168 lexical = token_char169 if lexical == ',':170 tokenType = 'comma_token'171 elif lexical == ';':172 tokenType = 'semicolon_token'173174 family = 'Delimiter'175176 # '=' or '<>' or '<=' or '<' or '>=' or '>'177 elif token_char == '=' or token_char == '<' or token_char == '>':178 lexical = token_char179 if lexical == '=':180 token_char = file.read(1)181 tokenType = 'equals_token'182 lexical = lexical + token_char183 elif lexical == '<':184 token_char = file.read(1)185 if token_char == '>':186 tokenType = 'notequal_token'187 lexical = lexical + token_char188189 elif token_char == '=':190 tokenType = 'lessorequals_token'191 lexical = lexical + token_char192 else:193 tokenType = 'less_token'194 file.seek(file.tell() - 1)195 elif lexical == '>':196 token_char = file.read(1)197 if token_char == '=':198 tokenType = 'greaterorequals_token'199 lexical = lexical + token_char200 else:201 tokenType = 'greater_token'202 file.seek(file.tell() - 1)203204 family = 'Rel_Operator'205 # '(' or ')' or '{' or '}' or '[' or ']'206 elif token_char == '(' or token_char == ')' or token_char == '{' or token_char == '}' or token_char == '[' or token_char == ']':207 lexical = token_char208 if lexical == '(':209 tokenType = 'leftbracket_token'210211 elif lexical == ')':212 tokenType = 'rightbracket_token'213214 elif lexical == '{':215 tokenType = 'begin_token'216217 elif lexical == '}':218 tokenType = 'end_token'219220 elif lexical == ']':221 tokenType = 'rightsquarebracket_token'222223 elif lexical == '[':224 tokenType = 'leftsquarebracket_token'225226 family = 'Group_Symbol'227228 # End program229 elif token_char == '.':230 lexical = token_char231 tokenType = 'endprogram_token'232233 family = 'Delimiter'234235 # Comments236 elif token_char == '#':237 lexical = token_char238 token_char = file.read(1)239 flag = False240 while token_char != '':241 token_char = file.read(1)242 if token_char == '#':243 flag= True244 break245 if flag == True:246 lexicalAnalyzer()247 else:248 print('Error in line %d: "#" is missing. The comment was supposed to be closed.' 
% (line))249 sys.exit(0)250251 elif token_char == '':252 lexical = ''253 tokenType = 'eof_token'254255 else:256 print('Error in line %d : character is not recognised as a language character/symbol ' % (line))257 sys.exit(0)258259 ### If it finds a comment, it prints the next lexical twice ###260 print('Line: %d \t%s\t\t\tfamily: %s ' % (line,lexical,family))261262 return tokenType263264265#############################################################################266# SYNTAX ANALYZER #267#############################################################################268269def syntaxAnalyzer():270271 global tokenType272 global lexical273274 def program():275 # program ID block .276277 # "program" is the starting symbol278 # followed by its name and a block279 # every program ends with a fullstop280 global tokenType281 global lexical282283 tokenType = lexicalAnalyzer()284285 if tokenType == 'program_token':286 tokenType = lexicalAnalyzer()287 if tokenType == 'id_token':288 programName = lexical289 tokenType = lexicalAnalyzer()290 block()291 if tokenType == 'endprogram_token':292 tokenType = lexicalAnalyzer()293 if tokenType == 'eof_token':294 print("\nCompilation successfully completed without errors.\n")295 return296 else:297 print('Error in line %d: No characters are allowed after the fullstop indicating the end of the program.' % (line))298 sys.exit(0)299 else:300 print('Error in line %d: A fullstop expected, the program should end with a fullstop.' % (line))301 sys.exit(0)302 else:303 print('Error in line %d: The program name expected after the keyword "program" but found "%s" .' % (line, lexical))304 sys.exit(0)305 else:306 print('Error in line %d: The program must start with the keyword "program" but instead it starts with the word "%s".' % (line, lexical))307 sys.exit(0)308309310 def block():311 # { declarations subprograms statements }312313 # a block consists of declarations, subprograms and statements314 global tokenType315316 if tokenType == 'begin_token':317 tokenType = lexicalAnalyzer()318 if tokenType == 'declare_token':319 declarations()320 subprograms()321 blockStatements()322 if tokenType == 'end_token':323 tokenType = lexicalAnalyzer()324 else:325 print('Error in line %d: The "}" was expected.' % line)326 sys.exit(0)327 else:328 print('Error in line %d: The "{" was expected .' % line)329 sys.exit(0)330 return331332333 def declarations():334 # ( declare varlist ; ) *335336 # declaration of variables337 # kleene star implies zero or more "declare" statements338 global tokenType339340 while tokenType == 'declare_token':341 tokenType = lexicalAnalyzer()342 varlist()343 if tokenType == 'semicolon_token':344 tokenType = lexicalAnalyzer()345 else:346 print('Error in line %d: The keyword ";" was expected\n' % line)347 sys.exit(0)348 return349350351 def varlist():352 # ID ( , ID ) *353 # | e354355 # a list of variables following the declaration keyword356 global tokenType357358 if tokenType == "id_token":359 tokenType = lexicalAnalyzer()360 while tokenType == 'comma_token':361 tokenType = lexicalAnalyzer()362 if tokenType == 'id_token':363 tokenType = lexicalAnalyzer()364 else:365 print('Error in line %d: A variable is expected after comma (,). 
' % line)366 sys.exit(0)367 return368369370 def subprograms():371 # ( subprogram ) *372373 # zero or more subprograms374 global tokenType375376 while tokenType == 'procedure_token' or tokenType == 'function_token':377 subprogram()378 return379380381 def subprogram():382 # a subprogram is a function or a procedure383 # followed by parameters and block384 global tokenType385 global lexical386387 # function ID ( formalparlist ) block388 if tokenType == 'function_token':389 tokenType = lexicalAnalyzer()390 if tokenType == 'id_token':391 tokenType = lexicalAnalyzer()392 if tokenType == "leftbracket_token":393 tokenType = lexicalAnalyzer()394 formalparlist()395 if tokenType == 'rightbracket_token':396 tokenType = lexicalAnalyzer()397 block()398 else:399 print('Error in line %d: The ")" was expected .' % line)400 sys.exit(0)401 else:402 print('Error in line %d: The "(" was expected .' % line)403 sys.exit(0)404 else:405 print('Error in line %d: A variable is expected after the keyword "function".' % line)406 sys.exit(0)407408 # procedure ID ( formalparlist ) block409 elif tokenType == 'procedure_token':410 tokenType = lexicalAnalyzer()411 if tokenType == 'id_token':412 name = lexical413 tokenType = lexicalAnalyzer()414 if tokenType == "leftbracket_token":415 tokenType = lexicalAnalyzer()416 formalparlist()417 if tokenType == 'rightbracket_token':418 block()419 else:420 print('Error in line %d: The ")" was expected .' % line)421 sys.exit(0)422 else:423 print('Error in line %d: The "(" was expected .' % line)424 sys.exit(0)425 else:426 print('Error in line %d: A variable is expected after the keyword "procedure".' % line)427 sys.exit(0)428 else:429 print('Error in line %d: The keyword "function" or "procedure" was expected.' % line)430 sys.exit(0)431 return432433434 def formalparlist():435 # formalparitem ( , formalparitem ) *436437 # list of formal parameters438 # one or more parameters are allowed439 global tokenType440441 formalparitem()442 while tokenType == 'comma_token':443 tokenType = lexicalAnalyzer()444 if tokenType == 'in_token' or tokenType == 'inout_token':445 formalparitem()446 else:447 print('Error in line %d: Expected "in" or "inout" after the comma.' % line)448 sys.exit()449 return450451452 def formalparitem():453 # a formal parameters454 # "in": by value, "inout": by reference455 global tokenType456457 # in ID458 if tokenType == 'in_token':459 tokenType = lexicalAnalyzer()460 if tokenType == 'id_token':461 tokenType = lexicalAnalyzer()462 return463 else:464 print('Error in line %d: A variable is expected after the keyword "in".' % line)465 sys.exit(0)466467 # inout ID468 elif tokenType == 'inout_token':469 tokenType = lexicalAnalyzer()470 if tokenType == 'id_token':471 tokenType = lexicalAnalyzer()472 return473 else:474 print('Error in line %d: A variable is expected after the keyword "inout".' % line)475 sys.exit(0)476 else:477 print('Error in line %d: The keyword "in" or "inout" was expected.' % line)478 sys.exit(0)479 return480481482 def statements():483 # statement ;484 # | { statement ( ; statement ) * }485486 # one or more statements487 # more than one statements should be grouped with brackets488 global tokenType489490 if tokenType == 'begin_token':491 tokenType = lexicalAnalyzer()492 blockStatements()493 if tokenType == 'end_token':494 tokenType = lexicalAnalyzer()495 return496 else:497 print('Error in line %d: The "}" was expected .' 
% line)498 sys.exit(0)499 else:500 statement()501 if tokenType == 'semicolon_token':502 tokenType = lexicalAnalyzer()503 else:504 print('Error in line %d: The keyword ";" was expected\n' % line)505 sys.exit(0)506 return507508509 def blockStatements():510 # statement ( ; statement ) *511512 # statements cosidered as block (used in program and subprogram)513514 global tokenType515516 statement()517 while tokenType == 'semicolon_token':518 tokenType = lexicalAnalyzer()519 statement()520 return521522523 def statement():524 # one statement525526 global tokenType527528 # assignStat529 if tokenType == 'id_token':530 assignStat()531 # ifStat532 elif tokenType == 'if_token':533 ifStat()534 # whileStat535 elif tokenType == 'while_token':536 whileStat()537 # switchcaseStat538 elif tokenType == 'switchcase_token':539 switchcaseStat()540 # forcaseStat541 elif tokenType == 'forcase_token':542 forcaseStat()543 # incaseStat544 elif tokenType == 'incase_token':545 incaseStat()546 # callStat547 elif tokenType == 'call_token':548 callStat()549 # returnStat550 elif tokenType == 'return_token':551 returnStat()552 # inputStat553 elif tokenType == 'input_token':554 inputStat()555 # printStat556 elif tokenType == 'print_token':557 printStat()558 return559560561 def assignStat():562 # ID := expression563564 # assignment statement565 global tokenType566 global lexical567568 if tokenType == 'id_token':569 id = lexical570 tokenType = lexicalAnalyzer()571 if tokenType == 'assign_token':572 tokenType = lexicalAnalyzer()573 expression()574 else:575 print('Error in line %d: The assignment symbol ":=" was expected.' % line)576 sys.exit(0)577 else:578 print('Error in line %d: The "id" was expected.' % line)579 sys.exit(0)580 return581582583 def ifStat():584 # if ( condition ) statements585 # elsepart586587 # if statement588 global tokenType589590 if tokenType == 'if_token':591 tokenType = lexicalAnalyzer()592 if tokenType == 'leftbracket_token':593 tokenType = lexicalAnalyzer()594 condition()595 if tokenType == 'rightbracket_token':596 tokenType = lexicalAnalyzer()597 statements()598 elsePart()599 else:600 print('Error in line %d: The ")" was expected .' % line)601 sys.exit(0)602 else:603 print('Error in line %d: The "(" was expected .' % line)604 sys.exit(0)605 else:606 print('Error in line %d: The "if" was expected.' % line)607 sys.exit(0)608 return609610611 def elsePart():612 # else statements613 # | e614615 # else part is optional616 global tokenType617618 if tokenType == 'else_token':619 tokenType = lexicalAnalyzer()620 statements()621 return622623624 def whileStat():625 # while ( condition ) statements626627 # while statement628 global tokenType629630 if tokenType == 'while_token':631 tokenType = lexicalAnalyzer()632 if tokenType == 'leftbracket_token':633 tokenType = lexicalAnalyzer()634 condition()635 if tokenType == 'rightbracket_token':636 tokenType = lexicalAnalyzer()637 statements()638639 else:640 print('Error in line %d: The ")" was expected.' % line)641 sys.exit(0)642 else:643 print('Error in line %d: The "(" was expected.' % line)644 sys.exit(0)645 else:646 print('Error in line %d: The "while" was expected.' 
% line)
        sys.exit(0)
    return


def switchcaseStat():
    # switchcase
    # ( case ( condition ) statements ) *
    # default statements

    # switch statement
    global tokenType

    if tokenType == 'switchcase_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'case_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                condition()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    statements()
                    while tokenType == 'default_token':
                        tokenType = lexicalAnalyzer()
                        statements()
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "case" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "switchcase" was expected.' % line)
        sys.exit(0)
    return


def forcaseStat():
    # forcase
    # ( case ( condition ) statements ) *
    # default statements

    # forcase statement
    global tokenType

    if tokenType == 'forcase_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'case_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                condition()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    statements()
                    while tokenType == 'default_token':
                        tokenType = lexicalAnalyzer()
                        statements()
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "case" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "forcase" was expected.' % line)
        sys.exit(0)
    return


def incaseStat():
    # incase
    # ( case ( condition ) statements )*

    # incase statement
    global tokenType

    if tokenType == 'incase_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'case_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                condition()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    statements()
                    while tokenType == 'default_token':
                        tokenType = lexicalAnalyzer()
                        statements()
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "case" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "incase" was expected.' % line)
        sys.exit(0)
    return


def returnStat():
    # return ( expression )

    # return statement
    global tokenType

    if tokenType == 'return_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftbracket_token':
            tokenType = lexicalAnalyzer()
            expression()
            if tokenType == 'rightbracket_token':
                tokenType = lexicalAnalyzer()
            else:
                print('Error in line %d: The ")" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "(" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "return" was expected.' % line)
        sys.exit(0)
    return


def callStat():
    # call ID ( actualparlist )

    # call statement
    global tokenType
    global lexical

    if tokenType == 'call_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'id_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                actualparlist()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    return
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "id" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "call" was expected.' % line)
        sys.exit(0)


def printStat():
    # print ( expression )

    # print statement
    global tokenType

    if tokenType == 'print_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftbracket_token':
            tokenType = lexicalAnalyzer()
            expression()
            if tokenType == 'rightbracket_token':
                tokenType = lexicalAnalyzer()
            else:
                print('Error in line %d: The ")" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "(" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "print" was expected.' % line)
        sys.exit(0)
    return


def inputStat():
    # input ( ID )

    # input statement
    global tokenType
    global lexical

    if tokenType == 'input_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftbracket_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'id_token':
                tokenType = lexicalAnalyzer()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    return
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "id" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "(" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "input" was expected.' % line)
        sys.exit(0)


def actualparlist():
    # actualparitem ( , actualparitem ) *
    # | e

    # list of actual parameters
    global tokenType

    actualparitem()
    while tokenType == 'comma_token':
        tokenType = lexicalAnalyzer()
        actualparitem()
    return


def actualparitem():
    # in expression
    # | inout ID

    # an actual parameter
    # "in": value, "inout": reference
    global tokenType

    if tokenType == 'in_token':
        tokenType = lexicalAnalyzer()
        expression()
    elif tokenType == 'inout_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'id_token':
            tokenType = lexicalAnalyzer()
        else:
            print('Error in line %d: A parameter was expected after the keyword "inout".\n' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The keyword "in" or "inout" was expected.\n' % line)
        sys.exit(0)
    return


def condition():
    # boolterm ( or boolterm ) *

    # boolean expression
    global tokenType

    boolTerm()
    while tokenType == 'or_token':
        tokenType = lexicalAnalyzer()
        boolTerm()
    return


def boolTerm():
    # boolfactor ( and boolfactor )*

    # term in boolean expression
    global tokenType

    boolfactor()
    while tokenType == 'and_token':
        tokenType = lexicalAnalyzer()
        boolfactor()
    return


def boolfactor():
    # factor in boolean expression

    global tokenType
    # not [ condition ]
    if tokenType == 'not_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftsquarebracket_token':
            tokenType = lexicalAnalyzer()
            condition()
            if tokenType == 'rightsquarebracket_token':
                tokenType = lexicalAnalyzer()
                return
            else:
                print('Error in line %d: The right square bracket symbol "]" was expected here.\n' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The left square bracket symbol "[" was expected here.\n' % line)
            sys.exit(0)

    # [ condition ]
    elif tokenType == 'leftsquarebracket_token':
        tokenType = lexicalAnalyzer()
        condition()
        if tokenType == 'rightsquarebracket_token':
            tokenType = lexicalAnalyzer()
            return
        else:
            print('Error in line %d: The right square bracket symbol "]" was expected here.\n' % line)
            sys.exit(0)

    # expression REL_OP expression
    else:
        expression()
        REL_OP()
        expression()
        return


def expression():
    # optionalSign term ( ADD_OP term ) *

    # arithmetic expression
    global tokenType

    optionalSign()
    term()
    while tokenType == 'plus_token' or tokenType == 'minus_token':
        ADD_OP()
        term()
    return


def term():
    # factor ( MUL_OP factor ) *

    # term in arithmetic expression
    global tokenType

    factor()
    while tokenType == 'multiply_token' or tokenType == 'division_token':
        MUL_OP()
        factor()
    return


def factor():
    # factor in arithmetic expression
    global tokenType

    # INTEGER
    if tokenType == 'INTEGER_token':
        fact = lexical
        tokenType = lexicalAnalyzer()
        return fact

    # | ( expression )
    elif tokenType == 'leftbracket_token':
        tokenType = lexicalAnalyzer()
        e = expression()
        if tokenType == 'rightbracket_token':
            fact = e
            tokenType = lexicalAnalyzer()
            return fact
        else:
            print('Error in line %d: The right bracket symbol ")" was expected here.\n' % line)
            sys.exit(0)
    # | ID idTail
    elif tokenType == 'id_token':
        fact = lexical
        tokenType = lexicalAnalyzer()
        idTail()
        return fact
    else:
        print('Error in line %d: An integer, an expression, a procedure call or a function call was expected here.\n' % line)
        sys.exit(0)


def idTail():
    # ( actualparlist )
    # | e

    # follows a function or procedure
    # describes parentheses and parameters
    global tokenType

    if tokenType == 'leftbracket_token':
        tokenType = lexicalAnalyzer()
        actualparlist()
        if tokenType == 'rightbracket_token':
            tokenType = lexicalAnalyzer()
            return
    return


def optionalSign():
    # ADD_OP
    # | e

    # symbols "+" and "-" (are optional)
    global tokenType
    if tokenType == 'plus_token' or tokenType == 'minus_token':
        # ADD_OP() already consumes the sign token and advances the stream,
        # so the token stream must not be advanced a second time here
        opSign = ADD_OP()
        return opSign
    return


########################################
# lexer rules: relational, arithmetic operations, integer values and ids
########################################


def REL_OP():
    # = | <= | >= | > | < | <>
    global tokenType
    global lexical

    if (tokenType == 'equals_token' or tokenType == 'lessorequals_token' or tokenType == 'greaterorequals_token'
            or tokenType == 'less_token' or tokenType == 'greater_token' or tokenType == 'notequals_token'):
        relOp = lexical
        tokenType = lexicalAnalyzer()
    else:
        print('Error in line %d: A comparison sign was expected here.' % line)
        sys.exit(0)
    return relOp


def ADD_OP():
    # + | -
    global tokenType
    global lexical

    if tokenType == 'plus_token' or tokenType == 'minus_token':
        addOp = lexical
        tokenType = lexicalAnalyzer()
    else:
        print('Error in line %d: A plus sign (+) or a minus sign (-) was expected here.' % line)
        sys.exit(0)
    return addOp


def MUL_OP():
    # * | /
    global tokenType
    global lexical

    if tokenType == 'multiply_token' or tokenType == 'division_token':
        mulOp = lexical
        tokenType = lexicalAnalyzer()
    else:
        print('Error in line %d: A multiplication sign (*) or a division sign (/) was expected here.' % line)
        sys.exit(0)
    return mulOp


program()


# Opening file, as argument in command line:
file = open(sys.argv[1], 'r')
print("\n")
...
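
Every production above follows the same discipline: inspect the lookahead kept in the global tokenType, advance it through lexicalAnalyzer(), and exit with a line-numbered message on any mismatch. A minimal, self-contained sketch of that predict-and-consume pattern (the token list, next_token(), and expect() are illustrative stand-ins, not the snippet's actual lexer):

import sys

# Toy recursive-descent fragment in the style of the parser above.
# 'tokens' and 'pos' stand in for the snippet's lexer state (assumed names).
tokens = ['return_token', 'leftbracket_token', 'id_token', 'rightbracket_token']
pos = 0

def next_token():
    # advance the stream, mimicking what lexicalAnalyzer() does for the parser
    global pos
    tok = tokens[pos] if pos < len(tokens) else 'eof_token'
    pos += 1
    return tok

def expect(expected, current):
    # consume one required token or exit with a parse error, as each
    # production above does inline with its if/else ladder
    if current != expected:
        print('Error: "%s" was expected.' % expected)
        sys.exit(1)
    return next_token()

current = next_token()
current = expect('return_token', current)        # return
current = expect('leftbracket_token', current)   # (
current = expect('id_token', current)            # a bare expression
current = expect('rightbracket_token', current)  # )
print('parsed: return ( id )')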


farms.ts

Source: farms.ts (Github)


import contracts from './contracts'
import { FarmConfig, QuoteToken } from './types'

const farms: FarmConfig[] = [
  {
    pid: 4,
    risk: 5,
    lpSymbol: 'WST',
    isTokenOnly: true,
    lpAddresses: {
      97: '0x3C26Cfb92fC1AA40B9eB81534CDFE9Ed4944462f',
      56: '',
    },
    tokenSymbol: 'WST',
    tokenAddresses: {
      97: '0xC14542fbC96f88E8c3982D26326b0691D7CE9c53',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BUSD,
    quoteTokenAdresses: contracts.busd,
  },
  {
    pid: 1,
    risk: 5,
    lpSymbol: 'WST-BNB LP',
    lpAddresses: {
      97: '0xb37aF8fF25552fDC405a93C2A893D383C019161A',
      56: '',
    },
    tokenSymbol: 'WST',
    tokenAddresses: {
      97: '0xC14542fbC96f88E8c3982D26326b0691D7CE9c53',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BNB,
    quoteTokenAdresses: contracts.wbnb,
  },
  {
    pid: 2,
    risk: 5,
    lpSymbol: 'BNB-BUSD LP',
    lpAddresses: {
      97: '0xe0e92035077c39594793e61802a350347c320cf2',
      56: '',
    },
    tokenSymbol: 'BNB',
    tokenAddresses: {
      97: '0xae13d989daC2f0dEbFf460aC112a837C89BAa7cd',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BUSD,
    quoteTokenAdresses: contracts.busd,
  },
  {
    pid: 3,
    risk: 5,
    lpSymbol: 'WST-BUSD LP',
    lpAddresses: {
      97: '0x3C26Cfb92fC1AA40B9eB81534CDFE9Ed4944462f',
      56: '',
    },
    tokenSymbol: 'WST',
    tokenAddresses: {
      97: '0xC14542fbC96f88E8c3982D26326b0691D7CE9c53',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BUSD,
    quoteTokenAdresses: contracts.busd,
  },
  // ... (dozens of commented-out legacy farm entries)
]
...
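
The active entries above key every address map by BSC chain id (97 is the testnet, 56 the mainnet), and only the testnet side is filled in. A standalone sketch of a sanity check over that shape (the plain dict merely mirrors a FarmConfig entry; it is not the project's real type):

# Mirrors the shape of one FarmConfig entry above (97 = BSC testnet, 56 = mainnet).
farm = {
    "pid": 4,
    "lpSymbol": "WST",
    "lpAddresses": {97: "0x3C26Cfb92fC1AA40B9eB81534CDFE9Ed4944462f", 56: ""},
    "tokenAddresses": {97: "0xC14542fbC96f88E8c3982D26326b0691D7CE9c53", 56: ""},
}

def usable_chains(entry):
    # chain ids for which both the LP and the token address are configured
    return [cid for cid in entry["lpAddresses"]
            if entry["lpAddresses"][cid] and entry["tokenAddresses"].get(cid)]

print(usable_chains(farm))  # -> [97]; the mainnet (56) addresses are still empty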


tokenutils.py

Source: tokenutils.py (Github)


# coding:utf-8
'''
@author: ota
'''
import re
from sqlparse import sql, tokens as T
from enum import Enum


class EngineComment(Enum):
    """Type of SQL-engine-related comment."""
    none = 0  # not an SQL-engine-related comment
    syntax = 1  # logic
    param = 2  # parameter
    sql_identifier = 3  # SQL_IDENTIFIER


def get_comment_type(token, comment_syntax):
    """Returns the SQL-engine-related comment type."""
    if is_block_comment(token):
        return comment_syntax.get_block_comment_type(token)
    elif is_line_comment(token):
        return comment_syntax.get_line_comment_type(token)

def is_param_comment(token, next_token, comment_syntax):
    """Checks for an SQL-engine parameter comment."""
    return get_comment_type(token, comment_syntax) == EngineComment.param \
        and (is_literal(next_token) or is_wildcard(next_token) or is_parenthesis(next_token))

def is_hint_block_comment(token):
    """Checks for an Oracle hint comment."""
    if is_block_comment(token):
        tokens = token.tokens
        if len(tokens) >= 3:
            comment = tokens[1].value
            if comment.startswith("+"):
                return True
    return False

def is_block_comment(token):
    """Checks for a block comment."""
    if is_comment(token):
        comment = token.token_next_by_type(0, T.Comment)
        return comment.value in ["/*", "*/"]
    return False

def is_line_comment(token):
    """Checks for a line comment."""
    if is_comment(token):
        comment = token.token_next_by_type(0, T.Comment)
        return comment.value not in ["/*", "*/"]
    return False

def is_plain_line_comment(token, comment_syntax):
    """Checks for a line comment that is not SQL-engine syntax."""
    return is_line_comment(token) and get_comment_type(token, comment_syntax) == EngineComment.none

def is_line_description_line_comment(token, comment_syntax):
    """Checks for a line comment that can serve as a line description."""
    return is_plain_line_comment(token, comment_syntax) and token.is_line_description

def is_comment(token):
    """Checks for a comment."""
    return isinstance(token, sql.Comment)

def is_dot(token):
    """Checks for a dot."""
    return is_punctuation(token) and token.value == "."

def is_comma(token):
    """Checks for a comma."""
    return is_punctuation(token) and token.value == ","

def is_literal(token):
    """Checks for a literal (string or number)."""
    return token.ttype in T.Literal

def is_string_literal(token):
    """Checks for a string literal."""
    return token.ttype in T.Literal.String

def is_number_literal(token):
    """Checks for a number literal."""
    return token.ttype in T.Literal.Number

def is_null_keyword(token):
    """Checks for the NULL keyword."""
    return token.match(T.Keyword, "NULL")

def is_comparison(token):
    """Checks for a comparison."""
    return isinstance(token, sql.Comparison)

def is_identifier_list(token):
    """Checks for an IdentifierList."""
    return isinstance(token, sql.IdentifierList)

def is_identifier(token):
    """Checks for an Identifier."""
    return isinstance(token, sql.Identifier)

def is_function(token):
    """Checks for a function."""
    return isinstance(token, sql.Function)

def is_value_candidate(token):
    """Checks whether the token can be a value."""
    return is_string_candidate(token) or is_number_candidate(token)

def is_string_candidate(token):
    """Checks whether the token can be a string."""
    if is_string_literal(token):
        return True
    if is_function(token):
        return True
    if is_null_keyword(token):
        return True
    if is_calculation(token):
        return True
    if is_parenthesis(token):
        tokens = [t for t in tokens_parenthesis_inner(token) if is_enable(t)]
        if len(tokens) == 1:
            return is_string_candidate(tokens[0])
        elif tokens:
            return is_select_dml(tokens[0])
    if is_identifier(token):
        tokens = [t for t in token.tokens if is_enable(t)]
        for tkn in tokens:
            if (not tkn.ttype in T.Name) and (not is_dot(tkn)):
                return False
        return True
    return False

def is_number_candidate(token):
    """Checks whether the token can be a number."""
    if is_number_literal(token):
        return True
    if is_function(token):
        return True
    if is_null_keyword(token):
        return True
    if is_calculation(token):
        return True
    if is_parenthesis(token):
        tokens = [t for t in tokens_parenthesis_inner(token) if is_enable(t)]
        if len(tokens) == 1:
            return is_number_candidate(tokens[0])
        elif tokens:
            return is_select_dml(tokens[0])
    if is_identifier(token):
        tokens = [t for t in token.tokens if is_enable(t)]
        for tkn in tokens:
            if (not tkn.ttype in T.Name) and (not is_dot(tkn)):
                return False
        return True
    return False

def is_exists_function(token):
    """Checks for the EXISTS function."""
    if not is_function(token):
        return False
    ftoken = token_next_enable(token)
    return equals_ignore_case(ftoken.value, "EXISTS")

def is_over_function(token):
    """Checks for the OVER function."""
    if not is_function(token):
        return False
    ftoken = token_next_enable(token)
    return equals_ignore_case(ftoken.value, "OVER")

def is_parenthesis(token):
    """Checks for a parenthesis."""
    return isinstance(token, sql.Parenthesis)

def is_dmlddl_parenthesis(token):
    """Checks whether a parenthesis contains DML or DDL."""
    if not is_parenthesis(token):
        return False
    open_punc = token.token_next_match(0, T.Punctuation, '(')
    first = token_next_enable(token, open_punc)
    if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
        return True
    if is_with(first):
        return True
    if is_parenthesis(first):
        return is_dmlddl_parenthesis(first)
    return False

def is_enum_parenthesis(token):
    """Checks whether a parenthesis contains an enumeration of values."""
    if not is_parenthesis(token):
        return False
    def is_enums(tokens):
        for token in tokens:
            if token.is_whitespace() \
                    or is_comment(token) \
                    or is_comma(token) \
                    or is_literal(token) \
                    or is_null_keyword(token) \
                    or is_identifier(token):
                pass
            elif is_identifier_list(token):
                if not is_enums(token.tokens):
                    return False
            else:
                return False
        return True
    return is_enums(tokens_parenthesis_inner(token))

def is_comparisons_parenthesis(token):
    """Checks whether a parenthesis contains comparisons."""
    if not is_parenthesis(token):
        return False
    exists_logical_operator = False
    exists_comparison_operator = False
    exists_parenthesis = False
    exists_exists_function = False
    prev_enable = None
    for tkn in tokens_parenthesis_inner(token):
        if is_comparison(tkn):
            return True
        if is_logical_operator_keyword(tkn):
            exists_logical_operator = True
        if is_comparison_operator(tkn):
            exists_comparison_operator = True
        if prev_enable and get_comparison_operator_words(prev_enable, tkn):
            exists_comparison_operator = True
        if is_parenthesis(tkn):
            exists_parenthesis = True
        if is_exists_function(tkn):
            exists_exists_function = True
        if exists_logical_operator and exists_comparison_operator:
            return True
        if exists_logical_operator and exists_parenthesis:
            return True
        if exists_logical_operator and exists_exists_function:
            return True
        if is_enable(tkn):
            prev_enable = tkn
    return False

def is_punctuation(token):
    """Checks for punctuation."""
    return token.ttype in T.Punctuation

def is_semicolon_punctuation(token):
    """Checks for a semicolon."""
    return is_punctuation(token) and token.value == ";"

def is_open_punctuation(token):
    """Checks for an opening parenthesis."""
    return is_punctuation(token) and token.value == "("

def is_close_punctuation(token):
    """Checks for a closing parenthesis."""
    return is_punctuation(token) and token.value == ")"

def is_keyword(token):
    """Checks for a keyword."""
    return token.is_keyword

def is_as_keyword(token):
    """Checks for the AS keyword."""
    return token.match(T.Keyword, "AS")

def is_distinct_keyword(token):
    """Checks for the DISTINCT keyword."""
    return token.match(T.Keyword, "DISTINCT")

def is_from_keyword(token):
    """Checks for the FROM keyword."""
    return token.match(T.Keyword, "FROM")

def is_by_keyword(token):
    """Checks for the BY keyword."""
    return token.match(T.Keyword, "BY")

def is_select_dml(token):
    """Checks for a SELECT clause."""
    return token.match(T.DML, "SELECT")

def is_update_dml(token):
    """Checks for an UPDATE clause."""
    return token.match(T.DML, "UPDATE")

def is_insert_dml(token):
    """Checks for an INSERT clause."""
    return token.match(T.DML, "INSERT")

def is_delete_dml(token):
    """Checks for a DELETE clause."""
    return token.match(T.DML, "DELETE")

def is_with(token):
    """Checks for a WITH clause."""
    from uroborosqlfmt.sql import With
    return isinstance(token, With)

def is_into_keyword(token):
    """Checks for the INTO keyword."""
    return token.match(T.Keyword, "INTO")

def is_values_keyword(token):
    """Checks for the VALUES keyword."""
    return token.match(T.Keyword, "VALUES")

def is_set_keyword(token):
    """Checks for the SET keyword."""
    return token.match(T.Keyword, "SET")

def is_dml(token):
    """Checks for DML."""
    return token.ttype in T.DML

def is_wildcard(token):
    """Checks for the wildcard "*"."""
    return token.ttype in T.Wildcard

def is_where(token):
    """Checks for a WHERE clause."""
    return isinstance(token, sql.Where)

def is_when(token):
    """Checks for a WHEN clause."""
    from uroborosqlfmt.sql import When
    return isinstance(token, When)

def is_having(token):
    """Checks for a HAVING clause."""
    from uroborosqlfmt.sql import Having
    return isinstance(token, Having)

def is_on(token):
    """Checks for an ON clause."""
    from uroborosqlfmt.sql import On
    return isinstance(token, On)

def is_connectby(token):
    """Checks for a CONNECT BY clause."""
    from uroborosqlfmt.sql import ConnectBy
    return isinstance(token, ConnectBy)

def is_startwith(token):
    """Checks for a START WITH clause."""
    from uroborosqlfmt.sql import StartWith
    return isinstance(token, StartWith)

def is_case(token):
    """Checks for a CASE clause."""
    return isinstance(token, sql.Case)

def is_forupdate(token):
    """Checks for a FOR UPDATE clause."""
    from uroborosqlfmt.sql import ForUpdate
    return isinstance(token, ForUpdate)

def is_waitornowait(token):
    """Checks for a WAIT / NOWAIT clause."""
    from uroborosqlfmt.sql import WaitOrNowait
    return isinstance(token, WaitOrNowait)

def is_union(token):
    """Checks for a UNION clause."""
    from uroborosqlfmt.sql import Union
    return isinstance(token, Union)

def is_join(token):
    """Checks for a JOIN clause."""
    from uroborosqlfmt.sql import Join
    return isinstance(token, Join)

def is_mergewhen(token):
    """Checks for a MERGE WHEN clause."""
    from uroborosqlfmt.sql import MergeWhen
    return isinstance(token, MergeWhen)

def is_mergeupdateinsertclause(token):
    """Checks for DML inside a MERGE."""
    from uroborosqlfmt.sql import MergeUpdateInsertClause
    return isinstance(token, MergeUpdateInsertClause)

def is_between_keyword(token):
    """Checks for the BETWEEN keyword."""
    return token.match(T.Keyword, "BETWEEN")

def is_and_keyword(token):
    """Checks for the AND operator."""
    return token.match(T.Keyword, "AND")

def is_using_keyword(token):
    """Checks for the USING keyword."""
    return token.match(T.Keyword, "USING")

def is_logical_operator_keyword(token):
    """Checks for the AND/OR logical operators."""
    return token.match(T.Keyword, ("AND", "OR"))

def is_name_or_keyword(token):
    """Checks for a name or keyword."""
    return is_keyword(token) or token.ttype in T.Name

def is_operator(token):
    """Checks for an operator."""
    return token.ttype in T.Operator

def is_comparison_operator(token):
    """Checks for a comparison operator."""
    return token.ttype in T.Operator.Comparison

def is_concat_operator(token):
    """Checks for the string concatenation operator."""
    return is_operator(token) and token.value == "||"

def is_phrase(token):
    """Checks for a Phrase."""
    from uroborosqlfmt.sql import Phrase
    return isinstance(token, Phrase)

def is_calculation(token):
    """Checks for a calculation."""
    from uroborosqlfmt.sql import Calculation
    return isinstance(token, Calculation)

def is_calc_operator(token):
    """Checks for a calculation operator."""
    if is_concat_operator(token):
        return True
    if is_operator(token) and not is_comparison_operator(token):
        return True
    return False

def is_enable(token):
    """Checks for an effective token (not a comment or whitespace)."""
    if token.is_whitespace():
        return False
    if is_comment(token):
        return False
    if token.parent and is_comment(token.parent):
        return False
    return True

def find_comparison_operator_words(tokens):
    """Searches for comparison operator words."""
    prev = None
    for token in tokens[:]:
        if not is_enable(token):
            continue
        if not prev:
            prev = token
            continue
        comps = get_comparison_operator_words(prev, token)
        if comps:
            return comps
        prev = token
    if prev:
        return get_comparison_operator_words(prev, None)
    else:
        return []

def get_comparison_operator_words(token, next_token):
    """Gets comparison operator words."""
    if next_token and is_keyword(next_token):
        if is_keyword(token):
            if equals_ignore_case(token.value, "NOT"):
                if equals_ignore_case(next_token.value, ["IN", "BETWEEN", "LIKE"]):
                    return [token, next_token]
            elif equals_ignore_case(token.value, "IS"):
                if equals_ignore_case(next_token.value, ["NOT"]):
                    return [token, next_token]
                else:
                    return [token]
        elif is_comparison_operator(token):
            if equals_ignore_case(next_token.value, ["ANY", "SOME", "ALL"]):
                return [token, next_token]
            else:
                return [token]
    else:
        if is_keyword(token):
            if equals_ignore_case(token.value, ["IN", "BETWEEN", "LIKE", "IS"]):
                return [token]
        elif is_comparison_operator(token):
            return [token]
    return []

def tokens_parenthesis_inner(parenthesis):
    """Gets the list of tokens inside the parentheses."""
    open_punc = parenthesis.token_next_match(0, T.Punctuation, '(')
    close_punc = parenthesis.token_next_match(open_punc, T.Punctuation, ')')
    return parenthesis.tokens_between(open_punc, close_punc)[1:-1]

def token_function_inner_parenthesis(func):
    ftoken = token_next_enable(func)
    return token_next_enable(func, ftoken)

def token_next_enable(token, idx=-1):
    """Gets the next effective token."""
    if not isinstance(idx, int):
        idx = token.token_index(idx)
    return token.token_matching(idx + 1, [is_enable])

def token_prev_enable(token, idx=-1):
    """Gets the previous effective token."""
    if not isinstance(idx, int):
        idx = token.token_index(idx)
    if idx < 0:
        idx = len(token.tokens)
    prv = token.token_prev(idx)
    while is_comment(prv):
        prv = token.token_prev(prv)
    return prv

def flatten_tokens_prev(top_token, token):
    """Generator over the tokens preceding the given token."""
    tgt = next(flatten(token))
    iterator = flatten(top_token)
    tokens = []
    for tkn in iterator:
        if tkn == tgt:
            break
        tokens.append(tkn)
    for tkn in tokens[::-1]:
        yield tkn

def flatten_tokens_next(top_token, token):
    """Generator over the tokens following the given token."""
    tgt = list(flatten(token))[-1]
    iterator = flatten(top_token)
    for tkn in iterator:
        if tkn == tgt:
            break
    for tkn in iterator:
        yield tkn

def token_parents(token):
    """Generator over the parent tokens."""
    while token:
        yield token
        token = token.parent

def token_top_matching(token, sub, func):
    """Walks up the parents and returns the topmost matching token, if any."""
    def in_parents(tkn):
        for parent in token_parents(sub):
            if tkn == parent:
                return True
        return False
    parents = token_parents(token)
    tkn = None
    for parent in parents:
        if func(parent):
            if in_parents(parent):
                return None
            tkn = parent
            break
    for parent in parents:
        if in_parents(parent):
            return tkn
        if not func(parent):
            return tkn
        tkn = parent
    return tkn

def within_with_section(stmt, token):
    """Checks whether the token is inside a WITH clause."""
    for tkn in tokens_tree_up(stmt, token):
        if equals_ignore_case(tkn.value, "WITH"):
            return tkn
        if is_dml(tkn):
            return None
    return None

def within_select_statement(stmt, token):
    """Checks whether the token is inside a SELECT statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "SELECT"):
                return tkn
            return None
    return None

def within_update_statement(stmt, token):
    """Checks whether the token is inside an UPDATE statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "UPDATE"):
                return tkn
            return None
    return None

def within_insert_statement(stmt, token):
    """Checks whether the token is inside an INSERT statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "INSERT"):
                return tkn
            return None
    return None

def within_merge_statement(stmt, token):
    """Checks whether the token is inside a MERGE statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "MERGE"):
                return tkn
            return None
    return None

def within_insert_values_section(stmt, token):
    """Checks whether the token is inside the VALUES section of an INSERT."""
    itr = tokens_tree_up(stmt, token)
    for tkn in itr:
        if is_parenthesis(tkn):
            break
    for tkn in itr:
        if is_enable(tkn):
            if is_values_keyword(tkn):
                return tkn
            return None
    return None

def within_insert_into_columns_section(stmt, token):
    """Checks whether the token is inside the column list of an INSERT."""
    itr = tokens_tree_up(stmt, token)
    for tkn in itr:
        if is_parenthesis(tkn):
            break
    for tkn in itr:
        if is_enable(tkn):
            if is_identifier(tkn):
                break
            elif is_insert_dml(tkn):
                return tkn
            else:
                return None
    for tkn in itr:
        if is_enable(tkn):
            if is_into_keyword(tkn):
                return tkn
            return None
    return None

def within_update_set_section(stmt, token):
    """Checks whether the token is inside the SET section of an UPDATE."""
    if not within_update_statement(stmt, token):
        return None
    if within_where_section(stmt, token):
        return None
    itr = tokens_tree_up(stmt, token)
    for tkn in itr:
        if is_set_keyword(tkn):
            return tkn
    return None

def within_where_section(stmt, token):
    """Checks whether the token is inside a WHERE clause."""
    for tkn in tokens_tree_up(stmt, token):
        if equals_ignore_case(tkn.value, "WHERE"):
            return tkn
        if is_dml(tkn):
            return None
    return None

def within_function(stmt, token):
    """Checks whether the token is inside a function."""
    for tkn in get_roots(stmt, token)[:]:
        if is_function(tkn):
            return tkn
    return None

def within_parenthesis(stmt, token):
    """Checks whether the token is inside a parenthesis."""
    for tkn in get_roots(stmt, token)[:]:
        if is_parenthesis(tkn):
            return tkn
    return None

def tokens_tree_up(stmt, token):
    """Generator walking backwards up the parse tree."""
    roots = get_roots(stmt, token)
    cld = roots.pop(0)
    while roots:
        parent = roots.pop(0)
        prevs = []
        for tkn in parent.tokens:
            prevs.append(tkn)
            if tkn == cld:
                cld = parent
                break
        for tkn in prevs[::-1]:
            yield tkn

def get_roots(parent, token):
    """List of root tokens."""
    for tkn in parent.tokens:
        if tkn == token:
            return [token, parent]
        if isinstance(tkn, sql.TokenList):
            ret = get_roots(tkn, token)
            if ret:
                ret.append(parent)
                return ret
    return []

def get_parent(top_parent, token):
    """Gets the parent token under the given root."""
    for tkn in top_parent.tokens:
        tkn.parent = top_parent
        if tkn == token:
            return top_parent
        if isinstance(tkn, sql.TokenList):
            ret = get_parent(tkn, token)
            if ret:
                return ret
    return None

def flatten(token):
    """Flattening generator. Re-sets parent while iterating; this is where it differs from sql.TokenList#flatten."""
    if isinstance(token, sql.TokenList):
        for tkn in token.tokens:
            tkn.parent = token
            if isinstance(tkn, sql.TokenList):
                for item in flatten(tkn):
                    yield item
            else:
                yield tkn
    else:
        yield token

CONDITION = 1
VALUE = 2

def get_cases(case):
    """Returns a list of 2-tuples (condition, value).
    If an ELSE exists condition is None.
    """
    ret = []
    mode = CONDITION
    for token in case.tokens:
        # Set mode from the current statement
        if token.match(T.Keyword, 'CASE'):
            continue
        elif is_when(token):
            ret.append(([], []))
            mode = CONDITION
        elif token.match(T.Keyword, 'THEN'):
            mode = VALUE
        elif token.match(T.Keyword, 'ELSE'):
            ret.append((None, []))
            mode = VALUE
        elif token.match(T.Keyword, 'END'):
            mode = None
        # First condition without preceding WHEN
        if mode and not ret:
            ret.append(([], []))
        # Append token depending of the current mode
        if mode == CONDITION:
            ret[-1][0].append(token)
        elif mode == VALUE:
            ret[-1][1].append(token)
    # Return cases list
    return ret

def equals_ignore_case(txt1, txt2):
    """Case-insensitive string comparison."""
    if isinstance(txt2, str):
        values = {re.compile(txt2 + "$", re.IGNORECASE)}
    else:
        values = set(re.compile(v + "$", re.IGNORECASE) for v in txt2)
    for pattern in values:
        if pattern.match(txt1):
            return True
    return False

def startswith_ignore_case(target, txt):
    """Case-insensitive startswith."""
    if isinstance(txt, str):
        values = {re.compile(txt, re.IGNORECASE)}
    else:
        values = set(re.compile(v, re.IGNORECASE) for v in txt)
    for pattern in values:
        if pattern.match(target):
            return True
    return False

def endswith_ignore_case(target, txt):
    """Case-insensitive endswith."""
    if isinstance(txt, str):
        values = {re.compile(txt + "$", re.IGNORECASE)}
    else:
        values = set(re.compile(v + "$", re.IGNORECASE) for v in txt)
    for pattern in values:
        if pattern.search(target):
            return True
...
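
Most of these helpers reduce to one of two tests: a ttype check for leaf tokens, or an isinstance check against a grouped node class. A small standalone illustration with stock sqlparse, independent of the uroborosqlfmt classes the module imports (assumes a reasonably recent sqlparse):

import sqlparse
from sqlparse import sql, tokens as T

def is_comma(token):
    # leaf check: a punctuation token whose value is a comma
    return token.ttype in T.Punctuation and token.value == ","

def is_identifier_list(token):
    # group check: a parsed identifier-list node
    return isinstance(token, sql.IdentifierList)

stmt = sqlparse.parse("SELECT col_a, col_b FROM t")[0]
for token in stmt.tokens:
    if is_identifier_list(token):
        # keep the identifiers, dropping commas and whitespace
        names = [t.value for t in token.tokens if not is_comma(t) and t.value.strip()]
        print(names)  # expected: ['col_a', 'col_b']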


tokenutil.py

Source: tokenutil.py (Github)


#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Token utility functions."""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)')

import copy
import StringIO

from closure_linter.common import tokens
from closure_linter.javascripttokens import JavaScriptToken
from closure_linter.javascripttokens import JavaScriptTokenType

# Shorthand
Type = tokens.TokenType

def GetFirstTokenInSameLine(token):
  """Returns the first token in the same line as token.

  Args:
    token: Any token in the line.

  Returns:
    The first token in the same line as token.
  """
  while not token.IsFirstInLine():
    token = token.previous
  return token

def GetFirstTokenInPreviousLine(token):
  """Returns the first token in the previous line as token.

  Args:
    token: Any token in the line.

  Returns:
    The first token in the previous line as token, or None if token is on the
    first line.
  """
  first_in_line = GetFirstTokenInSameLine(token)
  if first_in_line.previous:
    return GetFirstTokenInSameLine(first_in_line.previous)
  return None

def GetLastTokenInSameLine(token):
  """Returns the last token in the same line as token.

  Args:
    token: Any token in the line.

  Returns:
    The last token in the same line as token.
  """
  while not token.IsLastInLine():
    token = token.next
  return token

def GetAllTokensInSameLine(token):
  """Returns all tokens in the same line as the given token.

  Args:
    token: Any token in the line.

  Returns:
    All tokens on the same line as the given token.
  """
  first_token = GetFirstTokenInSameLine(token)
  last_token = GetLastTokenInSameLine(token)
  tokens_in_line = []
  while first_token != last_token:
    tokens_in_line.append(first_token)
    first_token = first_token.next
  tokens_in_line.append(last_token)
  return tokens_in_line

def CustomSearch(start_token, func, end_func=None, distance=None,
                 reverse=False):
  """Returns the first token where func is True within distance of this token.

  Args:
    start_token: The token to start searching from
    func: The function to call to test a token for applicability
    end_func: The function to call to test a token to determine whether to
        abort the search.
    distance: The number of tokens to look through before failing search. Must
        be positive. If unspecified, will search until the end of the token
        chain
    reverse: When true, search the tokens before this one instead of the
        tokens after it

  Returns:
    The first token matching func within distance of this token, or None if no
    such token is found.
  """
  token = start_token
  if reverse:
    while token and (distance is None or distance > 0):
      previous = token.previous
      if previous:
        if func(previous):
          return previous
        if end_func and end_func(previous):
          return None
      token = previous
      if distance is not None:
        distance -= 1
  else:
    while token and (distance is None or distance > 0):
      next_token = token.next
      if next_token:
        if func(next_token):
          return next_token
        if end_func and end_func(next_token):
          return None
      token = next_token
      if distance is not None:
        distance -= 1
  return None

def Search(start_token, token_types, distance=None, reverse=False):
  """Returns the first token of type in token_types within distance.

  Args:
    start_token: The token to start searching from
    token_types: The allowable types of the token being searched for
    distance: The number of tokens to look through before failing search. Must
        be positive. If unspecified, will search until the end of the token
        chain
    reverse: When true, search the tokens before this one instead of the
        tokens after it

  Returns:
    The first token of any type in token_types within distance of this token,
    or None if no such token is found.
  """
  return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
                      None, distance, reverse)

def SearchExcept(start_token, token_types, distance=None, reverse=False):
  """Returns the first token not of any type in token_types within distance.

  Args:
    start_token: The token to start searching from
    token_types: The unallowable types of the token being searched for
    distance: The number of tokens to look through before failing search. Must
        be positive. If unspecified, will search until the end of the token
        chain
    reverse: When true, search the tokens before this one instead of the
        tokens after it

  Returns:
    The first token of any type in token_types within distance of this token,
    or None if no such token is found.
  """
  return CustomSearch(start_token,
                      lambda token: not token.IsAnyType(token_types),
                      None, distance, reverse)

def SearchUntil(start_token, token_types, end_types, distance=None,
                reverse=False):
  """Returns the first token of type in token_types before a token of end_type.

  Args:
    start_token: The token to start searching from.
    token_types: The allowable types of the token being searched for.
    end_types: Types of tokens to abort search if we find.
    distance: The number of tokens to look through before failing search. Must
        be positive. If unspecified, will search until the end of the token
        chain
    reverse: When true, search the tokens before this one instead of the
        tokens after it

  Returns:
    The first token of any type in token_types within distance of this token
    before any tokens of type in end_type, or None if no such token is found.
  """
  return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
                      lambda token: token.IsAnyType(end_types),
                      distance, reverse)

def DeleteToken(token):
  """Deletes the given token from the linked list.

  Args:
    token: The token to delete
  """
  # When deleting a token, we do not update the deleted token itself to make
  # sure the previous and next pointers are still pointing to tokens which are
  # not deleted. Also it is very hard to keep track of all previously deleted
  # tokens to update them when their pointers become invalid. So we add this
  # flag that any token linked list iteration logic can skip deleted node
  # safely when its current token is deleted.
  token.is_deleted = True
  if token.previous:
    token.previous.next = token.next

  if token.next:
    token.next.previous = token.previous

    following_token = token.next
    while following_token and following_token.metadata.last_code == token:
      following_token.metadata.last_code = token.metadata.last_code
      following_token = following_token.next

def DeleteTokens(token, token_count):
  """Deletes the given number of tokens starting with the given token.

  Args:
    token: The token to start deleting at.
    token_count: The total number of tokens to delete.
  """
  for i in xrange(1, token_count):
    DeleteToken(token.next)
  DeleteToken(token)

def InsertTokenBefore(new_token, token):
  """Insert new_token before token.

  Args:
    new_token: A token to be added to the stream
    token: A token already in the stream
  """
  new_token.next = token
  new_token.previous = token.previous
  new_token.metadata = copy.copy(token.metadata)

  if new_token.IsCode():
    old_last_code = token.metadata.last_code
    following_token = token
    while (following_token and
           following_token.metadata.last_code == old_last_code):
      following_token.metadata.last_code = new_token
      following_token = following_token.next

  token.previous = new_token
  if new_token.previous:
    new_token.previous.next = new_token

  if new_token.start_index is None:
    if new_token.line_number == token.line_number:
      new_token.start_index = token.start_index
    else:
      previous_token = new_token.previous
      if previous_token:
        new_token.start_index = (previous_token.start_index +
                                 len(previous_token.string))
      else:
        new_token.start_index = 0

    iterator = new_token.next
    while iterator and iterator.line_number == new_token.line_number:
      iterator.start_index += len(new_token.string)
      iterator = iterator.next

def InsertTokenAfter(new_token, token):
  """Insert new_token after token.

  Args:
    new_token: A token to be added to the stream
    token: A token already in the stream
  """
  new_token.previous = token
  new_token.next = token.next
  new_token.metadata = copy.copy(token.metadata)

  if token.IsCode():
    new_token.metadata.last_code = token

  if new_token.IsCode():
    following_token = token.next
    while following_token and following_token.metadata.last_code == token:
      following_token.metadata.last_code = new_token
      following_token = following_token.next

  token.next = new_token
  if new_token.next:
    new_token.next.previous = new_token

  if new_token.start_index is None:
    if new_token.line_number == token.line_number:
      new_token.start_index = token.start_index + len(token.string)
    else:
      new_token.start_index = 0

    iterator = new_token.next
    while iterator and iterator.line_number == new_token.line_number:
      iterator.start_index += len(new_token.string)
      iterator = iterator.next

def InsertTokensAfter(new_tokens, token):
  """Insert multiple tokens after token.

  Args:
    new_tokens: An array of tokens to be added to the stream
    token: A token already in the stream
  """
  # TODO(user): It would be nicer to have InsertTokenAfter defer to here
  # instead of vice-versa.
  current_token = token
  for new_token in new_tokens:
    InsertTokenAfter(new_token, current_token)
    current_token = new_token

def InsertSpaceTokenAfter(token):
  """Inserts a space token after the given token.

  Args:
    token: The token to insert a space token after

  Returns:
    A single space token
  """
  space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
                                token.line_number)
  InsertTokenAfter(space_token, token)

def InsertBlankLineAfter(token):
  """Inserts a blank line after the given token.

  Args:
    token: The token to insert a blank line after

  Returns:
    A single space token
  """
  blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
                                token.line_number + 1)
  InsertLineAfter(token, [blank_token])

def InsertLineAfter(token, new_tokens):
  """Inserts a new line consisting of new_tokens after the given token.

  Args:
    token: The token to insert after.
    new_tokens: The tokens that will make up the new line.
  """
  insert_location = token
  for new_token in new_tokens:
    InsertTokenAfter(new_token, insert_location)
    insert_location = new_token

  # Update all subsequent line numbers.
  next_token = new_tokens[-1].next
  while next_token:
    next_token.line_number += 1
    next_token = next_token.next

def SplitToken(token, position):
  """Splits the token into two tokens at position.

  Args:
    token: The token to split
    position: The position to split at. Will be the beginning of second token.

  Returns:
    The new second token.
  """
  new_string = token.string[position:]
  token.string = token.string[:position]

  new_token = JavaScriptToken(new_string, token.type, token.line,
                              token.line_number)
  InsertTokenAfter(new_token, token)
  return new_token

def Compare(token1, token2):
  """Compares two tokens and determines their relative order.

  Args:
    token1: The first token to compare.
    token2: The second token to compare.

  Returns:
    A negative integer, zero, or a positive integer as the first token is
    before, equal, or after the second in the token stream.
  """
  if token2.line_number != token1.line_number:
    return token1.line_number - token2.line_number
  else:
    return token1.start_index - token2.start_index

def GoogScopeOrNoneFromStartBlock(token):
  """Determines if the given START_BLOCK is part of a goog.scope statement.

  Args:
    token: A token of type START_BLOCK.

  Returns:
    The goog.scope function call token, or None if such call doesn't exist.
  """
  if token.type != JavaScriptTokenType.START_BLOCK:
    return None

  # Search for a goog.scope statement, which will be 5 tokens before the
  # block. Illustration of the tokens found prior to the start block:
  # goog.scope(function() {
  #      5    4        3 21 ^
  maybe_goog_scope = token
  for unused_i in xrange(5):
    maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
                        maybe_goog_scope.previous else None)
  if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
    return maybe_goog_scope

def GetTokenRange(start_token, end_token):
  """Returns a list of tokens between the two given, inclusive.

  Args:
    start_token: Start token in the range.
    end_token: End token in the range.

  Returns:
    A list of tokens, in order, from start_token to end_token (including start
    and end). Returns none if the tokens do not describe a valid range.
  """
  token_range = []
  token = start_token
  while token:
    token_range.append(token)
    if token == end_token:
      return token_range
    token = token.next

def TokensToString(token_iterable):
  """Convert a number of tokens into a string.

  Newlines will be inserted whenever the line_number of two neighboring
  strings differ.

  Args:
    token_iterable: The tokens to turn to a string.

  Returns:
    A string representation of the given tokens.
  """
  buf = StringIO.StringIO()
  token_list = list(token_iterable)
  if not token_list:
    return ''

  line_number = token_list[0].line_number
  for token in token_list:
    while line_number < token.line_number:
      line_number += 1
      buf.write('\n')
    if line_number > token.line_number:
      line_number = token.line_number
      buf.write('\n')
    buf.write(token.string)
  return buf.getvalue()

def GetPreviousCodeToken(token):
  """Returns the code token before the specified token.

  Args:
    token: A token.

  Returns:
    The code token before the specified token or None if no such token
    exists.
  """
  return CustomSearch(
      token,
      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reverse=True)

def GetNextCodeToken(token):
  """Returns the next code token after the specified token.

  Args:
    token: A token.

  Returns:
    The next code token after the specified token or None if no such token
    exists.
  """
  return CustomSearch(
      token,
      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reverse=False)

def GetIdentifierStart(token):
  """Returns the first token in an identifier.

  Given a token which is part of an identifier, returns the token at the start
  of the identifier.

  Args:
    token: A token which is part of an identifier.

  Returns:
    The token at the start of the identifier or None if the identifier was not
    of the form 'a.b.c' (e.g. "['a']['b'].c").
  """
  start_token = token
  previous_code_token = GetPreviousCodeToken(token)

  while (previous_code_token and (
      previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
      _IsDot(previous_code_token))):
    start_token = previous_code_token
    previous_code_token = GetPreviousCodeToken(previous_code_token)

  if _IsDot(start_token):
    return None
  return start_token

def GetIdentifierForToken(token):
  """Get the symbol specified by a token.

  Given a token, this function additionally concatenates any parts of an
  identifying symbol being identified that are split by whitespace or a
  newline.

  The function will return None if the token is not the first token of an
  identifier.

  Args:
    token: The first token of a symbol.

  Returns:
    The whole symbol, as a string.
  """
  # Search backward to determine if this token is the first token of the
  # identifier. If it is not the first token, return None to signal that this
  # token should be ignored.
  prev_token = token.previous
  while prev_token:
    if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
        _IsDot(prev_token)):
      return None
    if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
        prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
      prev_token = prev_token.previous
    else:
      break

  # A "function foo()" declaration.
  if token.type is JavaScriptTokenType.FUNCTION_NAME:
    return token.string

  # A "var foo" declaration (if the previous token is 'var')
  previous_code_token = GetPreviousCodeToken(token)
  if previous_code_token and previous_code_token.IsKeyword('var'):
    return token.string

  # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
  # could span multiple lines or be broken up by whitespace. We need
  # to concatenate.
  identifier_types = set([
      JavaScriptTokenType.IDENTIFIER,
      JavaScriptTokenType.SIMPLE_LVALUE
  ])
  assert token.type in identifier_types

  # Start with the first token
  symbol_tokens = [token]

  if token.next:
    for t in token.next:
      last_symbol_token = symbol_tokens[-1]

      # An identifier is part of the previous symbol if it has a trailing
      # dot.
      if t.type in identifier_types:
        if last_symbol_token.string.endswith('.'):
          symbol_tokens.append(t)
          continue
        else:
          break

      # A dot is part of the previous symbol if it does not have a trailing
      # dot.
      if _IsDot(t):
        if not last_symbol_token.string.endswith('.'):
          symbol_tokens.append(t)
          continue
        else:
          break

      # Skip any whitespace
      if t.type in JavaScriptTokenType.NON_CODE_TYPES:
        continue

      # This is the end of the identifier. Stop iterating.
      break

  if symbol_tokens:
    return ''.join([t.string for t in symbol_tokens])

def GetStringAfterToken(token):
  """Get string after token.

  Args:
    token: Search will be done after this token.

  Returns:
    String if found after token else None (empty string will also
    return None).

  Search until end of string as in case of empty string Type.STRING_TEXT is not
  present/found and don't want to return next string.
  E.g.
    a = '';
    b = 'test';
  When searching for string after 'a' if search is not limited by end of string
  then it will return 'test' which is not desirable as there is a empty string
  before that.

  This will return None for cases where string is empty or no string found
  as in both cases there is no Type.STRING_TEXT.
  """
  string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
                             [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
                              JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
  if string_token:
    return string_token.string
  else:
    return None

def _IsDot(token):
  """Whether the token represents a "dot" operator (foo.bar)."""
...


CompilationEngine.py

Source:CompilationEngine.py Github


...
            self.vm.write_push(VMWriter.CONST, 0)
            self.vm.write_arithmetic(VMWriter.NOT)

        elif k == TOKEN_FALSE or k == TOKEN_NULL:
            self.next_token()
            self.vm.write_push(VMWriter.CONST, 0)
        elif k == TOKEN_THIS:
            self.expect_peek(TOKEN_THIS)
            self.vm.write_push(VMWriter.POINTER, 0)

    def compile_statements(self):
        self.compile_statement()

    def compile_statement(self):
        self.write_xml_line("<statements>")
        token = self.peek_token["token"]
        if token == "if":
            self.compile_if()
            self.compile_statement()

        elif token == "while":
            self.compile_while()
            self.compile_statement()

        elif token == "let":
            self.compile_let()
            self.compile_statement()

        elif token == "do":
            self.compile_do()
            self.compile_statement()

        elif token == "class":
            self.compile_class()
            self.compile_statement()

        elif token == "return":
            self.compile_return()
            self.compile_statement()

        elif token == "EOF":
            pass
        self.write_xml_line("</statements>")

    def compile_class_statements(self):
        self.write_xml_line("<classStatement>")
        self.engine_advance()
        self.engine_advance()
        if self.tknz.get_token() != "{":
            raise Exception("Expected '{'")
        self.engine_advance()
        self.compile_statements()
        if self.tknz.get_token() != "}":
            raise Exception("Expected '}'")
        self.engine_advance(token_advance=False)

        self.write_xml_line("</classStatement>")

    def compile_if(self):
        self.write_xml_line("<ifStatement>")
        label_true = f"IF_TRUE{self.if_label_num}"
        label_false = f"IF_FALSE{self.if_label_num}"
        label_end = f"IF_END{self.if_label_num}"
        self.if_label_num += 1
        self.expect_peek(TOKEN_IF)
        self.expect_peek("(")
        self.compile_expression()
        self.expect_peek(")")
        self.vm.write_if(label_true)
        self.vm.write_goto(label_false)
        self.vm.write_label(label_true)
        self.expect_peek("{")
        self.compile_statements()
        self.expect_peek("}")
        if self.peek_token_is(TOKEN_ELSE):
            self.vm.write_goto(label_end)

        self.vm.write_label(label_false)
        if self.peek_token_is(TOKEN_ELSE):
            self.expect_peek(TOKEN_ELSE)
            self.expect_peek("{")
            self.compile_statements()
            self.expect_peek("}")
            self.vm.write_label(label_end)

        self.write_xml_line("</ifStatement>")

    def compile_while(self):
        self.write_xml_line("<whileStatement>")
        label_while_exp = f"WHILE_EXP{self.while_label_num}"
        label_while_end = f"WHILE_END{self.while_label_num}"
        self.while_label_num += 1
        self.vm.write_label(label_while_exp)
        self.expect_peek(TOKEN_WHILE)
        self.expect_peek("(")
        self.compile_expression()
        self.vm.write_arithmetic(VMWriter.NOT)
        self.vm.write_if(label_while_end)
        self.expect_peek(")")
        self.expect_peek("{")
        self.compile_statements()
        self.vm.write_goto(label_while_exp)
        self.vm.write_label(label_while_end)
        self.expect_peek("}")

        self.write_xml_line("</whileStatement>")

    def compile_let(self):
        self.write_xml_line("<letStatement>")
        isArray = False
        self.expect_peek(TOKEN_LET)
        self.expect_peek(TOKEN_IDENT, token_class=True)
        var_name = self.current_token["token"]
        sym = self.st.resolve(var_name)
        if self.peek_token_is("["):
            self.expect_peek("[")
            self.compile_expression()
            self.vm.write_push(self.scope_to_segment(sym["scope"]), sym["index"])
            self.vm.write_arithmetic(VMWriter.ADD)
            self.expect_peek("]")
            isArray = True

        self.expect_peek("=")
        self.compile_expression()
        if isArray:
            self.vm.write_pop(VMWriter.TEMP, 0)
            self.vm.write_pop(VMWriter.POINTER, 1)
            self.vm.write_push(VMWriter.TEMP, 0)
            self.vm.write_pop(VMWriter.THAT, 0)
        else:
            self.vm.write_pop(self.scope_to_segment(sym["scope"]), sym["index"])

        self.expect_peek(";")
        self.write_xml_line("</letStatement>")

    def compile_do(self):
        self.expect_peek(TOKEN_DO)
        self.expect_peek(TOKEN_IDENT, token_class=True)
        self.compile_subroutine_call()
        self.expect_peek(";")
        self.vm.write_pop(VMWriter.TEMP, 0)

    def compile_expression(self):
        self.write_xml_line("<expression>")
        self.compile_term()
        while (not self.peek_token_is(TOKEN_EOF)) and (self.peek_token["token"] in OP_TERMS):
            self.next_token()
            op = self.current_token["token"]
            self.compile_term()
            self.compile_operators(op)

        self.write_xml_line("</expression>")

    def compile_term(self):
        self.write_xml_line("<term>")
        self.compile_factor()
        # while not self.peek_token_is(TOKEN_EOF) and (p)
        # self.engine_advance(token_advance=False)
        self.write_xml_line("</term>")

    def compile_factor(self):
        token = self.peek_token
        if token["token_class"] == "integer":
            self.next_token()
            self.vm.write_push(VMWriter.CONST, token["token"])
        elif token["token"] == TOKEN_TRUE or token["token"] == TOKEN_FALSE or token["token"] == TOKEN_NULL or token["token"] == TOKEN_THIS:
            self.compile_keyword_const(self.peek_token["token"])
        elif token["token_class"] == "string":
            self.expect_peek("string", token_class=True)
            string = self.current_token["token"]
            self.vm.write_push(VMWriter.CONST, len(string))
            self.vm.write_call("String.new", 1)
            for i in range(len(string)):
                self.vm.write_push(VMWriter.CONST, ord(string[i]))
                self.vm.write_call("String.appendChar", 2)
        elif token["token_class"] == TOKEN_IDENT:
            self.expect_peek(TOKEN_IDENT, token_class=True)
            identifier_name = self.current_token["token"]
            if self.peek_token["token"] == "[":
                self.expect_peek("[")
                self.compile_expression()
                sym = self.st.resolve(identifier_name)
                self.vm.write_push(self.scope_to_segment(sym["scope"]), sym["index"])
                self.vm.write_arithmetic(VMWriter.ADD)
                self.expect_peek("]")
                self.vm.write_pop(VMWriter.POINTER, 1)
                self.vm.write_push(VMWriter.THAT, 0)
            elif self.peek_token["token"] == "(" or self.peek_token["token"] == TOKEN_DOT:
                self.compile_subroutine_call()
            else:
                sym = self.st.resolve(identifier_name)
                self.vm.write_push(self.scope_to_segment(sym["scope"]), sym["index"])
        elif token["token"] == "(":
            self.expect_peek("(")
            self.compile_expression()
            self.expect_peek(")")

        elif token["token"] == TOKEN_MINUS or token["token"] == TOKEN_NOT:
            self.next_token()
            op = self.current_token["token"]
            self.compile_term()
            if op == TOKEN_MINUS:
                self.vm.write_arithmetic(VMWriter.NEG)
            else:
                self.vm.write_arithmetic(VMWriter.NOT)
        else:
            raise Exception("Unrecognized operator: ", token["token"])

    def compile_var_name(self):
        pass

    def compile_constant(self):
        pass

    def compile_operators(self, op):

        if op == TOKEN_PLUS:
            self.vm.write_arithmetic(VMWriter.ADD)
        elif op == TOKEN_MINUS:
            self.vm.write_arithmetic(VMWriter.SUB)
        elif op == TOKEN_ASTERISK:
            self.vm.write_call("Math.multiply", 2)
        elif op == TOKEN_SLASH:
            self.vm.write_call("Math.divide", 2)
        elif op == TOKEN_AND:
            self.vm.write_arithmetic(VMWriter.AND)
        elif op == TOKEN_OR:
            self.vm.write_arithmetic(VMWriter.OR)
        elif op == TOKEN_LT:
            self.vm.write_arithmetic(VMWriter.LT)
        elif op == TOKEN_GT:
            self.vm.write_arithmetic(VMWriter.GT)
        elif op == TOKEN_EQ:
            self.vm.write_arithmetic(VMWriter.EQ)
        elif op == TOKEN_NOT:
            self.vm.write_arithmetic(VMWriter.NOT)

    def compile_return(self):
        self.expect_peek(TOKEN_RETURN)
        if not self.peek_token_is(";"):
            self.compile_expression()
            self.vm.write_return()
        else:
            self.vm.write_push(VMWriter.CONST, 0)
            self.vm.write_return()

        self.expect_peek(";")

    def write_xml_line(self, line):
        xml_file_path = f'xml/{self.file_name}T.xml'
        if exists(xml_file_path) and self.begin:
            with open(xml_file_path, 'w') as xml:
                xml.write(f'{line}\n')

            self.begin = False

        elif not exists(xml_file_path):
            with open(xml_file_path, 'w') as xml:
                xml.write(f'{line}\n')
        else:
            with open(xml_file_path, 'a') as xml:
                xml.write(f'{line}\n')

    # def engine_advance(self, token_advance=True):
    #     token = self.tknz.get_token()
    #     token_tye = self.tknz.token_tye()
    #     line = f'<{token_tye}> {token} </{token_tye}>'
    #     self.write_xml_line(line)
    #     if token_advance:
    #         self.tknz.advance()

    def scope_to_segment(self, scope):
        if scope == SymbolTable.STATIC:
            return VMWriter.STATIC
        elif scope == SymbolTable.FIELD:
            return VMWriter.THIS
        elif scope == SymbolTable.VAR:
            return VMWriter.LOCAL
        elif scope == SymbolTable.ARG:
            return VMWriter.ARG
        else:
            raise Exception("Undefined scope")

    def next_token(self):
        self.current_token = self.peek_token
        try:
            self.tknz.advance()
            self.peek_token = self.tknz.current_token
        except:
            self.peek_token = {
                "token": "EOF",
                "token_class": "keyword"
            }

    def expect_peek(self, token_type, token_class=False):
        # print(self.peek_token)
        if token_class:
            if self.peek_token["token_class"] == token_type:
                self.next_token()
            else:
                self.peek_error(self.peek_token["start"], token_type, self.peek_token["token_class"])
        else:
            if self.peek_token["token"] == token_type:
                self.next_token()
            else:
                self.peek_error(self.peek_token["start"], token_type, self.peek_token["token"])

    def peek_token_is(self, token):
        return self.peek_token["token"] == token

    def peek_error(self, line, expected, real):
        raise Exception(f"{line}: expected next token to be {expected}, got {real} instead")

    def print_table(self, table):
        for key in table.keys():
            print("------------------")
            print("Name: ", table[key]["name"])
            print("Type: ", table[key]["type"])
            print("Scope: ", table[key]["scope"])
            print("Index: ", table[key]["index"])

    def run(self):
...


test_token_refresh.py

Source:test_token_refresh.py Github


...
    }
"""

@freeze_time("2020-03-18 12:00:00")
def test_refresh_token_get_token_from_cookie(api_client, customer_user, settings):
    csrf_token = _get_new_csrf_token()
    refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})
    variables = {"token": None, "csrf_token": csrf_token}
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME] = refresh_token
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME]["httponly"] = True
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    assert not errors
    token = data.get("token")
    assert token
    payload = jwt_decode(token)
    assert payload["email"] == customer_user.email
    assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow()
    assert (
        datetime.fromtimestamp(payload["exp"])
        == datetime.utcnow() + settings.JWT_TTL_ACCESS
    )
    assert payload["type"] == JWT_ACCESS_TYPE
    assert payload["token"] == customer_user.jwt_token_key

@freeze_time("2020-03-18 12:00:00")
def test_refresh_token_get_token_from_input(api_client, customer_user, settings):
    csrf_token = _get_new_csrf_token()
    refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})
    variables = {"token": refresh_token, "csrf_token": None}
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    assert not errors
    token = data.get("token")
    assert token
    payload = jwt_decode(token)
    assert payload["email"] == customer_user.email
    assert datetime.fromtimestamp(payload["iat"]) == datetime.utcnow()
    assert (
        datetime.fromtimestamp(payload["exp"])
        == datetime.utcnow() + settings.JWT_TTL_ACCESS
    )
    assert payload["type"] == JWT_ACCESS_TYPE

def test_refresh_token_get_token_missing_token(api_client, customer_user):
    variables = {"token": None, "csrf_token": "token"}
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.JWT_MISSING_TOKEN.name

def test_access_token_used_as_a_refresh_token(api_client, customer_user):
    csrf_token = _get_new_csrf_token()
    access_token = create_access_token(customer_user, {"csrfToken": csrf_token})
    variables = {"token": access_token, "csrf_token": csrf_token}
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.JWT_INVALID_TOKEN.name

def test_access_app_token_used_as_a_refresh_token(api_client, app, customer_user):
    csrf_token = _get_new_csrf_token()
    access_app_token = create_access_token_for_app(app, customer_user)
    variables = {"token": access_app_token, "csrf_token": csrf_token}
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.JWT_INVALID_TOKEN.name

def test_refresh_token_get_token_missing_csrf_token(api_client, customer_user):
    csrf_token = _get_new_csrf_token()
    refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})
    variables = {"token": None}
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME] = refresh_token
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME]["httponly"] = True
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.REQUIRED.name
    assert errors[0]["field"] == "csrfToken"

def test_refresh_token_get_token_incorrect_csrf_token(api_client, customer_user):
    csrf_token = _get_new_csrf_token()
    refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})
    variables = {"token": None, "csrf_token": "csrf_token"}
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME] = refresh_token
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME]["httponly"] = True
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.JWT_INVALID_CSRF_TOKEN.name

def test_refresh_token_when_expired(api_client, customer_user):
    with freeze_time("2018-05-31 12:00:01"):
        csrf_token = _get_new_csrf_token()
        refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})

    variables = {"token": None, "csrf_token": csrf_token}
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME] = refresh_token
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME]["httponly"] = True
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.JWT_SIGNATURE_EXPIRED.name

def test_refresh_token_when_incorrect_token(api_client, customer_user):
    csrf_token = _get_new_csrf_token()
    refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})
    variables = {"token": None, "csrf_token": csrf_token}
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME] = refresh_token + "wrong-token"
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME]["httponly"] = True
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    token = data.get("token")
    assert not token
    assert len(errors) == 1
    assert errors[0]["code"] == AccountErrorCode.JWT_DECODE_ERROR.name

def test_refresh_token_when_user_deactivated_token(api_client, customer_user):
    csrf_token = _get_new_csrf_token()
    refresh_token = create_refresh_token(customer_user, {"csrfToken": csrf_token})
    customer_user.jwt_token_key = "new_key"
    customer_user.save()
    variables = {"token": None, "csrf_token": csrf_token}
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME] = refresh_token
    api_client.cookies[JWT_REFRESH_TOKEN_COOKIE_NAME]["httponly"] = True
    response = api_client.post_graphql(MUTATION_TOKEN_REFRESH, variables)
    content = get_graphql_content(response)
    data = content["data"]["tokenRefresh"]
    errors = data["errors"]
    assert not data["token"]
    assert len(errors) == 1
...


predicates.d.ts

Source:predicates.d.ts Github


import { TSESTree } from '../../ts-estree';
declare const isArrowToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: "=>";
};
declare const isNotArrowToken: (token: TSESTree.Token) => boolean;
declare const isClosingBraceToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: "}";
};
declare const isNotClosingBraceToken: (token: TSESTree.Token) => boolean;
declare const isClosingBracketToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: "]";
};
declare const isNotClosingBracketToken: (token: TSESTree.Token) => boolean;
declare const isClosingParenToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: ")";
};
declare const isNotClosingParenToken: (token: TSESTree.Token) => boolean;
declare const isColonToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: ":";
};
declare const isNotColonToken: (token: TSESTree.Token) => boolean;
declare const isCommaToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: ",";
};
declare const isNotCommaToken: (token: TSESTree.Token) => boolean;
declare const isCommentToken: (token: TSESTree.Token) => token is TSESTree.Comment;
declare const isNotCommentToken: <T extends TSESTree.Token>(token: T) => token is Exclude<T, TSESTree.Comment>;
declare const isOpeningBraceToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: "{";
};
declare const isNotOpeningBraceToken: (token: TSESTree.Token) => boolean;
declare const isOpeningBracketToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: "[";
};
declare const isNotOpeningBracketToken: (token: TSESTree.Token) => boolean;
declare const isOpeningParenToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: "(";
};
declare const isNotOpeningParenToken: (token: TSESTree.Token) => boolean;
declare const isSemicolonToken: (token: TSESTree.Token) => token is TSESTree.PunctuatorToken & {
    value: ";";
};
declare const isNotSemicolonToken: (token: TSESTree.Token) => boolean;
export { isArrowToken, isClosingBraceToken, isClosingBracketToken, isClosingParenToken, isColonToken, isCommaToken, isCommentToken, isNotArrowToken, isNotClosingBraceToken, isNotClosingBracketToken, isNotClosingParenToken, isNotColonToken, isNotCommaToken, isNotCommentToken, isNotOpeningBraceToken, isNotOpeningBracketToken, isNotOpeningParenToken, isNotSemicolonToken, isOpeningBraceToken, isOpeningBracketToken, isOpeningParenToken, isSemicolonToken, };
...
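
These type-guard predicates are intended for lint rules that inspect raw tokens. Below is a minimal sketch of such a rule, assuming the predicates are re-exported through the ASTUtils namespace of @typescript-eslint/utils, as recent versions do; the rule name, message id, and the specific spacing check are illustrative only and not part of the declaration file above.

// Sketch of an ESLint rule that flags a space before a comma, using the
// token predicates above via the ASTUtils re-export.
const { ASTUtils } = require('@typescript-eslint/utils');

module.exports = {
  meta: {
    type: 'layout',
    messages: { noSpace: 'Unexpected space before comma.' },
    schema: [],
  },
  create(context) {
    const sourceCode = context.getSourceCode();
    return {
      Program() {
        for (const token of sourceCode.ast.tokens) {
          // Only commas are of interest; the predicate narrows the type.
          if (!ASTUtils.isCommaToken(token)) {
            continue;
          }
          const before = sourceCode.getTokenBefore(token);
          // Report when the comma does not directly touch the previous token.
          if (before && before.range[1] !== token.range[0]) {
            context.report({ node: token, messageId: 'noSpace' });
          }
        }
      },
    };
  },
};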


Using AI Code Generation


import React from 'react';
import { storiesOf } from '@storybook/react';
// boolean and action are used below but were missing from the original imports.
import { withKnobs, text, boolean, number } from '@storybook/addon-knobs/react';
import { action } from '@storybook/addon-actions';
import { withInfo } from '@storybook/addon-info';
import { withA11y } from '@storybook/addon-a11y';
import { withOptions } from '@storybook/addon-options';
import { withViewport } from '@storybook/addon-viewport';
import { withBackgrounds } from '@storybook/addon-backgrounds';
import { withNotes } from '@storybook/addon-notes';
import { withTests } from '@storybook/addon-jest';
import { withConsole } from '@storybook/addon-console';
import { withLinks } from '@storybook/addon-links';
import { withToken } from 'storybook-root';
import Button from '../src/components/Button';
import ButtonReadme from '../src/components/Button/README.md';
import ButtonTests from '../src/components/Button/__tests__/Button.test.js';

// The snippet viewer stripped the JSX element tags; <Button /> is assumed
// from the import above.
storiesOf('Button', module)
  .addDecorator(withKnobs)
  .addDecorator(withInfo)
  .addDecorator(withA11y)
  .addDecorator(withOptions)
  .addDecorator(withViewport)
  .addDecorator(withBackgrounds)
  .addDecorator(withNotes)
  .addDecorator(withTests)
  .addDecorator(withConsole)
  .addDecorator(withLinks)
  .addDecorator(withToken)
  .add('with text', () => (
    <Button
      text={text('Text', 'Hello Button')}
      loading={boolean('Loading', false)}
      disabled={boolean('Disabled', false)}
      onClick={action('clicked')}
    />
  ))
  .add('with some emoji', () => (
    <Button
      loading={boolean('Loading', false)}
      disabled={boolean('Disabled', false)}
      onClick={action('clicked')}
    />
  ))
  .add('with some emoji and text', () => (
    <Button
      text={text('Text', 'Hello Button')}
      loading={boolean('Loading', false)}
      disabled={boolean('Disabled', false)}
      onClick={action('clicked')}
    />
  ));
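
The story file above uses the legacy storiesOf API. For reference, here is a rough Component Story Format (CSF) sketch of the first story; it keeps withToken from 'storybook-root' as in the snippet, and swapping the knobs for CSF args is an assumption, since the original page only shows the storiesOf form.

import React from 'react';
import { action } from '@storybook/addon-actions';
import { withToken } from 'storybook-root';
import Button from '../src/components/Button';

export default {
  title: 'Button',
  component: Button,
  decorators: [withToken],
};

// CSF story: args replace the text/boolean knobs used above.
export const WithText = (args) => <Button {...args} onClick={action('clicked')} />;
WithText.args = {
  text: 'Hello Button',
  loading: false,
  disabled: false,
};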


Using AI Code Generation


// number is used below but was missing from the original import.
import { withKnobs, text, number } from '@storybook/addon-knobs';
import { withA11y } from '@storybook/addon-a11y';
import { withDesign } from 'storybook-addon-designs';
import { withToken } from 'storybook-addon-token';

export default {
  // title and other metadata were stripped by the snippet viewer
};

export const knobs = () => {
  const name = text('Name', 'Arunoda Susiripala');
  const age = number('Age', 89);
  return `<div>Hello! My name is ${name} and I'm ${age} years old.</div>`;
};

knobs.story = {
  parameters: {
    design: {
      // values stripped by the snippet viewer
    },
    token: {
      // values stripped by the snippet viewer
    },
  },
};

// Presumably .storybook/main.js in the original page:
module.exports = {
  stories: ['../src/**/*.stories.@(js|mdx)'],
};

// Presumably .storybook/preview.js in the original page:
import { addDecorator, addParameters } from '@storybook/vue';
import { withKnobs } from '@storybook/addon-knobs';
import { withA11y } from '@storybook/addon-a11y';
import { withDesign } from 'storybook-addon-designs';
import { withToken } from 'storybook-addon-token';

addDecorator(withKnobs);
addDecorator(withA11y);
addDecorator(withDesign);
addDecorator(withToken);
addParameters({
  design: {
...
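
The design and token parameter objects above were emptied by the snippet viewer. For orientation, storybook-addon-designs documents a design parameter shaped like the sketch below; the token shape is a guess, since 'storybook-addon-token' is not a widely documented addon, and the URL and file names are placeholders.

knobs.story = {
  parameters: {
    // Shape documented by storybook-addon-designs.
    design: {
      type: 'figma',
      url: 'https://www.figma.com/file/...', // placeholder URL
    },
    // Hypothetical shape for the token addon; check the addon's own docs.
    token: {
      files: ['tokens.css'], // placeholder
    },
  },
};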


Using AI Code Generation


import { withToken } from 'storybook-root-decorator'
import { withKnobs } from '@storybook/addon-knobs'
import { withA11y } from '@storybook/addon-a11y'

export default {
  // metadata stripped by the snippet viewer
}

export const Example = () => <div>Example</div>

// Presumably .storybook/preview.js in the original page:
import { withToken } from 'storybook-root-decorator'

export const parameters = {
  backgrounds: {
    // The entries below lost their name/value pairs to the snippet viewer;
    // the values array is assumed from Storybook's documented backgrounds
    // parameter shape.
    values: [
      {
      },
      {
      },
    ],
  },
}

// Presumably .storybook/manager.js in the original page:
import { addons } from '@storybook/addons'
import { create } from '@storybook/theming'

addons.setConfig({
  theme: create({
  }),
})
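
The create({}) call in the manager config above lost its options the same way. A minimal sketch of a filled-in theme using documented @storybook/theming options follows; the brand values are placeholders.

import { addons } from '@storybook/addons'
import { create } from '@storybook/theming'

addons.setConfig({
  theme: create({
    base: 'light',                   // 'light' or 'dark'
    brandTitle: 'My Storybook',      // placeholder
    brandUrl: 'https://example.com', // placeholder
  }),
})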


Using AI Code Generation


const { token } = require('storybook-root');


Using AI Code Generation


import { addDecorator } from '@storybook/react';
import { withToken } from 'storybook-root';

addDecorator(withToken);

// Presumably a separate .storybook config file in the original page; its
// options were stripped by the snippet viewer:
module.exports = {
};

import 'storybook-root/register';
import { token } from 'storybook-root';
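
The snippet registers withToken with the legacy addDecorator API. In Storybook 6 and later, the equivalent is exporting a decorators array from .storybook/preview.js:

// .storybook/preview.js (Storybook 6+ equivalent of addDecorator above)
import { withToken } from 'storybook-root';

export const decorators = [withToken];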


Using AI Code Generation


import { withToken } from 'storybook-addon-designs';

export default {
  parameters: {
    design: withToken(token),
  },
};

export const Default = () => <Component />;
Default.story = {
};


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run storybook-root automation tests on the LambdaTest cloud grid

Perform automation testing on 3,000+ real desktop and mobile devices online.
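
A minimal sketch of pointing a selenium-webdriver script at the LambdaTest hub; the credentials, capability values, and Storybook URL below are placeholders to replace with your own.

const { Builder } = require('selenium-webdriver');

(async () => {
  // Placeholders: set LT_USERNAME / LT_ACCESS_KEY in your environment.
  const driver = await new Builder()
    .usingServer(
      `https://${process.env.LT_USERNAME}:${process.env.LT_ACCESS_KEY}@hub.lambdatest.com/wd/hub`
    )
    .withCapabilities({
      browserName: 'Chrome',
      platformName: 'Windows 10', // assumed capability name
    })
    .build();
  try {
    // Point the session at a deployed Storybook instance (placeholder URL).
    await driver.get('https://example.com/storybook/index.html');
    console.log(await driver.getTitle());
  } finally {
    await driver.quit();
  }
})();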
