How to use the token method in Tempest

Best Python code snippet using tempest_python

token-assert.js

Source: token-assert.js (GitHub)

var chai = require('chai');
var expect = chai.expect;
var sinon = require('sinon');
// The callCount assertions below come from sinon-chai; the original suite
// presumably registers it in a shared setup, so it is loaded here explicitly.
chai.use(require('sinon-chai'));
var JsFile = require('../../lib/js-file');
var TokenAssert = require('../../lib/token-assert');
var getPosition = require('../../lib/errors').getPosition;

describe('token-assert', function() {
    function createJsFile(sources) {
        return new JsFile({
            filename: 'example.js',
            source: sources
        });
    }

    describe('whitespaceBetween', function() {
        it('should trigger error on missing whitespace between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var first = file.getTree().getFirstToken();
            tokenAssert.whitespaceBetween({
                token: first,
                nextToken: file.getNextToken(first)
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('Missing space between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should accept message for missing whitespace between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(!!onError.getCall(0).args[0].message).to.equal(true);
        });

        it('should not trigger error on existing whitespace between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });
    });

    describe('spacesBetween', function() {
        it('should do nothing if either token or nextToken is not specified', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            tokenAssert.spacesBetween({
                token: token,
                exactly: 10
            });
            tokenAssert.spacesBetween({
                nextToken: token,
                exactly: 10
            });
            expect(onError).to.have.callCount(0);
        });

        describe('exactly', function() {
            it('should trigger error on invalid space count between tokens', function() {
                var file = createJsFile('x = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('2 spaces required between x and =');
                expect(getPosition(error).line).to.equal(1);
                expect(getPosition(error).column).to.equal(1);
            });

            it('should not trigger error on newline between tokens', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on valid space count between tokens', function() {
                var file = createJsFile('x   = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 3
                });
                expect(onError).to.have.callCount(0);
            });

            it('should accept message for invalid space count between tokens', function() {
                var file = createJsFile('x = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2,
                    message: 'Custom message'
                });
                expect(onError.getCall(0).args[0].message).to.equal('Custom message');
            });

            it('should error, but not fix, when a comment exists between the two tokens', function() {
                var file = createJsFile('x/*blockcomment*/=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                var token = file.getTree().getFirstToken();
                var yToken = file.findNextToken(token, 'Identifier', 'y');
                tokenAssert.on('error', onError);
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: file.findNextToken(token, 'Identifier', 'y'),
                    exactly: 5
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.fix).to.equal(undefined);
                expect(file.getWhitespaceBefore(yToken)).to.equal('');
            });
        });

        describe('atMost', function() {
            it('should trigger error on invalid space count between tokens', function() {
                var file = createJsFile('x  = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 1
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('at most 1 spaces required between x and =');
                expect(getPosition(error).line).to.equal(1);
                expect(getPosition(error).column).to.equal(1);
            });

            it('should not trigger error on valid space count between tokens', function() {
                var file = createJsFile('x = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 3
                });
                expect(onError).to.have.callCount(0);
            });

            it('should accept message for invalid space count between tokens', function() {
                var file = createJsFile('x  = y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.spacesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 1,
                    message: 'Custom message'
                });
                expect(onError.getCall(0).args[0].message).to.equal('Custom message');
            });
        });

        it('should trigger error on invalid maximum space count between tokens', function() {
            var file = createJsFile('x  = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 1
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('at most 1 spaces required between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should trigger plural error on invalid maximum space count between tokens', function() {
            var file = createJsFile('x   = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 2
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('at most 2 spaces required between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on newline between tokens for maximum spaces', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 1
            });
            expect(onError).to.have.not.callCount(1);
        });

        it('should not trigger error on valid maximum space count between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 3
            });
            expect(onError).to.have.not.callCount(1);
        });

        it('should accept message for invalid maximum space count between tokens', function() {
            var file = createJsFile('x  = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.whitespaceBetween({
                token: token,
                nextToken: nextToken,
                atMost: 1,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });
    });

    describe('noWhitespaceBetween', function() {
        it('should trigger error on existing whitespace between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('Unexpected whitespace between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should trigger error on newline between tokens with disallowNewLine option', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken,
                disallowNewLine: true
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('Unexpected whitespace between x and =');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on missing whitespace between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should accept message for existing space count between tokens', function() {
            var file = createJsFile('x = y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.noWhitespaceBetween({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });
    });

    describe('sameLine', function() {
        it('should trigger error on unexpected newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('x and = should be on the same line');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on missing newline between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should accept message for unexpected newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });

        it('should not throw if token or nextToken properties are undefined', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.sameLine({
                token: undefined,
                nextToken: undefined
            });
        });

        it('should move tokens instead of collapsing lines when asked', function() {
            var file = createJsFile('x\n + y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.on('error', function(errorInfo) {
                errorInfo.fix();
            });
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '+');
            tokenAssert.sameLine({
                token: token,
                nextToken: nextToken,
                stickToPreviousToken: true
            });
            expect(file.render()).to.equal('x +\n y;');
        });
    });

    describe('differentLine', function() {
        it('should trigger error on missing newline between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(1);
            var error = onError.getCall(0).args[0];
            expect(error.message).to.contain('x and = should be on different lines');
            expect(getPosition(error).line).to.equal(1);
            expect(getPosition(error).column).to.equal(1);
        });

        it('should not trigger error on existing newline between tokens', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should not trigger on additional newlines between tokens', function() {
            var file = createJsFile('x\n\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should not trigger on additional newlines between tokens', function() {
            var file = createJsFile('x\n\n=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken
            });
            expect(onError).to.have.callCount(0);
        });

        it('should accept message for missing newline between tokens', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            var token = file.getTree().getFirstToken();
            var nextToken = file.findNextToken(token, 'Punctuator', '=');
            tokenAssert.differentLine({
                token: token,
                nextToken: nextToken,
                message: 'Custom message'
            });
            expect(onError.getCall(0).args[0].message).to.equal('Custom message');
        });

        it('should not throw if token or nextToken properties are undefined', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.differentLine({
                token: undefined,
                nextToken: undefined
            });
        });
    });

    describe('linesBetween', function() {
        describe('error messages', function() {
            beforeEach(function() {
                var file = createJsFile('x=y;');
                this.tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                this.tokenAssert.on('error', onError);
                this.firstToken = file.getTree().getFirstToken();
                this.secondToken = this.firstToken.getNextCodeToken();
            });

            it('should throw if no options are specified', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken
                    });
                }).bind(this)).to.throw(/You must specify at least one option/);
            });

            it('should throw if atLeast and exactly are specified', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken,
                        atLeast: 2,
                        exactly: 1
                    });
                }).bind(this)).to.throw(/You cannot specify atLeast or atMost with exactly/);
            });

            it('should throw if atMost and exactly are specified', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken,
                        atMost: 2,
                        exactly: 1
                    });
                }).bind(this)).to.throw(/You cannot specify atLeast or atMost with exactly/);
            });

            it('should throw if atLeast and atMost are in conflict', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.secondToken,
                        atLeast: 3,
                        atMost: 2
                    });
                }).bind(this)).to.throw(/atLeast and atMost are in conflict/);
            });

            it('should throw if token and nextToken are the same', function() {
                expect((function() {
                    this.tokenAssert.linesBetween({
                        token: this.firstToken,
                        nextToken: this.firstToken,
                        atLeast: 1
                    });
                }).bind(this))
                    .to.throw(/You cannot specify the same token as both token and nextToken/);
            });
        });

        it('should not throw if token or nextToken properties are undefined', function() {
            var file = createJsFile('x\n=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.linesBetween({
                token: undefined,
                nextToken: undefined,
                exactly: 1
            });
        });

        describe('exactly', function() {
            it('should trigger error on too few newlines', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have exactly 2 line(s) between them');
            });

            it('should trigger error on too many specified newlines', function() {
                var file = createJsFile('x\n\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have exactly 2 line(s) between them');
            });

            it('should not trigger error on correct specified newlines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on exactly 0 blank lines', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 1
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on multiple specified newlines negative', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var lastToken = file.getLastToken();
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: lastToken,
                    exactly: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should edit the whitespaceBefore with too few lines between', function() {
                var file = createJsFile(' x\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.findNextToken(
                    file.getTree().getFirstToken(),
                    'Identifier'
                );
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n\n ');
            });

            it('should edit the whitespaceBefore with too many lines between', function() {
                var file = createJsFile(' x\n\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n\n ');
            });

            it('should not edit the whitespaceBefore with correct lines between', function() {
                var file = createJsFile(' x\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 2
                });
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n\n ');
            });

            it('should error, but not fix, when a comment exists between the two tokens (with newline)', function() {
                var file = createJsFile('x\n//linecomment\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    exactly: 5
                });
                expect(onError).to.have.callCount(1);
                expect(file.getWhitespaceBefore(nextToken)).to.equal('\n');
            });
        });

        describe('atLeast', function() {
            it('should trigger on too few lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 3
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have at least 3 line(s) between them');
            });

            it('should not trigger with exact lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger error on too many lines', function() {
                var file = createJsFile('x\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should edit the whitespaceBefore with too few lines between', function() {
                var file = createJsFile('x\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });

            it('should edit the whitespaceBefore with too few lines (spaced then non spaced) between', function() {
                var file = createJsFile('x \n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 4
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal(' \n\n\n\n ');
            });

            it('should edit the whitespaceBefore with too few lines (non spaced then spaced) between', function() {
                var file = createJsFile('x\n \n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 4
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n \n\n\n ');
            });

            it('should not edit the whitespaceBefore with too many lines between', function() {
                var file = createJsFile('x\n\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n\n ');
            });

            it('should not edit the whitespaceBefore with correct lines between', function() {
                var file = createJsFile('x\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });
        });

        describe('atMost', function() {
            it('should not trigger on too few lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should not trigger with exact lines', function() {
                var file = createJsFile('x\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(onError).to.have.callCount(0);
            });

            it('should trigger error on too many lines', function() {
                var file = createJsFile('x\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 1
                });
                expect(onError).to.have.callCount(1);
                var error = onError.getCall(0).args[0];
                expect(error.message).to.contain('x and = should have at most 1 line(s) between them');
            });

            it('should not edit the whitespaceBefore with too few lines between', function() {
                var file = createJsFile('x\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n ');
            });

            it('should edit the whitespaceBefore with too many lines between', function() {
                var file = createJsFile('x\n\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });

            it('should not edit the whitespaceBefore with correct lines between', function() {
                var file = createJsFile('x\n\n =y;');
                var tokenAssert = new TokenAssert(file);
                tokenAssert.on('error', function(errorInfo) {
                    errorInfo.fix();
                });
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atMost: 2
                });
                expect(file.getWhitespaceBefore(file.getNextToken(token))).to.equal('\n\n ');
            });
        });

        describe('between', function() {
            it('should not trigger if within range', function() {
                var file = createJsFile('x\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 1,
                    atMost: 3
                });
                expect(onError).to.have.callCount(0);
            });

            it('should trigger if below range', function() {
                var file = createJsFile('x\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 2,
                    atMost: 3
                });
                expect(onError).to.have.callCount(1);
            });

            it('should trigger if above range', function() {
                var file = createJsFile('x\n\n\n\n=y;');
                var tokenAssert = new TokenAssert(file);
                var onError = sinon.spy();
                tokenAssert.on('error', onError);
                var token = file.getTree().getFirstToken();
                var nextToken = file.findNextToken(token, 'Punctuator', '=');
                tokenAssert.linesBetween({
                    token: token,
                    nextToken: nextToken,
                    atLeast: 1,
                    atMost: 2
                });
                expect(onError).to.have.callCount(1);
            });
        });
    });

    describe('indentation', function() {
        it('should not trigger on correct indentation', function() {
            var file = createJsFile('x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            tokenAssert.indentation({
                token: file.getProgram().getFirstToken(),
                actual: 0,
                expected: 0,
                indentChar: ' '
            });
            expect(onError).to.have.callCount(0);
        });

        it('should trigger on incorrect indentation', function() {
            var file = createJsFile('  x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            tokenAssert.indentation({
                token: file.getProgram().getFirstToken().getNextCodeToken(),
                actual: 2,
                expected: 0,
                indentChar: ' '
            });
            expect(onError).to.have.not.callCount(0);
        });

        it('should fix whitespace on incorrect indentation for the first token', function() {
            var file = createJsFile('  x=y;');
            var tokenAssert = new TokenAssert(file);
            var onError = sinon.spy();
            tokenAssert.on('error', onError);
            tokenAssert.indentation({
                token: file.getProgram().getFirstToken().getNextCodeToken(),
                actual: 2,
                expected: 0,
                indentChar: ' '
            });
            expect(file.getWhitespaceBefore(file.getFirstToken())).to.equal('');
        });

        it('should fix docblock on incorrect overindentation', function() {
            var file = createJsFile('  /*\n   *\n   */\nx=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.on('error', function(errorInfo) {
                errorInfo.fix();
            });
            var comment = file.getProgram().getFirstToken().getNextNonWhitespaceToken();
            tokenAssert.indentation({
                token: comment,
                actual: 2,
                expected: 0,
                indentChar: ' '
            });
            comment = file.getProgram().getFirstToken();
            expect(file.getWhitespaceBefore(comment)).to.equal('');
            expect(comment.value).to.equal('\n *\n ');
        });

        it('should fix docblock on incorrect underindentation', function() {
            var file = createJsFile('  /*\n   *\n   */\nx=y;');
            var tokenAssert = new TokenAssert(file);
            tokenAssert.on('error', function(errorInfo) {
                errorInfo.fix();
            });
            var comment = file.getProgram().getFirstToken().getNextNonWhitespaceToken();
            tokenAssert.indentation({
                token: comment,
                actual: 2,
                expected: 4,
                indentChar: ' '
            });
            comment = file.getProgram().getFirstToken().getNextNonWhitespaceToken();
            expect(file.getWhitespaceBefore(comment)).to.equal('    ');
            expect(comment.value).to.equal('\n     *\n     ');
        });
    });
...
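
For orientation, TokenAssert is the assertion helper that JSCS rules reach through errors.assert, so each check exercised above corresponds to something a rule can enforce. The sketch below shows how a hypothetical rule might apply the same whitespaceBetween assertion around "=" tokens; the rule and option names are invented for illustration, while configure/getOptionName/check and errors.assert are the standard JSCS rule surface.

// Hypothetical rule sketch (illustrative names): require a space on both
// sides of "=" by delegating to the TokenAssert methods tested above.
module.exports = function() {};

module.exports.prototype = {
    configure: function(option) {
        // e.g. "requireSpaceAroundAssignment": true in .jscsrc
        this._option = option;
    },

    getOptionName: function() {
        return 'requireSpaceAroundAssignment';
    },

    check: function(file, errors) {
        file.iterateTokensByTypeAndValue('Punctuator', '=', function(token) {
            // errors.assert exposes the TokenAssert instance for this file
            errors.assert.whitespaceBetween({
                token: file.getPrevToken(token),
                nextToken: token
            });
            errors.assert.whitespaceBetween({
                token: token,
                nextToken: file.getNextToken(token)
            });
        });
    }
};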

cimple.py

Source: cimple.py (GitHub)

1import sys2import string34#############################################################################5# LEXICAL ANALYZER #6#############################################################################7line = 189family = ''10lexical = ''11tokenType = ''1213def lexicalAnalyzer():1415 # counting variables16 global line #Current line17 global family18 global lexical19 global tokenType2021 family = ''22 tokenType = ''23 lexical = ''24 character = 0 #counting number of letter2526 token_char = file.read(1)2728 # TAB or SPACE or newline29 while token_char == '\t' or token_char == ' ' or token_char == '\r':30 token_char = file.read(1)3132 if token_char == '\n':33 line += 134 return lexicalAnalyzer()3536 # Letter37 elif token_char.isalpha():38 lexical = token_char39 token_char = file.read(1)40 character += 141 while token_char.isalpha() or token_char.isdigit():42 if character > 30:43 print(('Error in line %d: Word lenght surpassed limit of 30.', line))44 lexical = lexical + token_char45 character += 146 token_char = file.read(1)47 #print('\t( %s )' % (token_char))48 file.seek(file.tell() - 1)49 family = 'Keyword'5051 if lexical == 'program':52 tokenType = 'program_token'5354 elif lexical == 'declare':55 tokenType = 'declare_token'5657 elif lexical == 'if':58 tokenType = 'if_token'5960 elif lexical == 'else':61 tokenType = 'else_token'6263 elif lexical == 'while':64 tokenType = 'while_token'6566 elif lexical == 'switchcase':67 tokenType = 'switchcase_token'6869 elif lexical == 'forcase':70 tokenType = 'forcase_token'7172 elif lexical == 'incase':73 tokenType = 'incase_token'7475 elif lexical == 'case':76 tokenType = 'case_token'7778 elif lexical == 'default':79 tokenType = 'default_token'8081 elif lexical == 'not':82 tokenType = 'not_token'8384 elif lexical == 'and':85 tokenType = 'and_token'8687 elif lexical == 'or':88 tokenType = 'or_token'8990 elif lexical == 'function':91 tokenType = 'function_token'9293 elif lexical == 'procedure':94 tokenType = 'procedure_token'9596 elif lexical == 'call':97 tokenType = 'call_token'9899 elif lexical == 'return':100 tokenType = 'return_token'101102 elif lexical == 'in':103 tokenType = 'in_token'104105 elif lexical == 'inout':106 tokenType = 'inout_token'107108 elif lexical == 'input':109 tokenType = 'input_token'110111 elif lexical == 'print':112 tokenType = 'print_token'113 else:114 tokenType = 'id_token'115 family = 'Identifier'116117 # Digit118 elif token_char.isdigit():119 lexical = token_char120 token_char = file.read(1)121122 while token_char.isdigit():123 lexical = lexical + token_char124 token_char = file.read(1)125 num = int(lexical)126 if (num < -4294967297 or num > 4294967295):127 print('Error in line %d: Invalid range of number %s ( -2^32+1 > number > 2^32-1).' 
% (line, lexical))128 sys.exit(0)129 file.seek(file.tell() - 1)130 tokenType = 'INTEGER_token'131132 family = 'Number'133134 # '+' or '-'135 elif token_char == '+' or token_char == '-':136 lexical = token_char137 if lexical == '+':138 tokenType = 'plus_token'139 elif lexical == '-':140 tokenType = 'minus_token'141142 family = 'Add_Operator'143144 # '*' or '/'145 elif token_char == '*' or token_char == '/':146 lexical = token_char147 if lexical == '*':148 tokenType = 'multiply_token'149 elif lexical == '/':150 tokenType = 'division_token'151152 family = 'Mul_Operator'153154 # ':='155 elif token_char == ':':156 lexical = lexical + token_char157 token_char = file.read(1)158 if token_char == '=':159 tokenType = 'assign_token'160 lexical = lexical + token_char161 token_char = file.read(1)162 file.seek(file.tell() - 1)163164 family = 'Assignment'165166 # ',' or ';'167 elif token_char == ',' or token_char == ';':168 lexical = token_char169 if lexical == ',':170 tokenType = 'comma_token'171 elif lexical == ';':172 tokenType = 'semicolon_token'173174 family = 'Delimiter'175176 # '=' or '<>' or '<=' or '<' or '>=' or '>'177 elif token_char == '=' or token_char == '<' or token_char == '>':178 lexical = token_char179 if lexical == '=':180 token_char = file.read(1)181 tokenType = 'equals_token'182 lexical = lexical + token_char183 elif lexical == '<':184 token_char = file.read(1)185 if token_char == '>':186 tokenType = 'notequal_token'187 lexical = lexical + token_char188189 elif token_char == '=':190 tokenType = 'lessorequals_token'191 lexical = lexical + token_char192 else:193 tokenType = 'less_token'194 file.seek(file.tell() - 1)195 elif lexical == '>':196 token_char = file.read(1)197 if token_char == '=':198 tokenType = 'greaterorequals_token'199 lexical = lexical + token_char200 else:201 tokenType = 'greater_token'202 file.seek(file.tell() - 1)203204 family = 'Rel_Operator'205 # '(' or ')' or '{' or '}' or '[' or ']'206 elif token_char == '(' or token_char == ')' or token_char == '{' or token_char == '}' or token_char == '[' or token_char == ']':207 lexical = token_char208 if lexical == '(':209 tokenType = 'leftbracket_token'210211 elif lexical == ')':212 tokenType = 'rightbracket_token'213214 elif lexical == '{':215 tokenType = 'begin_token'216217 elif lexical == '}':218 tokenType = 'end_token'219220 elif lexical == ']':221 tokenType = 'rightsquarebracket_token'222223 elif lexical == '[':224 tokenType = 'leftsquarebracket_token'225226 family = 'Group_Symbol'227228 # End program229 elif token_char == '.':230 lexical = token_char231 tokenType = 'endprogram_token'232233 family = 'Delimiter'234235 # Comments236 elif token_char == '#':237 lexical = token_char238 token_char = file.read(1)239 flag = False240 while token_char != '':241 token_char = file.read(1)242 if token_char == '#':243 flag= True244 break245 if flag == True:246 lexicalAnalyzer()247 else:248 print('Error in line %d: "#" is missing. The comment was supposed to be closed.' 
% (line))249 sys.exit(0)250251 elif token_char == '':252 lexical = ''253 tokenType = 'eof_token'254255 else:256 print('Error in line %d : character is not recognised as a language character/symbol ' % (line))257 sys.exit(0)258259 ### If it finds a comment, it prints the next lexical twice ###260 print('Line: %d \t%s\t\t\tfamily: %s ' % (line,lexical,family))261262 return tokenType263264265#############################################################################266# SYNTAX ANALYZER #267#############################################################################268269def syntaxAnalyzer():270271 global tokenType272 global lexical273274 def program():275 # program ID block .276277 # "program" is the starting symbol278 # followed by its name and a block279 # every program ends with a fullstop280 global tokenType281 global lexical282283 tokenType = lexicalAnalyzer()284285 if tokenType == 'program_token':286 tokenType = lexicalAnalyzer()287 if tokenType == 'id_token':288 programName = lexical289 tokenType = lexicalAnalyzer()290 block()291 if tokenType == 'endprogram_token':292 tokenType = lexicalAnalyzer()293 if tokenType == 'eof_token':294 print("\nCompilation successfully completed without errors.\n")295 return296 else:297 print('Error in line %d: No characters are allowed after the fullstop indicating the end of the program.' % (line))298 sys.exit(0)299 else:300 print('Error in line %d: A fullstop expected, the program should end with a fullstop.' % (line))301 sys.exit(0)302 else:303 print('Error in line %d: The program name expected after the keyword "program" but found "%s" .' % (line, lexical))304 sys.exit(0)305 else:306 print('Error in line %d: The program must start with the keyword "program" but instead it starts with the word "%s".' % (line, lexical))307 sys.exit(0)308309310 def block():311 # { declarations subprograms statements }312313 # a block consists of declarations, subprograms and statements314 global tokenType315316 if tokenType == 'begin_token':317 tokenType = lexicalAnalyzer()318 if tokenType == 'declare_token':319 declarations()320 subprograms()321 blockStatements()322 if tokenType == 'end_token':323 tokenType = lexicalAnalyzer()324 else:325 print('Error in line %d: The "}" was expected.' % line)326 sys.exit(0)327 else:328 print('Error in line %d: The "{" was expected .' % line)329 sys.exit(0)330 return331332333 def declarations():334 # ( declare varlist ; ) *335336 # declaration of variables337 # kleene star implies zero or more "declare" statements338 global tokenType339340 while tokenType == 'declare_token':341 tokenType = lexicalAnalyzer()342 varlist()343 if tokenType == 'semicolon_token':344 tokenType = lexicalAnalyzer()345 else:346 print('Error in line %d: The keyword ";" was expected\n' % line)347 sys.exit(0)348 return349350351 def varlist():352 # ID ( , ID ) *353 # | e354355 # a list of variables following the declaration keyword356 global tokenType357358 if tokenType == "id_token":359 tokenType = lexicalAnalyzer()360 while tokenType == 'comma_token':361 tokenType = lexicalAnalyzer()362 if tokenType == 'id_token':363 tokenType = lexicalAnalyzer()364 else:365 print('Error in line %d: A variable is expected after comma (,). 
' % line)366 sys.exit(0)367 return368369370 def subprograms():371 # ( subprogram ) *372373 # zero or more subprograms374 global tokenType375376 while tokenType == 'procedure_token' or tokenType == 'function_token':377 subprogram()378 return379380381 def subprogram():382 # a subprogram is a function or a procedure383 # followed by parameters and block384 global tokenType385 global lexical386387 # function ID ( formalparlist ) block388 if tokenType == 'function_token':389 tokenType = lexicalAnalyzer()390 if tokenType == 'id_token':391 tokenType = lexicalAnalyzer()392 if tokenType == "leftbracket_token":393 tokenType = lexicalAnalyzer()394 formalparlist()395 if tokenType == 'rightbracket_token':396 tokenType = lexicalAnalyzer()397 block()398 else:399 print('Error in line %d: The ")" was expected .' % line)400 sys.exit(0)401 else:402 print('Error in line %d: The "(" was expected .' % line)403 sys.exit(0)404 else:405 print('Error in line %d: A variable is expected after the keyword "function".' % line)406 sys.exit(0)407408 # procedure ID ( formalparlist ) block409 elif tokenType == 'procedure_token':410 tokenType = lexicalAnalyzer()411 if tokenType == 'id_token':412 name = lexical413 tokenType = lexicalAnalyzer()414 if tokenType == "leftbracket_token":415 tokenType = lexicalAnalyzer()416 formalparlist()417 if tokenType == 'rightbracket_token':418 block()419 else:420 print('Error in line %d: The ")" was expected .' % line)421 sys.exit(0)422 else:423 print('Error in line %d: The "(" was expected .' % line)424 sys.exit(0)425 else:426 print('Error in line %d: A variable is expected after the keyword "procedure".' % line)427 sys.exit(0)428 else:429 print('Error in line %d: The keyword "function" or "procedure" was expected.' % line)430 sys.exit(0)431 return432433434 def formalparlist():435 # formalparitem ( , formalparitem ) *436437 # list of formal parameters438 # one or more parameters are allowed439 global tokenType440441 formalparitem()442 while tokenType == 'comma_token':443 tokenType = lexicalAnalyzer()444 if tokenType == 'in_token' or tokenType == 'inout_token':445 formalparitem()446 else:447 print('Error in line %d: Expected "in" or "inout" after the comma.' % line)448 sys.exit()449 return450451452 def formalparitem():453 # a formal parameters454 # "in": by value, "inout": by reference455 global tokenType456457 # in ID458 if tokenType == 'in_token':459 tokenType = lexicalAnalyzer()460 if tokenType == 'id_token':461 tokenType = lexicalAnalyzer()462 return463 else:464 print('Error in line %d: A variable is expected after the keyword "in".' % line)465 sys.exit(0)466467 # inout ID468 elif tokenType == 'inout_token':469 tokenType = lexicalAnalyzer()470 if tokenType == 'id_token':471 tokenType = lexicalAnalyzer()472 return473 else:474 print('Error in line %d: A variable is expected after the keyword "inout".' % line)475 sys.exit(0)476 else:477 print('Error in line %d: The keyword "in" or "inout" was expected.' % line)478 sys.exit(0)479 return480481482 def statements():483 # statement ;484 # | { statement ( ; statement ) * }485486 # one or more statements487 # more than one statements should be grouped with brackets488 global tokenType489490 if tokenType == 'begin_token':491 tokenType = lexicalAnalyzer()492 blockStatements()493 if tokenType == 'end_token':494 tokenType = lexicalAnalyzer()495 return496 else:497 print('Error in line %d: The "}" was expected .' 
% line)498 sys.exit(0)499 else:500 statement()501 if tokenType == 'semicolon_token':502 tokenType = lexicalAnalyzer()503 else:504 print('Error in line %d: The keyword ";" was expected\n' % line)505 sys.exit(0)506 return507508509 def blockStatements():510 # statement ( ; statement ) *511512 # statements cosidered as block (used in program and subprogram)513514 global tokenType515516 statement()517 while tokenType == 'semicolon_token':518 tokenType = lexicalAnalyzer()519 statement()520 return521522523 def statement():524 # one statement525526 global tokenType527528 # assignStat529 if tokenType == 'id_token':530 assignStat()531 # ifStat532 elif tokenType == 'if_token':533 ifStat()534 # whileStat535 elif tokenType == 'while_token':536 whileStat()537 # switchcaseStat538 elif tokenType == 'switchcase_token':539 switchcaseStat()540 # forcaseStat541 elif tokenType == 'forcase_token':542 forcaseStat()543 # incaseStat544 elif tokenType == 'incase_token':545 incaseStat()546 # callStat547 elif tokenType == 'call_token':548 callStat()549 # returnStat550 elif tokenType == 'return_token':551 returnStat()552 # inputStat553 elif tokenType == 'input_token':554 inputStat()555 # printStat556 elif tokenType == 'print_token':557 printStat()558 return559560561 def assignStat():562 # ID := expression563564 # assignment statement565 global tokenType566 global lexical567568 if tokenType == 'id_token':569 id = lexical570 tokenType = lexicalAnalyzer()571 if tokenType == 'assign_token':572 tokenType = lexicalAnalyzer()573 expression()574 else:575 print('Error in line %d: The assignment symbol ":=" was expected.' % line)576 sys.exit(0)577 else:578 print('Error in line %d: The "id" was expected.' % line)579 sys.exit(0)580 return581582583 def ifStat():584 # if ( condition ) statements585 # elsepart586587 # if statement588 global tokenType589590 if tokenType == 'if_token':591 tokenType = lexicalAnalyzer()592 if tokenType == 'leftbracket_token':593 tokenType = lexicalAnalyzer()594 condition()595 if tokenType == 'rightbracket_token':596 tokenType = lexicalAnalyzer()597 statements()598 elsePart()599 else:600 print('Error in line %d: The ")" was expected .' % line)601 sys.exit(0)602 else:603 print('Error in line %d: The "(" was expected .' % line)604 sys.exit(0)605 else:606 print('Error in line %d: The "if" was expected.' % line)607 sys.exit(0)608 return609610611 def elsePart():612 # else statements613 # | e614615 # else part is optional616 global tokenType617618 if tokenType == 'else_token':619 tokenType = lexicalAnalyzer()620 statements()621 return622623624 def whileStat():625 # while ( condition ) statements626627 # while statement628 global tokenType629630 if tokenType == 'while_token':631 tokenType = lexicalAnalyzer()632 if tokenType == 'leftbracket_token':633 tokenType = lexicalAnalyzer()634 condition()635 if tokenType == 'rightbracket_token':636 tokenType = lexicalAnalyzer()637 statements()638639 else:640 print('Error in line %d: The ")" was expected.' % line)641 sys.exit(0)642 else:643 print('Error in line %d: The "(" was expected.' % line)644 sys.exit(0)645 else:646 print('Error in line %d: The "while" was expected.' 
        % line)
        sys.exit(0)
    return

def switchcaseStat():
    # switchcase
    # ( case ( condition ) statements ) *
    # default statements

    # switch statement
    global tokenType

    if tokenType == 'switchcase_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'case_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                condition()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    statements()
                    while tokenType == 'default_token':
                        tokenType = lexicalAnalyzer()
                        statements()
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "case" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "switchcase" was expected.' % line)
        sys.exit(0)
    return

def forcaseStat():
    # forcase
    # ( case ( condition ) statements ) *
    # default statements

    # forcase statement
    global tokenType

    if tokenType == 'forcase_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'case_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                condition()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    statements()
                    while tokenType == 'default_token':
                        tokenType = lexicalAnalyzer()
                        statements()
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "case" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "forcase" was expected.' % line)
        sys.exit(0)
    return

def incaseStat():
    # incase
    # ( case ( condition ) statements )*

    # incase statement
    global tokenType

    if tokenType == 'incase_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'case_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                condition()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    statements()
                    while tokenType == 'default_token':
                        tokenType = lexicalAnalyzer()
                        statements()
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "case" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "incase" was expected.' % line)
        sys.exit(0)
    return

def returnStat():
    # return ( expression )

    # return statement
    global tokenType

    if tokenType == 'return_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftbracket_token':
            tokenType = lexicalAnalyzer()
            expression()
            if tokenType == 'rightbracket_token':
                tokenType = lexicalAnalyzer()
            else:
                print('Error in line %d: The ")" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "(" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "return" was expected.' % line)
        sys.exit(0)
    return

def callStat():
    # call ID ( actualparlist )

    # call statement
    global tokenType
    global lexical

    if tokenType == 'call_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'id_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'leftbracket_token':
                tokenType = lexicalAnalyzer()
                actualparlist()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    return
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "(" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "id" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "call" was expected.' % line)
        sys.exit(0)

def printStat():
    # print ( expression )

    # print statement
    global tokenType

    if tokenType == 'print_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftbracket_token':
            tokenType = lexicalAnalyzer()
            expression()
            if tokenType == 'rightbracket_token':
                tokenType = lexicalAnalyzer()
            else:
                print('Error in line %d: The ")" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "(" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "print" was expected.' % line)
        sys.exit(0)
    return

def inputStat():
    # input ( ID )

    # input statement
    global tokenType
    global lexical

    if tokenType == 'input_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftbracket_token':
            tokenType = lexicalAnalyzer()
            if tokenType == 'id_token':
                tokenType = lexicalAnalyzer()
                if tokenType == 'rightbracket_token':
                    tokenType = lexicalAnalyzer()
                    return
                else:
                    print('Error in line %d: The ")" was expected.' % line)
                    sys.exit(0)
            else:
                print('Error in line %d: The "id" was expected.' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The "(" was expected.' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The "input" was expected.' % line)
        sys.exit(0)

def actualparlist():
    # actualparitem ( , actualparitem ) *
    # | e

    # list of actual parameters
    global tokenType

    actualparitem()
    while tokenType == 'comma_token':
        tokenType = lexicalAnalyzer()
        actualparitem()
    return

def actualparitem():
    # in expression
    # | inout ID

    # an actual parameter
    # "in": value, "inout": reference
    global tokenType

    if tokenType == 'in_token':
        tokenType = lexicalAnalyzer()
        expression()
    elif tokenType == 'inout_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'id_token':
            tokenType = lexicalAnalyzer()
        else:
            print('Error in line %d: A parameter was expected after the keyword "inout".\n' % line)
            sys.exit(0)
    else:
        print('Error in line %d: The keyword "in" or "inout" was expected.\n' % line)
        sys.exit(0)
    return

def condition():
    # boolterm ( or boolterm ) *

    # boolean expression
    global tokenType

    boolTerm()
    while tokenType == 'or_token':
        tokenType = lexicalAnalyzer()
        boolTerm()
    return

def boolTerm():
    # boolfactor ( and boolfactor )*

    # term in boolean expression
    global tokenType

    boolfactor()
    while tokenType == 'and_token':
        tokenType = lexicalAnalyzer()
        boolfactor()
    return

def boolfactor():
    # factor in boolean expression
    global tokenType

    # not [ condition ]
    if tokenType == 'not_token':
        tokenType = lexicalAnalyzer()
        if tokenType == 'leftsquarebracket_token':
            tokenType = lexicalAnalyzer()
            condition()
            if tokenType == 'rightsquarebracket_token':
                tokenType = lexicalAnalyzer()
                return
            else:
                print('Error in line %d: The right square bracket symbol "]" was expected here.\n' % line)
                sys.exit(0)
        else:
            print('Error in line %d: The left square bracket symbol "[" was expected here.\n' % line)
            sys.exit(0)

    # [ condition ]
    elif tokenType == 'leftsquarebracket_token':
        tokenType = lexicalAnalyzer()
        condition()
        if tokenType == 'rightsquarebracket_token':
            tokenType = lexicalAnalyzer()
            return
        else:
            print('Error in line %d: The right square bracket symbol "]" was expected here.\n' % line)
            sys.exit(0)

    # expression REL_OP expression
    else:
        expression()
        REL_OP()
        expression()
        return

def expression():
    # optionalSign term ( ADD_OP term ) *

    # arithmetic expression
    global tokenType

    optionalSign()
    term()
    while tokenType == 'plus_token' or tokenType == 'minus_token':
        ADD_OP()
        term()
    return

def term():
    # factor ( MUL_OP factor ) *

    # term in arithmetic expression
    global tokenType

    factor()
    while tokenType == 'multiply_token' or tokenType == 'division_token':
        MUL_OP()
        factor()
    return

def factor():
    # factor in arithmetic expression
    global tokenType

    # INTEGER
    if tokenType == 'INTEGER_token':
        fact = lexical
        tokenType = lexicalAnalyzer()
        return fact

    # | ( expression )
    elif tokenType == 'leftbracket_token':
        tokenType = lexicalAnalyzer()
        e = expression()
        if tokenType == 'rightbracket_token':
            fact = e
            tokenType = lexicalAnalyzer()
            return fact
        else:
            print('Error in line %d: The right bracket symbol ")" was expected here.\n' % line)
            sys.exit(0)

    # | ID idTail
    elif tokenType == 'id_token':
        fact = lexical
        tokenType = lexicalAnalyzer()
        idTail()
        return fact
    else:
        print('Error in line %d: An integer, an expression, a procedure call or a function call was expected here.\n' % line)
        sys.exit(0)

def idTail():
    # ( actualparlist )
    # | e

    # follows a function or procedure
    # describes parentheses and parameters
    global tokenType

    if tokenType == 'leftbracket_token':
        tokenType = lexicalAnalyzer()
        actualparlist()
        if tokenType == 'rightbracket_token':
            tokenType = lexicalAnalyzer()
            return
    return

def optionalSign():
    # ADD_OP
    # | e

    # symbols "+" and "-" (are optional)
    global tokenType
    if tokenType == 'plus_token' or tokenType == 'minus_token':
        # ADD_OP() already consumes the sign token; advancing again here
        # (as the original did) would silently skip a token.
        opSign = ADD_OP()
        return opSign
    return

########################################
# lexer rules: relational, arithmetic operations, integer values and ids
########################################

def REL_OP():
    # = | <= | >= | > | < | <>
    global tokenType
    global lexical

    if (tokenType == 'equals_token' or tokenType == 'lessorequals_token' or tokenType == 'greaterorequals_token'
            or tokenType == 'less_token' or tokenType == 'greater_token' or tokenType == 'notequals_token'):
        relOp = lexical
        tokenType = lexicalAnalyzer()
    else:
        print('Error in line %d: A comparison sign was expected here.' % line)
        sys.exit(0)
    return relOp

def ADD_OP():
    # + | -
    global tokenType
    global lexical

    if tokenType == 'plus_token' or tokenType == 'minus_token':
        addOp = lexical
        tokenType = lexicalAnalyzer()
    else:
        print('Error in line %d: A plus sign (+) or a minus sign (-) was expected here.' % line)
        sys.exit(0)
    return addOp

def MUL_OP():
    # * | /
    global tokenType
    global lexical

    if tokenType == 'multiply_token' or tokenType == 'division_token':
        mulOp = lexical
        tokenType = lexicalAnalyzer()
    else:
        print('Error in line %d: A multiplication sign (*) or a division sign (/) was expected here.' % line)
        sys.exit(0)
    return mulOp

    program()

# Opening file, as argument in command line:
file = open(sys.argv[1], 'r')
print("\n")
...
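Every rule in the parser above repeats the same pattern: inspect the current token type, consume it with lexicalAnalyzer(), and abort with a line-numbered error on a mismatch. Below is a minimal, self-contained sketch of that consume-or-fail pattern; the toy lexer and the pre-baked token list are stand-ins of my own, not part of the original source.

import sys

line = 1
tokens = iter(['return_token', 'leftbracket_token', 'id_token', 'rightbracket_token'])

def lexicalAnalyzer():
    # Stand-in lexer: yields one pre-baked token type per call.
    return next(tokens, 'eof_token')

tokenType = lexicalAnalyzer()

def expect(expected, symbol):
    # Consume the current token if it matches, otherwise fail
    # exactly the way the parser above does.
    global tokenType
    if tokenType != expected:
        print('Error in line %d: The "%s" was expected.' % (line, symbol))
        sys.exit(0)
    tokenType = lexicalAnalyzer()

expect('return_token', 'return')
expect('leftbracket_token', '(')
expect('id_token', 'id')          # stands in for expression()
expect('rightbracket_token', ')')
print('parsed: return ( id )')

Factoring the repeated if/else ladders into a helper like expect() would shrink each *Stat() rule above to a few lines without changing its behavior.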


farms.ts

Source:farms.ts Github


import contracts from './contracts'
import { FarmConfig, QuoteToken } from './types'

const farms: FarmConfig[] = [
  {
    pid: 4,
    risk: 5,
    lpSymbol: 'WST',
    isTokenOnly: true,
    lpAddresses: {
      97: '0x3C26Cfb92fC1AA40B9eB81534CDFE9Ed4944462f',
      56: '',
    },
    tokenSymbol: 'WST',
    tokenAddresses: {
      97: '0xC14542fbC96f88E8c3982D26326b0691D7CE9c53',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BUSD,
    quoteTokenAdresses: contracts.busd,
  },
  {
    pid: 1,
    risk: 5,
    lpSymbol: 'WST-BNB LP',
    lpAddresses: {
      97: '0xb37aF8fF25552fDC405a93C2A893D383C019161A',
      56: '',
    },
    tokenSymbol: 'WST',
    tokenAddresses: {
      97: '0xC14542fbC96f88E8c3982D26326b0691D7CE9c53',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BNB,
    quoteTokenAdresses: contracts.wbnb,
  },
  {
    pid: 2,
    risk: 5,
    lpSymbol: 'BNB-BUSD LP',
    lpAddresses: {
      97: '0xe0e92035077c39594793e61802a350347c320cf2',
      56: '',
    },
    tokenSymbol: 'BNB',
    tokenAddresses: {
      97: '0xae13d989daC2f0dEbFf460aC112a837C89BAa7cd',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BUSD,
    quoteTokenAdresses: contracts.busd,
  },
  {
    pid: 3,
    risk: 5,
    lpSymbol: 'WST-BUSD LP',
    lpAddresses: {
      97: '0x3C26Cfb92fC1AA40B9eB81534CDFE9Ed4944462f',
      56: '',
    },
    tokenSymbol: 'WST',
    tokenAddresses: {
      97: '0xC14542fbC96f88E8c3982D26326b0691D7CE9c53',
      56: '',
    },
    quoteTokenSymbol: QuoteToken.BUSD,
    quoteTokenAdresses: contracts.busd,
  },
  // { pid: 2, risk: 5, isTokenOnly: true, lpSymbol: 'WST', lpAddresses: { 97: '', 56: '0x7bf33458a7d077ea5aa67313e329741dd130a9ef' }, tokenSymbol: 'WST', tokenAddresses: { 97: '', 56: '0xaAdFf17d56d80312b392Ced903f3E8dBE5c3ece7' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd, isAutoVault: true },
  // { pid: 3, risk: 3, lpSymbol: 'BNB-BUSD LP', lpAddresses: { 97: '', 56: '0x58F876857a02D6762E0101bb5C46A8c1ED44Dc16' }, tokenSymbol: 'BNB', tokenAddresses: { 97: '', 56: '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 21, risk: 5, lpSymbol: 'BFD-BNB LP', lpAddresses: { 97: '', 56: '0x42c26eea50615e3f808754f46b69a3bb386fe846' }, tokenSymbol: 'BFD', tokenAddresses: { 97: '', 56: '0x95f31E322E1Bee2F6DCB085A1DFe3d3081Aab653' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 2, risk: 5, lpSymbol: 'RINI-BNB LP', lpAddresses: { 97: '', 56: '0x5F188bEDe05D11b8d1474D6832b41d3EaE4ED98E' }, tokenSymbol: 'RINI', tokenAddresses: { 97: '', 56: '0x844B1B28b0FdA8075ea8eB99C46339398deD1673' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 3, risk: 3, lpSymbol: 'BNB-BUSD LP', lpAddresses: { 97: '', 56: '0x58F876857a02D6762E0101bb5C46A8c1ED44Dc16' }, tokenSymbol: 'BNB', tokenAddresses: { 97: '', 56: '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 4, risk: 5, lpSymbol: 'CAKE-BNB LP', lpAddresses: { 97: '', 56: '0x0eD7e52944161450477ee417DE9Cd3a859b14fD0' }, tokenSymbol: 'CAKE', tokenAddresses: { 97: '', 56: '0x0e09fabb73bd3ade0a17ecc321fd13a19e81ce82' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 5, risk: 5, lpSymbol: 'CAKE-BUSD LP', lpAddresses: { 97: '', 56: '0x804678fa97d91B974ec2af3c843270886528a9E6' }, tokenSymbol: 'CAKE', tokenAddresses: { 97: '', 56: '0x0e09fabb73bd3ade0a17ecc321fd13a19e81ce82' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 6, risk: 5, lpSymbol: 'BTCB-BUSD LP', lpAddresses: { 97: '', 56: '0xF45cd219aEF8618A92BAa7aD848364a158a24F33' }, tokenSymbol: 'BTCB', tokenAddresses: { 97: '', 56: '0x7130d2a12b9bcbfae4f2634d864a1ee1ce3ead9c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 7, risk: 5, lpSymbol: 'BTCB-BNB LP', lpAddresses: { 97: '', 56: '0x61EB789d75A95CAa3fF50ed7E47b96c132fEc082' }, tokenSymbol: 'BTCB', tokenAddresses: { 97: '', 56: '0x7130d2a12b9bcbfae4f2634d864a1ee1ce3ead9c' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb }, // LP address is the staked token; the quote token in the BTCB-BNB pool is BNB
  // { pid: 8, risk: 5, lpSymbol: 'BTCB-ETH LP', lpAddresses: { 97: '', 56: '0xD171B26E4484402de70e3Ea256bE5A2630d7e88D' }, tokenSymbol: 'BTCB', tokenAddresses: { 97: '', 56: '0x7130d2a12b9bcbfae4f2634d864a1ee1ce3ead9c' }, quoteTokenSymbol: QuoteToken.ETH, quoteTokenAdresses: contracts.eth },
  // { pid: 9, risk: 5, lpSymbol: 'ETH-BNB LP', lpAddresses: { 97: '', 56: '0x74E4716E431f45807DCF19f284c7aA99F18a4fbc' }, tokenSymbol: 'ETH', tokenAddresses: { 97: '', 56: '0x2170ed0880ac9a755fd29b2688956bd959f933f8' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 11, risk: 5, lpSymbol: 'USDC-BUSD LP', lpAddresses: { 97: '', 56: '0x2354ef4DF11afacb85a5C7f98B624072ECcddbB1' }, tokenSymbol: 'USDC', tokenAddresses: { 97: '', 56: '0x8AC76a51cc950d9822D68b83fE1Ad97B32Cd580d' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 12, risk: 5, lpSymbol: 'USDT-BUSD LP', lpAddresses: { 97: '', 56: '0x7EFaEf62fDdCCa950418312c6C91Aef321375A00' }, tokenSymbol: 'USDT', tokenAddresses: { 97: '', 56: '0x55d398326f99059ff775485246999027b3197955' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 13, risk: 5, lpSymbol: 'TUSD-BUSD LP', lpAddresses: { 97: '', 56: '0x2E28b9B74D6d99D4697e913b82B41ef1CAC51c6C' }, tokenSymbol: 'TUSD', tokenAddresses: { 97: '', 56: '0x14016E85a25aeb13065688cAFB43044C2ef86784' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 14, risk: 5, lpSymbol: 'USDC-USDT LP', lpAddresses: { 97: '', 56: '0xec6557348085aa57c72514d67070dc863c0a5a8c' }, tokenSymbol: 'USDC', tokenAddresses: { 97: '', 56: '0x8AC76a51cc950d9822D68b83fE1Ad97B32Cd580d' }, quoteTokenSymbol: QuoteToken.USDT, quoteTokenAdresses: contracts.usdt },
  // { pid: 15, risk: 5, lpSymbol: 'DOT-BNB LP', lpAddresses: { 97: '', 56: '0xDd5bAd8f8b360d76d12FdA230F8BAF42fe0022CF' }, tokenSymbol: 'DOT', tokenAddresses: { 97: '', 56: '0x7083609fce4d1d8dc0c979aab8c869ea2c873402' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 16, risk: 5, lpSymbol: 'LINK-BNB LP', lpAddresses: { 97: '', 56: '0x824eb9faDFb377394430d2744fa7C42916DE3eCe' }, tokenSymbol: 'LINK', tokenAddresses: { 97: '', 56: '0xF8A0BF9cF54Bb92F17374d9e9A321E6a111a51bD' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 17, risk: 5, lpSymbol: 'ADA-BNB LP', lpAddresses: { 97: '', 56: '0x28415ff2C35b65B9E5c7de82126b4015ab9d031F' }, tokenSymbol: 'ADA', tokenAddresses: { 97: '', 56: '0x3ee2200efb3400fabb9aacf31297cbdd1d435d47' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 18, risk: 5, lpSymbol: 'BSTEEM-BNB LP', lpAddresses: { 97: '', 56: '0xa43857e02c94ee8323c2198ffe80fd590544b30a' }, tokenSymbol: 'BSTEEM', tokenAddresses: { 97: '', 56: '0x382d36e85178f2d3fd84e336d4e1d442fce78e8e' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 19, risk: 5, lpSymbol: 'BBLURT-BNB LP', lpAddresses: { 97: '', 56: '0x37ddd439f7fc42f2964e3b09c4853c03075db0c9' }, tokenSymbol: 'BBLURT', tokenAddresses: { 97: '', 56: '0xb0458283033e5a3f7867f409477f53754b667dcc' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 20, risk: 5, lpSymbol: 'MOON-BNB LP', lpAddresses: { 97: '', 56: '0x643D3f1C1fA1A7D5D6BDE50Bd3FDD1Cdd8A85692' }, tokenSymbol: 'MOON', tokenAddresses: { 97: '', 56: '0xE8c93310af068aa50bd7bF0ebFa459Df2a02ceba' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 42, risk: 5, lpSymbol: 'YNB-BNB LP', lpAddresses: { 97: '', 56: '0x1a6eba3897fb4799fb6372f32771ee821370ab7e' }, tokenSymbol: 'YNB', tokenAddresses: { 97: '', 56: '0xa05e01E9dF824CCAd284895Fff43B043e2133f50' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 22, risk: 5, isTokenOnly: true, lpSymbol: 'RBS', lpAddresses: { 97: '', 56: '0xa2fa80da37170ed705cb0bd1f27558ccecd417c5' }, tokenSymbol: 'RBS', tokenAddresses: { 97: '', 56: '0xAfAEEe58a58867c73245397C0F768FF041D32d70' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 23, risk: 5, isTokenOnly: true, lpSymbol: 'WBNB', lpAddresses: { 97: '', 56: '0x1b96b92314c44b159149f7e0303511fb2fc4774f' }, tokenSymbol: 'WBNB', tokenAddresses: { 97: '', 56: '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 24, risk: 5, isTokenOnly: true, lpSymbol: 'CAKE', lpAddresses: { 97: '', 56: '0x804678fa97d91B974ec2af3c843270886528a9E6' }, tokenSymbol: 'CAKE', tokenAddresses: { 97: '', 56: '0x0e09fabb73bd3ade0a17ecc321fd13a19e81ce82' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 25, risk: 5, isTokenOnly: true, lpSymbol: 'BTCB', lpAddresses: { 97: '', 56: '0xF45cd219aEF8618A92BAa7aD848364a158a24F33' }, tokenSymbol: 'BTCB', tokenAddresses: { 97: '', 56: '0x7130d2A12B9BCbFAe4f2634d864A1Ee1Ce3Ead9c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 26, risk: 5, isTokenOnly: true, lpSymbol: 'ETH', lpAddresses: { 97: '', 56: '0xd9a0d1f5e02de2403f68bb71a15f8847a854b494' }, tokenSymbol: 'ETH', tokenAddresses: { 97: '', 56: '0x2170ed0880ac9a755fd29b2688956bd959f933f8' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 27, risk: 5, isTokenOnly: true, lpSymbol: 'BUSD', lpAddresses: { 97: '', 56: '0xe9e7cea3dedca5984780bafc599bd69add087d56' }, tokenSymbol: 'BUSD', tokenAddresses: { 97: '', 56: '0xe9e7cea3dedca5984780bafc599bd69add087d56' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 28, risk: 5, isTokenOnly: true, lpSymbol: 'USDT', lpAddresses: { 97: '', 56: '0x7EFaEf62fDdCCa950418312c6C91Aef321375A00' }, tokenSymbol: 'USDT', tokenAddresses: { 97: '', 56: '0x55d398326f99059ff775485246999027b3197955' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 29, risk: 5, isTokenOnly: true, lpSymbol: 'DOT', lpAddresses: { 97: '', 56: '0x54c1ec2f543966953f2f7564692606ea7d5a184e' }, tokenSymbol: 'DOT', tokenAddresses: { 97: '', 56: '0x7083609fce4d1d8dc0c979aab8c869ea2c873402' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 30, risk: 5, isTokenOnly: true, lpSymbol: 'RINI', lpAddresses: { 97: '', 56: '0x5F188bEDe05D11b8d1474D6832b41d3EaE4ED98E' }, tokenSymbol: 'RINI', tokenAddresses: { 97: '', 56: '0x844b1b28b0fda8075ea8eb99c46339398ded1673' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // // { pid: 31, risk: 5, isTokenOnly: true, lpSymbol: 'BSCT', lpAddresses: { 97: '', 56: '0xE920575CEcE01e5D9A7AB23d1a4FD15d8CF3Fe75' }, tokenSymbol: 'BSCT', tokenAddresses: { 97: '', 56: '0xE920575CEcE01e5D9A7AB23d1a4FD15d8CF3Fe75' }, quoteTokenSymbol: QuoteToken.BSCT, quoteTokenAdresses: contracts.bsct },
  // // { pid: 32, risk: 5, isTokenOnly: true, lpSymbol: 'KRWP', lpAddresses: { 97: '', 56: '0xB9Dd513420D68Ac4CCf65cBcaA8cc7bd539713ca' }, tokenSymbol: 'KRWP', tokenAddresses: { 97: '', 56: '0xB9Dd513420D68Ac4CCf65cBcaA8cc7bd539713ca' }, quoteTokenSymbol: QuoteToken.KRWP, quoteTokenAdresses: contracts.krwp },
  // { pid: 33, risk: 5, isTokenOnly: true, lpSymbol: 'ADA', lpAddresses: { 97: '', 56: '0x28415ff2C35b65B9E5c7de82126b4015ab9d031F' }, tokenSymbol: 'ADA', tokenAddresses: { 97: '', 56: '0x3ee2200efb3400fabb9aacf31297cbdd1d435d47' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 34, risk: 5, isTokenOnly: true, lpSymbol: 'BSTEEM', lpAddresses: { 97: '', 56: '0xa43857e02c94ee8323c2198ffe80fd590544b30a' }, tokenSymbol: 'BSTEEM', tokenAddresses: { 97: '', 56: '0x382d36e85178f2d3fd84e336d4e1d442fce78e8e' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 35, risk: 5, isTokenOnly: true, lpSymbol: 'BBLURT', lpAddresses: { 97: '', 56: '0x37ddd439f7fc42f2964e3b09c4853c03075db0c9' }, tokenSymbol: 'BBLURT', tokenAddresses: { 97: '', 56: '0xb0458283033e5a3f7867f409477f53754b667dcc' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 36, risk: 5, isTokenOnly: true, lpSymbol: 'MOON', lpAddresses: { 97: '', 56: '0x643D3f1C1fA1A7D5D6BDE50Bd3FDD1Cdd8A85692' }, tokenSymbol: 'MOON', tokenAddresses: { 97: '', 56: '0xE8c93310af068aa50bd7bF0ebFa459Df2a02ceba' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 39, risk: 5, isTokenOnly: true, lpSymbol: 'BSEREY', lpAddresses: { 97: '', 56: '0xF9E47c3fB70F5364A536a393De6Ec0A48d026584' }, tokenSymbol: 'BSEREY', tokenAddresses: { 97: '', 56: '0x2b618835a1eefcbf41e33497451ca1f3aa62f2d8' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 40, risk: 5, lpSymbol: 'BSEREY-BNB LP', lpAddresses: { 97: '', 56: '0xF9E47c3fB70F5364A536a393De6Ec0A48d026584' }, tokenSymbol: 'BSEREY', tokenAddresses: { 97: '', 56: '0x2b618835a1eefcbf41e33497451ca1f3aa62f2d8' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 41, risk: 5, isTokenOnly: true, lpSymbol: 'BFD', lpAddresses: { 97: '', 56: '0x42c26eea50615e3f808754f46b69a3bb386fe846' }, tokenSymbol: 'BFD', tokenAddresses: { 97: '', 56: '0x95f31e322e1bee2f6dcb085a1dfe3d3081aab653' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 43, risk: 5, isTokenOnly: true, lpSymbol: 'BATOLO', lpAddresses: { 97: '', 56: '0x69cd16563ebac608292fa9598d185e92d099a1a6' }, tokenSymbol: 'BATOLO', tokenAddresses: { 97: '', 56: '0xcAa6f0d4c4796F26852ea3985DBd27b6b830C674' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 44, risk: 5, lpSymbol: 'BATOLO-BNB', lpAddresses: { 97: '', 56: '0x69Cd16563eBac608292FA9598D185e92d099a1a6' }, tokenSymbol: 'BATOLO', tokenAddresses: { 97: '', 56: '0xcAa6f0d4c4796F26852ea3985DBd27b6b830C674' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 38, risk: 5, lpSymbol: 'steem', delegate: true, depositFee: "5", delegateAddress: 'robiniaswap', isTokenOnly: true, tokenSymbol: 'STEEM POWER', delegateToken: "STEEM", lpAddresses: { 97: '', 56: '0xa2fa80da37170ed705cb0bd1f27558ccecd417c5' }, tokenAddresses: { 97: '', 56: '0xafaeee58a58867c73245397c0f768ff041d32d70' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 7, risk: 5, isTokenOnly: true, lpSymbol: 'ETH', lpAddresses: { 97: '', 56: '0x0E09FaBB73Bd3Ade0a17ECC321fD13a19e81cE82' }, tokenSymbol: 'RBS', tokenAddresses: { 97: '', 56: '0x2170Ed0880ac9A755fd29B2688956BD959F933F8' }, quoteTokenSymbol: QuoteToken.ETH, quoteTokenAdresses: contracts.eth },
  // { pid: 3, risk: 1, lpSymbol: 'USDT-BUSD LP', lpAddresses: { 97: '', 56: '0xc15fa3e22c912a276550f3e5fe3b0deb87b55acd' }, tokenSymbol: 'USDT', tokenAddresses: { 97: '', 56: '0x55d398326f99059ff775485246999027b3197955' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 4, risk: 2, lpSymbol: 'BTCB-BNB LP', lpAddresses: { 97: '', 56: '0x7561eee90e24f3b348e1087a005f78b4c8453524' }, tokenSymbol: 'BTCB', tokenAddresses: { 97: '', 56: '0x7130d2a12b9bcbfae4f2634d864a1ee1ce3ead9c' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 5, risk: 2, lpSymbol: 'ETH-BNB LP', lpAddresses: { 97: '', 56: '0x70d8929d04b60af4fb9b58713ebcf18765ade422' }, tokenSymbol: 'ETH', tokenAddresses: { 97: '', 56: '0x2170ed0880ac9a755fd29b2688956bd959f933f8' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 6, risk: 1, lpSymbol: 'DAI-BUSD LP', lpAddresses: { 97: '', 56: '0x3ab77e40340ab084c3e23be8e5a6f7afed9d41dc' }, tokenSymbol: 'DAI', tokenAddresses: { 97: '', 56: '0x1af3f329e8be154074d8769d1ffa4ee058b1dbc3' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 7, risk: 1, lpSymbol: 'USDC-BUSD LP', lpAddresses: { 97: '', 56: '0x680dd100e4b394bda26a59dd5c119a391e747d18' }, tokenSymbol: 'USDC', tokenAddresses: { 97: '', 56: '0x8ac76a51cc950d9822d68b83fe1ad97b32cd580d' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 9, risk: 3, lpSymbol: 'DOT-BNB LP', lpAddresses: { 97: '', 56: '0xbcd62661a6b1ded703585d3af7d7649ef4dcdb5c' }, tokenSymbol: 'DOT', tokenAddresses: { 97: '', 56: '0x7083609fce4d1d8dc0c979aab8c869ea2c873402' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 10, risk: 4, lpSymbol: 'CAKE-BUSD LP', lpAddresses: { 97: '', 56: '0x0ed8e0a2d99643e1e65cca22ed4424090b8b7458' }, tokenSymbol: 'CAKE', tokenAddresses: { 97: '', 56: '0x0e09fabb73bd3ade0a17ecc321fd13a19e81ce82' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd },
  // { pid: 11, risk: 4, lpSymbol: 'CAKE-BNB LP', lpAddresses: { 97: '', 56: '0xa527a61703d82139f8a06bc30097cc9caa2df5a6' }, tokenSymbol: 'CAKE', tokenAddresses: { 97: '', 56: '0x0e09fabb73bd3ade0a17ecc321fd13a19e81ce82' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb },
  // { pid: 13, risk: 1, isTokenOnly: true, lpSymbol: 'BUSD', lpAddresses: { 97: '', 56: '0x19e7cbecdd23a16dfa5573df54d98f7caae03019' }, tokenSymbol: 'BUSD', tokenAddresses: { 97: '', 56: '0xe9e7cea3dedca5984780bafc599bd69add087d56' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // BUSD-BUSD will ignore
  // { pid: 14, risk: 3, isTokenOnly: true, lpSymbol: 'WBNB', lpAddresses: { 97: '', 56: '0x1b96b92314c44b159149f7e0303511fb2fc4774f' }, tokenSymbol: 'WBNB', tokenAddresses: { 97: '', 56: '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // BNB-BUSD LP
  // { pid: 15, risk: 1, isTokenOnly: true, lpSymbol: 'USDT', lpAddresses: { 97: '', 56: '0xc15fa3e22c912a276550f3e5fe3b0deb87b55acd' }, tokenSymbol: 'USDT', tokenAddresses: { 97: '', 56: '0x55d398326f99059ff775485246999027b3197955' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // USDT-BUSD LP
  // { pid: 16, risk: 2, isTokenOnly: true, lpSymbol: 'BTCB', lpAddresses: { 97: '', 56: '0xb8875e207ee8096a929d543c9981c9586992eacb' }, tokenSymbol: 'BTCB', tokenAddresses: { 97: '', 56: '0x7130d2a12b9bcbfae4f2634d864a1ee1ce3ead9c' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // BTCB-BUSD LP
  // { pid: 17, risk: 2, isTokenOnly: true, lpSymbol: 'ETH', lpAddresses: { 97: '', 56: '0xd9a0d1f5e02de2403f68bb71a15f8847a854b494' }, tokenSymbol: 'ETH', tokenAddresses: { 97: '', 56: '0x2170ed0880ac9a755fd29b2688956bd959f933f8' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // ETH-BUSD LP
  // { pid: 18, risk: 1, isTokenOnly: true, lpSymbol: 'DAI', lpAddresses: { 97: '', 56: '0x3ab77e40340ab084c3e23be8e5a6f7afed9d41dc' }, tokenSymbol: 'DAI', tokenAddresses: { 97: '', 56: '0x1af3f329e8be154074d8769d1ffa4ee058b1dbc3' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // DAI-BUSD LP
  // { pid: 19, risk: 1, isTokenOnly: true, lpSymbol: 'USDC', lpAddresses: { 97: '', 56: '0x680dd100e4b394bda26a59dd5c119a391e747d18' }, tokenSymbol: 'USDC', tokenAddresses: { 97: '', 56: '0x8ac76a51cc950d9822d68b83fe1ad97b32cd580d' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // USDC-BUSD LP
  // { pid: 20, risk: 3, isTokenOnly: true, lpSymbol: 'DOT', lpAddresses: { 97: '', 56: '0x54c1ec2f543966953f2f7564692606ea7d5a184e' }, tokenSymbol: 'DOT', tokenAddresses: { 97: '', 56: '0x7083609fce4d1d8dc0c979aab8c869ea2c873402' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // DOT-BUSD LP
  // { pid: 21, risk: 4, isTokenOnly: true, lpSymbol: 'CAKE', lpAddresses: { 97: '', 56: '0x0ed8e0a2d99643e1e65cca22ed4424090b8b7458' }, tokenSymbol: 'CAKE', tokenAddresses: { 97: '', 56: '0x0e09fabb73bd3ade0a17ecc321fd13a19e81ce82' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // CAKE-BUSD LP
  // { pid: 22, risk: 3, isTokenOnly: true, lpSymbol: 'BSCX', lpAddresses: { 97: '', 56: '0xa32a983a64ce21834221aa0ad1f1533907553136' }, tokenSymbol: 'BSCX', tokenAddresses: { 97: '', 56: '0x5ac52ee5b2a633895292ff6d8a89bb9190451587' }, quoteTokenSymbol: QuoteToken.BUSD, quoteTokenAdresses: contracts.busd }, // BSCX-BUSD LP
  // { pid: 23, risk: 3, isTokenOnly: true, lpSymbol: 'AUTO', lpAddresses: { 97: '', 56: '0x4d0228ebeb39f6d2f29ba528e2d15fc9121ead56' }, tokenSymbol: 'AUTO', tokenAddresses: { 97: '', 56: '0xa184088a740c695e156f91f5cc086a06bb78b827' }, quoteTokenSymbol: QuoteToken.BNB, quoteTokenAdresses: contracts.wbnb }, // AUTO-BNB LP
]
...
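Each farm entry keys its addresses by BSC chain id (97 is the testnet, 56 the mainnet) so the same config serves both networks. A small Python illustration of that lookup, with one entry reduced to a plain dict and a hypothetical resolve_address helper of my own (not from the source):

# One farms.ts entry mirrored as a dict, addresses keyed by chain id.
farm = {
    'pid': 4,
    'lpSymbol': 'WST',
    'lpAddresses': {97: '0x3C26Cfb92fC1AA40B9eB81534CDFE9Ed4944462f', 56: ''},
}

def resolve_address(addresses, chain_id):
    # Return the address configured for chain_id, or None when it is
    # blank or missing (here the mainnet slot is still empty).
    return addresses.get(chain_id) or None

print(resolve_address(farm['lpAddresses'], 97))  # testnet address
print(resolve_address(farm['lpAddresses'], 56))  # None: mainnet not configured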


tokenutils.py

Source:tokenutils.py Github


# coding:utf-8
'''
@author: ota
'''
import re
from sqlparse import sql, tokens as T
from enum import Enum

class EngineComment(Enum):
    """Comment types related to the SQL engine."""
    none = 0  # not an SQL-engine comment
    syntax = 1  # logic
    param = 2  # parameter
    sql_identifier = 3  # SQL_IDENTIFIER

def get_comment_type(token, comment_syntax):
    """Return the SQL-engine comment type of the token."""
    if is_block_comment(token):
        return comment_syntax.get_block_comment_type(token)
    elif is_line_comment(token):
        return comment_syntax.get_line_comment_type(token)

def is_param_comment(token, next_token, comment_syntax):
    """Check for an SQL-engine parameter comment."""
    return get_comment_type(token, comment_syntax) == EngineComment.param \
        and (is_literal(next_token) or is_wildcard(next_token) or is_parenthesis(next_token))

def is_hint_block_comment(token):
    """Check for an Oracle hint comment."""
    if is_block_comment(token):
        tokens = token.tokens
        if len(tokens) >= 3:
            comment = tokens[1].value
            if comment.startswith("+"):
                return True
    return False

def is_block_comment(token):
    """Check for a block comment."""
    if is_comment(token):
        comment = token.token_next_by_type(0, T.Comment)
        return comment.value in ["/*", "*/"]
    return False

def is_line_comment(token):
    """Check for a line comment."""
    if is_comment(token):
        comment = token.token_next_by_type(0, T.Comment)
        return comment.value not in ["/*", "*/"]
    return False

def is_plain_line_comment(token, comment_syntax):
    """Check for a line comment that is not SQL-engine syntax."""
    return is_line_comment(token) and get_comment_type(token, comment_syntax) == EngineComment.none

def is_line_description_line_comment(token, comment_syntax):
    """Check for a line comment that can serve as a line description."""
    return is_plain_line_comment(token, comment_syntax) and token.is_line_description

def is_comment(token):
    """Check for a comment token."""
    return isinstance(token, sql.Comment)

def is_dot(token):
    """Check for a dot."""
    return is_punctuation(token) and token.value == "."

def is_comma(token):
    """Check for a comma."""
    return is_punctuation(token) and token.value == ","

def is_literal(token):
    """Check for a literal (string or number)."""
    return token.ttype in T.Literal

def is_string_literal(token):
    """Check for a string literal."""
    return token.ttype in T.Literal.String

def is_number_literal(token):
    """Check for a number literal."""
    return token.ttype in T.Literal.Number

def is_null_keyword(token):
    """Check for the NULL keyword."""
    return token.match(T.Keyword, "NULL")

def is_comparison(token):
    """Check for a Comparison."""
    return isinstance(token, sql.Comparison)

def is_identifier_list(token):
    """Check for an IdentifierList."""
    return isinstance(token, sql.IdentifierList)

def is_identifier(token):
    """Check for an Identifier."""
    return isinstance(token, sql.Identifier)

def is_function(token):
    """Check for a Function."""
    return isinstance(token, sql.Function)

def is_value_candidate(token):
    """Token can represent a value."""
    return is_string_candidate(token) or is_number_candidate(token)

def is_string_candidate(token):
    """Token can represent a string."""
    if is_string_literal(token):
        return True
    if is_function(token):
        return True
    if is_null_keyword(token):
        return True
    if is_calculation(token):
        return True
    if is_parenthesis(token):
        tokens = [t for t in tokens_parenthesis_inner(token) if is_enable(t)]
        if len(tokens) == 1:
            return is_string_candidate(tokens[0])
        elif tokens:
            return is_select_dml(tokens[0])
    if is_identifier(token):
        tokens = [t for t in token.tokens if is_enable(t)]
        for tkn in tokens:
            if (not tkn.ttype in T.Name) and (not is_dot(tkn)):
                return False
        return True
    return False

def is_number_candidate(token):
    """Token can represent a number."""
    if is_number_literal(token):
        return True
    if is_function(token):
        return True
    if is_null_keyword(token):
        return True
    if is_calculation(token):
        return True
    if is_parenthesis(token):
        tokens = [t for t in tokens_parenthesis_inner(token) if is_enable(t)]
        if len(tokens) == 1:
            return is_number_candidate(tokens[0])
        elif tokens:
            return is_select_dml(tokens[0])
    if is_identifier(token):
        tokens = [t for t in token.tokens if is_enable(t)]
        for tkn in tokens:
            if (not tkn.ttype in T.Name) and (not is_dot(tkn)):
                return False
        return True
    return False

def is_exists_function(token):
    """Check for the EXISTS function."""
    if not is_function(token):
        return False
    ftoken = token_next_enable(token)
    return equals_ignore_case(ftoken.value, "EXISTS")

def is_over_function(token):
    """Check for the OVER function."""
    if not is_function(token):
        return False
    ftoken = token_next_enable(token)
    return equals_ignore_case(ftoken.value, "OVER")

def is_parenthesis(token):
    """Check for a Parenthesis."""
    return isinstance(token, sql.Parenthesis)

def is_dmlddl_parenthesis(token):
    """Check for a parenthesis that wraps DML or DDL."""
    if not is_parenthesis(token):
        return False
    open_punc = token.token_next_match(0, T.Punctuation, '(')
    first = token_next_enable(token, open_punc)
    if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
        return True
    if is_with(first):
        return True
    if is_parenthesis(first):
        return is_dmlddl_parenthesis(first)
    return False

def is_enum_parenthesis(token):
    """Check whether the parenthesis contains an enumeration of values."""
    if not is_parenthesis(token):
        return False
    def is_enums(tokens):
        for token in tokens:
            if token.is_whitespace() \
                    or is_comment(token) \
                    or is_comma(token) \
                    or is_literal(token) \
                    or is_null_keyword(token) \
                    or is_identifier(token):
                pass
            elif is_identifier_list(token):
                if not is_enums(token.tokens):
                    return False
            else:
                return False
        return True
    return is_enums(tokens_parenthesis_inner(token))

def is_comparisons_parenthesis(token):
    """Check whether the parenthesis contains comparison operations."""
    if not is_parenthesis(token):
        return False
    exists_logical_operator = False
    exists_comparison_operator = False
    exists_parenthesis = False
    exists_exists_function = False
    prev_enable = None
    for tkn in tokens_parenthesis_inner(token):
        if is_comparison(tkn):
            return True
        if is_logical_operator_keyword(tkn):
            exists_logical_operator = True
        if is_comparison_operator(tkn):
            exists_comparison_operator = True
        if prev_enable and get_comparison_operator_words(prev_enable, tkn):
            exists_comparison_operator = True
        if is_parenthesis(tkn):
            exists_parenthesis = True
        if is_exists_function(tkn):
            exists_exists_function = True
        if exists_logical_operator and exists_comparison_operator:
            return True
        if exists_logical_operator and exists_parenthesis:
            return True
        if exists_logical_operator and exists_exists_function:
            return True
        if is_enable(tkn):
            prev_enable = tkn
    return False

def is_punctuation(token):
    """Check for Punctuation."""
    return token.ttype in T.Punctuation

def is_semicolon_punctuation(token):
    """Check for a semicolon."""
    return is_punctuation(token) and token.value == ";"

def is_open_punctuation(token):
    """Check for an opening parenthesis."""
    return is_punctuation(token) and token.value == "("

def is_close_punctuation(token):
    """Check for a closing parenthesis."""
    return is_punctuation(token) and token.value == ")"

def is_keyword(token):
    """Check for a keyword."""
    return token.is_keyword

def is_as_keyword(token):
    """Check for the AS keyword."""
    return token.match(T.Keyword, "AS")

def is_distinct_keyword(token):
    """Check for the DISTINCT keyword."""
    return token.match(T.Keyword, "DISTINCT")

def is_from_keyword(token):
    """Check for the FROM keyword."""
    return token.match(T.Keyword, "FROM")

def is_by_keyword(token):
    """Check for the BY keyword."""
    return token.match(T.Keyword, "BY")

def is_select_dml(token):
    """Check for the SELECT DML keyword."""
    return token.match(T.DML, "SELECT")

def is_update_dml(token):
    """Check for the UPDATE DML keyword."""
    return token.match(T.DML, "UPDATE")

def is_insert_dml(token):
    """Check for the INSERT DML keyword."""
    return token.match(T.DML, "INSERT")

def is_delete_dml(token):
    """Check for the DELETE DML keyword."""
    return token.match(T.DML, "DELETE")

def is_with(token):
    """Check for a WITH clause."""
    from uroborosqlfmt.sql import With
    return isinstance(token, With)

def is_into_keyword(token):
    """Check for the INTO keyword."""
    return token.match(T.Keyword, "INTO")

def is_values_keyword(token):
    """Check for the VALUES keyword."""
    return token.match(T.Keyword, "VALUES")

def is_set_keyword(token):
    """Check for the SET keyword."""
    return token.match(T.Keyword, "SET")

def is_dml(token):
    """Check for a DML keyword."""
    return token.ttype in T.DML

def is_wildcard(token):
    """Check for the wildcard "*"."""
    return token.ttype in T.Wildcard

def is_where(token):
    """Check for a WHERE clause."""
    return isinstance(token, sql.Where)

def is_when(token):
    """Check for a WHEN clause."""
    from uroborosqlfmt.sql import When
    return isinstance(token, When)

def is_having(token):
    """Check for a HAVING clause."""
    from uroborosqlfmt.sql import Having
    return isinstance(token, Having)

def is_on(token):
    """Check for an ON clause."""
    from uroborosqlfmt.sql import On
    return isinstance(token, On)

def is_connectby(token):
    """Check for a CONNECT BY clause."""
    from uroborosqlfmt.sql import ConnectBy
    return isinstance(token, ConnectBy)

def is_startwith(token):
    """Check for a START WITH clause."""
    from uroborosqlfmt.sql import StartWith
    return isinstance(token, StartWith)

def is_case(token):
    """Check for a CASE expression."""
    return isinstance(token, sql.Case)

def is_forupdate(token):
    """Check for a FOR UPDATE clause."""
    from uroborosqlfmt.sql import ForUpdate
    return isinstance(token, ForUpdate)

def is_waitornowait(token):
    """Check for a WAIT / NOWAIT clause."""
    from uroborosqlfmt.sql import WaitOrNowait
    return isinstance(token, WaitOrNowait)

def is_union(token):
    """Check for a UNION clause."""
    from uroborosqlfmt.sql import Union
    return isinstance(token, Union)

def is_join(token):
    """Check for a JOIN clause."""
    from uroborosqlfmt.sql import Join
    return isinstance(token, Join)

def is_mergewhen(token):
    """Check for a MERGE WHEN clause."""
    from uroborosqlfmt.sql import MergeWhen
    return isinstance(token, MergeWhen)

def is_mergeupdateinsertclause(token):
    """Check for DML inside a MERGE."""
    from uroborosqlfmt.sql import MergeUpdateInsertClause
    return isinstance(token, MergeUpdateInsertClause)

def is_between_keyword(token):
    """Check for the BETWEEN keyword."""
    return token.match(T.Keyword, "BETWEEN")

def is_and_keyword(token):
    """Check for the AND operator."""
    return token.match(T.Keyword, "AND")

def is_using_keyword(token):
    """Check for the USING keyword."""
    return token.match(T.Keyword, "USING")

def is_logical_operator_keyword(token):
    """Check for the AND / OR operators."""
    return token.match(T.Keyword, ("AND", "OR"))

def is_name_or_keyword(token):
    """Check for a name or keyword."""
    return is_keyword(token) or token.ttype in T.Name

def is_operator(token):
    """Check for an operator."""
    return token.ttype in T.Operator

def is_comparison_operator(token):
    """Check for a comparison operator."""
    return token.ttype in T.Operator.Comparison

def is_concat_operator(token):
    """Check for the string concatenation operator "||"."""
    return is_operator(token) and token.value == "||"

def is_phrase(token):
    """Check for a Phrase."""
    from uroborosqlfmt.sql import Phrase
    return isinstance(token, Phrase)

def is_calculation(token):
    """Check for a Calculation."""
    from uroborosqlfmt.sql import Calculation
    return isinstance(token, Calculation)

def is_calc_operator(token):
    """Check for a calculation operator."""
    if is_concat_operator(token):
        return True
    if is_operator(token) and not is_comparison_operator(token):
        return True
    return False

def is_enable(token):
    """Check for a meaningful token (neither whitespace nor comment)."""
    if token.is_whitespace():
        return False
    if is_comment(token):
        return False
    if token.parent and is_comment(token.parent):
        return False
    return True

def find_comparison_operator_words(tokens):
    """Search for comparison-operator words."""
    prev = None
    for token in tokens[:]:
        if not is_enable(token):
            continue
        if not prev:
            prev = token
            continue
        comps = get_comparison_operator_words(prev, token)
        if comps:
            return comps
        prev = token
    if prev:
        return get_comparison_operator_words(prev, None)
    else:
        return []

def get_comparison_operator_words(token, next_token):
    """Get comparison-operator words."""
    if next_token and is_keyword(next_token):
        if is_keyword(token):
            if equals_ignore_case(token.value, "NOT"):
                if equals_ignore_case(next_token.value, ["IN", "BETWEEN", "LIKE"]):
                    return [token, next_token]
            elif equals_ignore_case(token.value, "IS"):
                if equals_ignore_case(next_token.value, ["NOT"]):
                    return [token, next_token]
                else:
                    return [token]
        elif is_comparison_operator(token):
            if equals_ignore_case(next_token.value, ["ANY", "SOME", "ALL"]):
                return [token, next_token]
            else:
                return [token]
    else:
        if is_keyword(token):
            if equals_ignore_case(token.value, ["IN", "BETWEEN", "LIKE", "IS"]):
                return [token]
        elif is_comparison_operator(token):
            return [token]
    return []

def tokens_parenthesis_inner(parenthesis):
    """Get the list of tokens inside a parenthesis."""
    open_punc = parenthesis.token_next_match(0, T.Punctuation, '(')
    close_punc = parenthesis.token_next_match(open_punc, T.Punctuation, ')')
    return parenthesis.tokens_between(open_punc, close_punc)[1:-1]

def token_function_inner_parenthesis(func):
    ftoken = token_next_enable(func)
    return token_next_enable(func, ftoken)

def token_next_enable(token, idx=-1):
    """Get the next enabled token."""
    if not isinstance(idx, int):
        idx = token.token_index(idx)
    return token.token_matching(idx + 1, [is_enable])

def token_prev_enable(token, idx=-1):
    """Get the previous enabled token."""
    if not isinstance(idx, int):
        idx = token.token_index(idx)
    if idx < 0:
        idx = len(token.tokens)
    prv = token.token_prev(idx)
    while is_comment(prv):
        prv = token.token_prev(prv)
    return prv

def flatten_tokens_prev(top_token, token):
    """Generator over the preceding tokens."""
    tgt = next(flatten(token))
    iterator = flatten(top_token)
    tokens = []
    for tkn in iterator:
        if tkn == tgt:
            break
        tokens.append(tkn)
    for tkn in tokens[::-1]:
        yield tkn

def flatten_tokens_next(top_token, token):
    """Generator over the following tokens."""
    tgt = list(flatten(token))[-1]
    iterator = flatten(top_token)
    for tkn in iterator:
        if tkn == tgt:
            break
    for tkn in iterator:
        yield tkn

def token_parents(token):
    """Generator over the parent tokens."""
    while token:
        yield token
        token = token.parent

def token_top_matching(token, sub, func):
    """Walk up the parents and check for a matching token."""
    def in_parents(tkn):
        for parent in token_parents(sub):
            if tkn == parent:
                return True
        return False
    parents = token_parents(token)
    tkn = None
    for parent in parents:
        if func(parent):
            if in_parents(parent):
                return None
            tkn = parent
            break
    for parent in parents:
        if in_parents(parent):
            return tkn
        if not func(parent):
            return tkn
        tkn = parent
    return tkn

def within_with_section(stmt, token):
    """Check whether token is inside a WITH clause."""
    for tkn in tokens_tree_up(stmt, token):
        if equals_ignore_case(tkn.value, "WITH"):
            return tkn
        if is_dml(tkn):
            return None
    return None

def within_select_statement(stmt, token):
    """Check whether token is inside a SELECT statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "SELECT"):
                return tkn
            return None
    return None

def within_update_statement(stmt, token):
    """Check whether token is inside an UPDATE statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "UPDATE"):
                return tkn
            return None
    return None

def within_insert_statement(stmt, token):
    """Check whether token is inside an INSERT statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "INSERT"):
                return tkn
            return None
    return None

def within_merge_statement(stmt, token):
    """Check whether token is inside a MERGE statement."""
    for tkn in tokens_tree_up(stmt, token):
        if is_dml(tkn):
            if equals_ignore_case(tkn.value, "MERGE"):
                return tkn
            return None
    return None

def within_insert_values_section(stmt, token):
    """Check whether token is inside the VALUES clause of an INSERT."""
    itr = tokens_tree_up(stmt, token)
    for tkn in itr:
        if is_parenthesis(tkn):
            break
    for tkn in itr:
        if is_enable(tkn):
            if is_values_keyword(tkn):
                return tkn
            return None
    return None

def within_insert_into_columns_section(stmt, token):
    """Check whether token is inside the column list of an INSERT."""
    itr = tokens_tree_up(stmt, token)
    for tkn in itr:
        if is_parenthesis(tkn):
            break
    for tkn in itr:
        if is_enable(tkn):
            if is_identifier(tkn):
                break
            elif is_insert_dml(tkn):
                return tkn
            else:
                return None
    for tkn in itr:
        if is_enable(tkn):
            if is_into_keyword(tkn):
                return tkn
            return None
    return None

def within_update_set_section(stmt, token):
    """Check whether token is inside the SET clause of an UPDATE."""
    if not within_update_statement(stmt, token):
        return None
    if within_where_section(stmt, token):
        return None
    itr = tokens_tree_up(stmt, token)
    for tkn in itr:
        if is_set_keyword(tkn):
            return tkn
    return None

def within_where_section(stmt, token):
    """Check whether token is inside a WHERE clause."""
    for tkn in tokens_tree_up(stmt, token):
        if equals_ignore_case(tkn.value, "WHERE"):
            return tkn
        if is_dml(tkn):
            return None
    return None

def within_function(stmt, token):
    """Check whether token is inside a function."""
    for tkn in get_roots(stmt, token)[:]:
        if is_function(tkn):
            return tkn
    return None

def within_parenthesis(stmt, token):
    """Check whether token is inside a parenthesis."""
    for tkn in get_roots(stmt, token)[:]:
        if is_parenthesis(tkn):
            return tkn
    return None

def tokens_tree_up(stmt, token):
    """Generator walking backwards up the token tree."""
    roots = get_roots(stmt, token)
    cld = roots.pop(0)
    while roots:
        parent = roots.pop(0)
        prevs = []
        for tkn in parent.tokens:
            prevs.append(tkn)
            if tkn == cld:
                cld = parent
                break
        for tkn in prevs[::-1]:
            yield tkn

def get_roots(parent, token):
    """Return the list of tokens from token up to the root."""
    for tkn in parent.tokens:
        if tkn == token:
            return [token, parent]
        if isinstance(tkn, sql.TokenList):
            ret = get_roots(tkn, token)
            if ret:
                ret.append(parent)
                return ret
    return []

def get_parent(top_parent, token):
    """Get the parent of token, searching down from the given root."""
    for tkn in top_parent.tokens:
        tkn.parent = top_parent
        if tkn == token:
            return top_parent
        if isinstance(tkn, sql.TokenList):
            ret = get_parent(tkn, token)
            if ret:
                return ret
    return None

def flatten(token):
    """Flattening generator. Unlike sql.TokenList#flatten, this re-sets parent while processing."""
    if isinstance(token, sql.TokenList):
        for tkn in token.tokens:
            tkn.parent = token
            if isinstance(tkn, sql.TokenList):
                for item in flatten(tkn):
                    yield item
            else:
                yield tkn
    else:
        yield token

CONDITION = 1
VALUE = 2

def get_cases(case):
    """Returns a list of 2-tuples (condition, value).
    If an ELSE exists condition is None.
    """
    ret = []
    mode = CONDITION
    for token in case.tokens:
        # Set mode from the current statement
        if token.match(T.Keyword, 'CASE'):
            continue
        elif is_when(token):
            ret.append(([], []))
            mode = CONDITION
        elif token.match(T.Keyword, 'THEN'):
            mode = VALUE
        elif token.match(T.Keyword, 'ELSE'):
            ret.append((None, []))
            mode = VALUE
        elif token.match(T.Keyword, 'END'):
            mode = None
        # First condition without preceding WHEN
        if mode and not ret:
            ret.append(([], []))
        # Append token depending on the current mode
        if mode == CONDITION:
            ret[-1][0].append(token)
        elif mode == VALUE:
            ret[-1][1].append(token)
    # Return cases list
    return ret

def equals_ignore_case(txt1, txt2):
    """Case-insensitive string comparison."""
    if isinstance(txt2, str):
        values = {re.compile(txt2 + "$", re.IGNORECASE)}
    else:
        values = set(re.compile(v + "$", re.IGNORECASE) for v in txt2)
    for pattern in values:
        if pattern.match(txt1):
            return True
    return False

def startswith_ignore_case(target, txt):
    """Case-insensitive startswith."""
    if isinstance(txt, str):
        values = {re.compile(txt, re.IGNORECASE)}
    else:
        values = set(re.compile(v, re.IGNORECASE) for v in txt)
    for pattern in values:
        if pattern.match(target):
            return True
    return False

def endswith_ignore_case(target, txt):
    """Case-insensitive endswith."""
    if isinstance(txt, str):
        values = {re.compile(txt + "$", re.IGNORECASE)}
    else:
        values = set(re.compile(v + "$", re.IGNORECASE) for v in txt)
    for pattern in values:
        if pattern.search(target):
            return True
...
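The module above is essentially a library of small predicates over sqlparse tokens; note it targets an older sqlparse API plus the uroborosqlfmt token classes, which are not reproduced here. Below is a minimal, self-contained sketch of the same predicate style written against only the public sqlparse API I know to be stable; the two predicates mirror is_dml and is_comma from the module.

import sqlparse
from sqlparse import tokens as T

def is_dml(token):
    # Same check as the module's is_dml: DML keywords such as SELECT/UPDATE.
    return token.ttype in T.DML

def is_comma(token):
    # Same check as the module's is_comma: a "," punctuation token.
    return token.ttype in T.Punctuation and token.value == ','

stmt = sqlparse.parse('SELECT a, b FROM t WHERE x = 1')[0]
for token in stmt.flatten():  # walk the leaf tokens of the parse tree
    if is_dml(token) or is_comma(token):
        print(token.ttype, repr(token.value))

Running this prints the SELECT keyword and the comma between the column names, which is exactly the kind of classification the formatter builds on.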

