How to use the get_token method in Gherkin-python

pascal.py

Source: pascal.py (GitHub)

#! /usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import re
import os
from data import symbol_to_sort_map, max_length_symbol
import logging
from colorlog import ColoredFormatter

logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = ColoredFormatter(
    "%(log_color)s%(levelname)-8s %(message)s",
    datefmt=None,
    reset=True,
    log_colors={
        'DEBUG': 'cyan',
        'INFO': 'green',
        'WARNING': 'yellow',
        'ERROR': 'red',
        'CRITICAL': 'red,bg_white',
    },
    secondary_log_colors={},
    style='%'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)


class SymbolTable:
    def __init__(self):
        self.symbol = {}

    def create(self, symbol):
        if symbol not in self.symbol:
            self.symbol[symbol] = {}

class LexicalAnalysis:
    def __init__(self, filename):
        '''
        Token pair format:
            symbol [space] category
            16      1      2   (field widths)
        '''
        self.filename = filename + '.dyd'
        self.errorfile = filename + '.err'


    @classmethod
    def is_const(cls, symbol):
        if re.match(r'^\d+$', symbol):
            return True

    def error(self, text):
        open(self.errorfile, 'a').write(text)

    def is_identify(self, symbol, line=0):
        if symbol in symbol_to_sort_map:
            return False
        elif re.match(r'^[a-zA-Z_]+[a-zA-Z0-9_]*$', symbol):
            if len(str(symbol)) <= 16:
                return True
            else:
                e = '***LINE:%02d  overlength symbol "%s"' % (line, symbol)
                errorMsg = 'LexicalAnalysis.is_identify(1): %s' % e
                logger.error(errorMsg)
                self.error(e)
                return None
        else:
            return False

    def write(self, symbol, sort):
        text = '%16s %02d' % (symbol, sort)
        logger.debug('LexicalAnalysis.write: %s' % text)
        open(self.filename, 'a').write(text + '\n')

    def writeEOLN(self):
        text = '%16s %02d' % ('EOLN', 24)
        logger.debug('LexicalAnalysis.writeEOLN: %s' % text)
        open(self.filename, 'a').write(text + '\n')

    def writeEOF(self):
        text = '%16s %02d' % ('EOF', 24)
        logger.debug('LexicalAnalysis.writeEOF: %s' % text)
        open(self.filename, 'a').write(text + '\n')

    def do(self, source_code, line=1):
        # _ = re.findall('<\s+=', source_code)
        # for i in _:
        #     source_code = source_code.replace(i, '<=')
        # _ = re.findall('<\s+>', source_code)
        # for i in _:
        #     source_code = source_code.replace(i, '<>')
        # _ = re.findall('>\s+=', source_code)
        # for i in _:
        #     source_code = source_code.replace(i, '>=')
        # _ = re.findall(':\s+=', source_code)
        # for i in _:
        #     source_code = source_code.replace(i, ':=')
        source_code_split = source_code.replace('\n', ' \n ').split(' ')
        for k in source_code_split:
            if k == '\n':
                line += 1
                self.writeEOLN()
                continue
            if k == '':
                continue
            try:
                code, token = self.check_defines(k, line)
            except TypeError:
                continue
            # if code is None:
            #     continue
            self.write(code, token)
            if len(code) < len(k):
                self.do(k[len(code):], line)

    def check_defines(self, code, line=0):
        l = len(code)
        for i in range(max_length_symbol, 0, -1):
            if code[0:i] in symbol_to_sort_map:
                return (code[0:i], symbol_to_sort_map.get(code[0:i]))
        if self.is_const(code):
            return (code, 11)
        if self.is_identify(code, line=line) is None:
            return None
        elif self.is_identify(code, line=line):
            return (code, 10)
        if l > 1:
            return self.check_defines(code[0:-1], line)
        else:
            criticalMsg = '***LINE:%02d  invalid symbol "%s"' % (line, code)
            logger.critical('LexicalAnalysis.check_defines(1): %s' % criticalMsg)
            self.error(criticalMsg + '\n')
            # exit(0)
            # return (-1, -1)

class Var:
    def __init__(self):
        self.vname = ''   # variable name
        self.vproc = ''   # owning procedure
        self.vkind = -1   # kind: 0 = variable, 1 = formal parameter
        self.vtype = int  # variable type
        self.vlev = -1    # nesting level
        self.vadr = 0     # position in the variable table, relative to the first variable

class Pro:
    def __init__(self):
        self.pname = ''   # procedure name
        self.ptype = int  # procedure type
        self.plev = 0     # procedure nesting level
        self.varNum = 0   # number of variables
        self.fadr = 0     # position of the first variable
        self.ladr = 0     # position of the last variable
        self.parameter = -1
        self.parameterIsDefined = False

class GrammarAnalysis:
    def __init__(self, filename):
        self.filename = filename + '.dyd'
        self.dysfilename = filename + '.dys'
        self.errorfile = filename + '.err'
        self.varfile = filename + '.var'
        self.profile = filename + '.pro'
        self.line = 1
        self.token_index = 0
        self.var = []
        self.pro = []
        self.currentVar = Var()
        self.currentPro = Pro()
        self.char_index = 0
        self.error_cnt = 0

    def init(self):
        logger.debug('GrammarAnalysis.init(1)')
        logger.info('GrammarAnalysis.init(2): read file %s' % self.filename)
        os.system('cp %s %s' % (self.filename, self.dysfilename))
        self.tokens = open(self.filename, 'r').readlines()

    def check(self, currentVar=None):
        print('total error: %d ' % self.error_cnt)
        print('----')
        # print('list is:')
        # for i in range(0, len(self.tokens), 1):
        #     print('<%d>%s' % (i, self.tokens[i]), end='')
        print('now read:')
        print(self.token_index, self.tokens[self.token_index])
        print('var[]:')
        for i in self.var:
            print('vname=%s, vproc=%s, vkind=%d, vlev=%d, vadr=%d' % (i.vname, i.vproc, i.vkind, i.vlev, i.vadr))
        print('pro[]:')
        for i in self.pro:
            print('pname=%s, ptype=%s, plev=%d, varNum=%d, fadr=%d, ladr=%d, parameter=%d, parameterIsDefined=%s' % (i.pname, i.ptype, i.plev, i.varNum, i.fadr, i.ladr, i.parameter, i.parameterIsDefined))

        if currentVar is not None:
            print('currentVar')
            i = currentVar
            print('vname=%s, vproc=%s, vkind=%d, vlev=%d, vadr=%d' % (i.vname, i.vproc, i.vkind, i.vlev, i.vadr))
        print('currentPro:')
        i = self.currentPro
        print('pname=%s, ptype=%s, plev=%d, varNum=%d, fadr=%d, ladr=%d, parameter=%d, parameterIsDefined=%s' % (i.pname, i.ptype, i.plev, i.varNum, i.fadr, i.ladr, i.parameter, i.parameterIsDefined))
        print('----')

    def get_token(self, index=-1, code=False, change=True):
        # Read one token pair from the .dyd token list:
        # columns 0-15 hold the symbol, columns 17-18 its two-digit category code.
        if index == -1:
            index = self.token_index

        d = ''.join(self.tokens[index].strip('\n')[0:16].split())
        c = int(''.join(self.tokens[index].strip('\n')[17:19].split()))
        if d == 'EOLN':
            logger.info('get EOLN')
            if change:
                self.line += 1
                logger.info('now line is %d, ->%s %d' % (self.line, d, c))
                self.set_token_offset(1)
            # skip the EOLN marker and return the next real token
            return self.get_token(index + 1)
        # logger.debug('-> %s' % d)
        if code:
            return c
        else:
            return d

    def set_token_offset(self, offset=0, absolute=0):
        self.char_index = 0
        if offset != 0:
            logger.debug('GrammarAnalysis.set_token_offset(1): get next %d' % offset)
            self.token_index += offset
        elif absolute != 0:
            logger.debug('GrammarAnalysis.set_token_offset(1): turn to %d' % absolute)
            self.token_index = absolute
        logger.info('now token offset is %d' % self.token_index)

    def error(self, text):
        logger.error(text)
        open(self.errorfile, 'a').write(text + '\n')
        self.error_cnt += 1

    def do(self):
        self.init()
        logger.debug('GrammarAnalysis.do(1)')
        self.A()
        self.check()
        self.writeVar()
        self.writePro()

    def writeVar(self):
        line = '%16s %16s %1d %s %d %d\n'
        for i in self.var:
            if i.vtype == int:
                types = 'integer'
            else:
                types = ''
            open(self.varfile, 'a').write(line % (i.vname, i.vproc, i.vkind, types, i.vlev, i.vadr))

    def writePro(self):
        line = '%16s %s %d %d %d\n'
        for i in self.pro:
            if i.ptype == int:
                types = 'integer'
            else:
                types = ''
            open(self.profile, 'a').write(line % (i.pname, types, i.plev, i.fadr, i.ladr))

    def A(self):
        logger.debug('GrammarAnalysis.A(1)')
        self.B()

    def B(self):
        logger.debug('GrammarAnalysis.B(1)')

        if self.get_token() == 'begin':
            logger.debug('GrammarAnalysis.B(2): get <begin>')
            self.set_token_offset(offset=1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'begin'))
            if self.get_token() != 'integer':
                self.set_token_offset(1)
        self.C()
        if self.get_token() == ';':
            logger.debug('GrammarAnalysis.B(3): get <;>')
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ';'))
            if self.get_token() != 'integer' and self.get_token() != 'read' and self.get_token() != 'write' and self.get_token(code=True) != 10:
                self.set_token_offset(1)
        self.M()
        if self.get_token() == 'end':
            logger.debug('GrammarAnalysis.B(4): get <end>')
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'end'))

    def C(self):
        logger.debug('GrammarAnalysis.C(1)')

        self.D()
        self.C_()

    def C_(self):
        logger.debug('GrammarAnalysis.C_(1)')

        self.get_token()
        if self.get_token() == ';' and self.get_token(self.token_index + 1, change=False) == 'integer':
            logger.info('GrammarAnalysis.C_(2): get ";" and next is "integer"')
            self.set_token_offset(1)
            self.D()
            self.C_()
        else:
            if self.get_token() == 'integer':
                logger.info('GrammarAnalysis.C_(3): get "integer"')
                self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ';'))
                self.D()
                self.C_()

    def D(self):
        logger.debug('GrammarAnalysis.D(1)')
        self.get_token()
        if self.get_token(self.token_index + 1, change=False) == 'function':
            logger.info('GrammarAnalysis.D(2): next is "function"')
            self.J()
        else:
            logger.info('GrammarAnalysis.D(3): next is %s NOT "function"' % self.get_token())
            self.E()

    def is_var_exists(self, vname, vproc, vkind):
        for i in self.var:
            logger.info('GrammarAnalysis.is_var_exists(1): check this <%s, %s, %s> == <%s, %s, %s>' % (vname, vproc, vkind, i.vname, i.vproc, i.vkind))
            if i.vname == vname and i.vproc == vproc and i.vkind == vkind:
                return True

        for i in self.pro:
            logger.info('GrammarAnalysis.is_var_exists(1): check this <%s> == <%s>' % (vname, i.pname))
            if i.pname == vname:
                return True
        if vproc != '':
            return self.is_var_exists(vname, '', vkind)
        else:
            return False

    def is_pro_exists(self, vname):
        for i in self.var:
            if vname == i.vname:
                return True
        for i in self.pro:
            if vname == i.pname:
                return True
        return False

    def E(self):
        logger.debug('GrammarAnalysis.E(1)')
        currentVar = Var()

        if self.get_token() == 'integer':
            logger.info('GrammarAnalysis.E(2): get "%s"' % self.get_token())
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'integer'))
            self.set_token_offset(1)

        currentVar.vname = self.get_token()
        currentVar.vproc = self.currentPro.pname
        # if self.token_index == self.currentPro.parameter:
        if currentVar.vproc != '':
            currentVar.vkind = 1
            self.currentPro.parameterIsDefined = 1
        else:
            currentVar.vkind = 0
        currentVar.vtype = int
        currentVar.vlev = self.currentPro.plev
        currentVar.vadr = len(self.var)

        if self.is_var_exists(self.get_token(), self.currentPro.pname, currentVar.vkind):
            self.error('***LINE:%02d  Redefine var "%s"' % (self.line, self.get_token()))
        else:
            if self.currentPro.varNum == 0:
                self.currentPro.fadr = currentVar.vadr
            self.currentPro.ladr = currentVar.vadr
            self.currentPro.varNum += 1
            self.var.append(currentVar)

        self.F()

    def F(self):
        logger.debug('GrammarAnalysis.F(1)')
        self.G()

    def G(self):
        logger.debug('GrammarAnalysis.G(1)')
        if self.get_token(code=True) == 10:
            logger.info('GrammarAnalysis.G(2): get identifier "%s"' % self.get_token())
            self.set_token_offset(1)

    def J(self):
        logger.debug('GrammarAnalysis.J(1)')

        pro_bak = self.currentPro

        if self.get_token() == 'integer':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'integer'))
            if self.get_token() != 'function':
                self.set_token_offset(1)
        if self.get_token() == 'function':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'function'))
            if self.get_token(code=True) != 10:
                self.set_token_offset(1)

        self.currentPro.pname = self.get_token()
        self.currentPro.ptype = int
        self.currentPro.plev += 1
        self.currentPro.varNum = 0
        self.currentPro.parameterIsDefined = False

        if self.is_pro_exists(self.get_token()):
            self.error('***LINE:%02d  Redefine "%s"' % (self.line, self.get_token()))

        self.G()

        if self.get_token() == '(':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, '('))
            if self.get_token(code=True) != 10:
                self.set_token_offset(1)

        self.currentPro.parameter = self.token_index
        self.K()

        if self.get_token() == ')':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ')'))
            if self.get_token() != ';':
                self.set_token_offset(1)

        if self.get_token() == ';':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ';'))
            if self.get_token() != 'begin':
                self.set_token_offset(1)

        self.L()

        self.currentPro = pro_bak

    def K(self):
        logger.debug('GrammarAnalysis.K(1)')

        self.F()

    def L(self):
        logger.debug('GrammarAnalysis.L(1)')

        if self.get_token() == 'begin':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'begin'))
            if self.get_token() != 'integer':
                self.set_token_offset(1)

        self.C()
        if not self.currentPro.parameterIsDefined:
            self.error('***LINE:%02d  No Para "%s"' % (self.line, self.get_token()))

        _ = self.currentPro
        self.pro.append(_)
        if self.get_token() == ';':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ';'))
            if self.get_token() != 'integer' and self.get_token() != 'read' and self.get_token() != 'write' and self.get_token(code=True) != 10:
                self.set_token_offset(1)

        self.M()
        if self.get_token() == 'end':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'end'))
            if self.get_token() != ';' and self.get_token() != 'end':
                self.set_token_offset(1)

    def M(self):
        logger.debug('GrammarAnalysis.M(1)')

        self.N()
        self.M_()

    def M_(self):
        logger.debug('GrammarAnalysis.M_(1)')

        if self.get_token() == ';':
            logger.info('GrammarAnalysis.M_(2): get "%s"' % self.get_token())
            self.set_token_offset(1)
            self.N()
            self.M_()
        else:
            if self.get_token() != 'end' and self.get_token() != 'EOF':
                logger.info('GrammarAnalysis.M_(3): get "%s"' % self.get_token())
                self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ';'))
                self.N()
                self.M_()

    def N(self):
        logger.debug('GrammarAnalysis.N(1)')

        logger.info('GrammarAnalysis.N(2): get "%s"' % self.get_token())
        if self.get_token() == 'read':
            self.O()
        elif self.get_token() == 'write':
            self.P()
        elif self.get_token() == 'if':
            self.W()
        elif self.get_token(code=True) == 10:
            logger.info('GrammarAnalysis.N(3): get identifier "%s"' % self.get_token())
            self.Q()
        else:
            self.error('***LINE:%02d  Symbol exec error "%s"' % (self.line, self.get_token()))
            self.set_token_offset(1)

    def O(self):
        logger.debug('GrammarAnalysis.O(1)')

        if self.get_token() == 'read':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'read'))
            if self.get_token() != '(':
                self.set_token_offset(1)

        if self.get_token() == '(':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, '('))
            if self.get_token(code=True) != 10:
                self.set_token_offset(1)

        if not self.is_var_exists(self.get_token(), self.currentPro.pname, False) and not self.is_var_exists(self.get_token(), self.currentPro.pname, True):
            self.error('***LINE:%02d  Undefined Symbol "%s"' % (self.line, self.get_token()))
        self.F()
        if self.get_token() == ')':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ')'))
            if self.get_token() != ';' and self.get_token() != 'end':
                self.set_token_offset(1)

    def P(self):
        logger.debug('GrammarAnalysis.P(1)')

        if self.get_token() == 'write':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'write'))
            if self.get_token() != '(':
                self.set_token_offset(1)
        if self.get_token() == '(':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, '('))
            if self.get_token(code=True) != 10:
                self.set_token_offset(1)

        if not self.is_var_exists(self.get_token(), self.currentPro.pname, False) and not self.is_var_exists(self.get_token(), self.currentPro.pname, True):
            self.error('***LINE:%02d  Undefined Symbol "%s"' % (self.line, self.get_token()))

        self.F()
        if self.get_token() == ')':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ')'))
            if self.get_token() != ';' and self.get_token() != 'end':
                self.set_token_offset(1)

    def Q(self):
        logger.debug('GrammarAnalysis.Q(1)')

        if not self.is_var_exists(self.get_token(), self.currentPro.pname, False) and not self.is_var_exists(self.get_token(), self.currentPro.pname, True):
            logger.info('GrammarAnalysis.Q(2): get "%s"' % self.get_token())
            self.error('***LINE:%02d  Undefined Symbol "%s"' % (self.line, self.get_token()))

        self.F()
        if self.get_token() == ':=':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ':='))
            if self.get_token(code=True) != 10 and self.get_token(code=True) != 11:
                self.set_token_offset(1)
        self.R()

    def R(self):
        logger.debug('GrammarAnalysis.R(1)')

        self.S()
        self.R_()

    def R_(self):
        logger.debug('GrammarAnalysis.R_(1)')

        if self.get_token() == '-':
            logger.info('GrammarAnalysis.R_(2): get "%s"' % self.get_token())
            self.set_token_offset(1)
            self.S()
            self.R_()
        else:
            if self.get_token(code=True) == 10 or self.get_token(code=True) == 11:
                logger.info('GrammarAnalysis.R_(3): get "%s"' % self.get_token())
                self.S()
                self.R_()

    def S(self):
        logger.debug('GrammarAnalysis.S(1)')
        self.T()
        self.S_()

    def S_(self):
        logger.debug('GrammarAnalysis.S_(1)')

        if self.get_token() == '*':
            logger.info('GrammarAnalysis.S_(2): get "*"')
            self.set_token_offset(1)
            self.T()
            self.S_()
        else:
            if self.get_token(code=True) == 10 or self.get_token(code=True) == 11:
                logger.info('GrammarAnalysis.S_(3): get "%s"' % self.get_token())
                self.T()
                self.S_()

    def T(self):
        logger.debug('GrammarAnalysis.T(1)')

        if ord('0') <= ord(self.get_token()[self.char_index]) <= ord('9'):
            logger.info('GrammarAnalysis.T(2): get const "%s"' % self.get_token())
            self.U()
        elif self.get_token(self.token_index + 1, change=False) == '(':
            logger.info('GrammarAnalysis.T(3): get "("')
            self.Z()
        else:
            if not self.is_var_exists(self.get_token(), self.currentPro.pname, False) and not self.is_var_exists(self.get_token(), self.currentPro.pname, True):
                self.error('***LINE:%02d  Undefined Symbol "%s"' % (self.line, self.get_token()))
            self.F()

    def U(self):
        logger.debug('GrammarAnalysis.U(1)')
        if self.get_token(code=True) == 11:
            self.set_token_offset(1)

    def W(self):
        logger.debug('GrammarAnalysis.W(1)')
        if self.get_token() == 'if':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'if'))
            if self.get_token(code=True) != 10 and self.get_token(code=True) != 11:
                self.set_token_offset(1)
        self.X()

        if self.get_token() == 'then':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'then'))
            if self.get_token() != 'integer' and self.get_token() != 'read' and self.get_token() != 'write' and self.get_token(code=True) != 10:
                self.set_token_offset(1)
        self.N()
        if self.get_token() == 'else':
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'else'))
            if self.get_token() != 'integer' and self.get_token() != 'read' and self.get_token() != 'write' and self.get_token(code=True) != 10:
                self.set_token_offset(1)
        self.N()

    def X(self):
        logger.debug('GrammarAnalysis.X(1)')
        self.R()
        self.Y()
        self.R()

    def Y(self):
        logger.debug('GrammarAnalysis.Y(1)')
        if self.get_token() in ('<', '<=', '>', '>=', '=', '<>'):
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, 'comparison symbol'))
            if self.get_token(code=True) != 10 and self.get_token(code=True) != 11:
                self.set_token_offset(1)

    def Z(self):
        logger.debug('GrammarAnalysis.Z(1)')
        if not self.is_pro_exists(self.get_token()):
            self.error('***LINE:%02d  Undefined Symbol "%s"' % (self.line, self.get_token()))
        self.G()
        if self.get_token() == '(':
            logger.info('GrammarAnalysis.Z(2): get "("')
            self.set_token_offset(1)
        else:
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, '('))
            if self.get_token(code=True) != 10 and self.get_token(code=True) != 11:
                logger.info('GrammarAnalysis.Z(3): get "%s"' % self.get_token())
                self.set_token_offset(1)
        self.R()
        if self.get_token() == ')':
            logger.info('GrammarAnalysis.Z(4): get "%s"' % self.get_token())
            self.currentPro = Pro()
            self.set_token_offset(1)
        else:
            logger.info('GrammarAnalysis.Z(5): get "%s"' % self.get_token())
            self.error('***LINE:%02d  No Symbol "%s"' % (self.line, ')'))
            if self.get_token() != '-' and self.get_token() != '*' and self.get_token() != ';' and self.get_token() != 'end':
                self.set_token_offset(1)

class ReadFile:
    def __init__(self, input_file):
        self.input_file = input_file
        try:
            source_lines = open(input_file, 'r').readlines()
        except FileNotFoundError:
            logger.error('FileNotFound: %s' % input_file)
            exit(1)
        self.source_code = ''.join(source_lines)

    def get_source_code(self):
        return self.source_code


if __name__ == '__main__':
    # a = LexicalAnalysis('test')
    # a.write('begin', 1)
    parser = argparse.ArgumentParser(description='little exp')
    parser.add_argument('-f', '--file', help='source code file')
    parser.add_argument('-c', '--clean', help='clean mid code', action='store_true')
    parser.add_argument('-l', '--lexical', help='LexicalAnalysis', action='store_true')
    parser.add_argument('-g', '--grammar', help='GrammarAnalysis', action='store_true')

    source_code = ''
    args = parser.parse_args()
    if args.clean:
        logger.info('main: delete files')
        os.system('rm -f *.dyd *.err *.var *.pro *.dys')
    elif args.file is None:
        parser.print_help()
    else:
        logger.info('Read file from %s' % args.file)
        source_code = ReadFile(args.file).get_source_code()
        if args.lexical:
            la = LexicalAnalysis('.'.join(args.file.split('.')[:-1]))
            la.do(source_code)
            la.writeEOF()
        if args.grammar:
            ga = GrammarAnalysis('.'.join(args.file.split('.')[:-1]))
            ga.do()
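
In the snippet above, GrammarAnalysis.get_token treats the .dyd file as a fixed-width token list: columns 0-15 hold the symbol, columns 17-18 its two-digit category code, EOLN entries advance the line counter, and code=True returns the numeric category instead of the text. A minimal standalone sketch of that decoding, assuming a demo.dyd file (the file name is hypothetical) written by LexicalAnalysis.write:

# Sketch: decode one '%16s %02d' token line the way get_token does.
# Assumes demo.dyd was produced by LexicalAnalysis above; the name is hypothetical.
tokens = open('demo.dyd', 'r').readlines()

def peek(index):
    line = tokens[index].strip('\n')
    symbol = ''.join(line[0:16].split())      # 16-character, right-aligned symbol field
    code = int(''.join(line[17:19].split()))  # two-digit category code
    return symbol, code

print(peek(0))  # e.g. ('begin', 1)

Because get_token recurses past EOLN entries, callers such as B() and C_() can compare symbols and peek ahead with get_token(self.token_index + 1, change=False) without ever seeing line markers.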

parser.py

Source: parser.py (GitHub)

from tokens.token import TokenType, Token
from error import SyntaxError
import parser_objects


class Parser:
    def __init__(self, tokens):
        self.tokens = tokens
        self.current_token = -1
        self.tree = self.create_tree()

    # get the next token from the token list
    def get_token(self):
        self.current_token += 1
        return self.tokens[self.current_token]

    # compare a token's type against the expected type
    def compare_tokens(self, token, type, msg=""):
        if token.token_type != type:
            raise SyntaxError(token, msg)

    # parse all tokens and return the AST
    def create_tree(self):
        model_description, model_content = self.parse_model()
        model = parser_objects.Model(model_content[0], model_content[1], model_content[2],
                                     model_content[3], model_description[0], model_description[1])
        return model

    # parse the top-level model
    def parse_model(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET, "Model does not start with '<'")
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_UML_MODEL, "Model not defined")

        model_description = self.parse_model_description()  # [id, name]

        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET, "Model description does not end with '>'")

        model_content = self.parse_model_content()  # [file_description, imports, elements, profiles]

        self.parse_model_end()
        return model_description, model_content

    # parse model description
    # model description = 'xmi:version="2.0" xmlns:xmi="http://www.omg.org/XMI"
    # xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ecore="http://www.eclipse.org/emf/2002/Ecore" ,
    # ['xmlns:notation="http://www.eclipse.org/gmf/runtime/1.0.2/notation"'] ,
    # xmlns:uml="http://www.eclipse.org/uml2/3.0.0/UML" xmlns:umlnotation="http://www.ibm.com/xtools/1.5.3/Umlnotation"
    # xmi:id=', string value, ' name=', string value;
    def parse_model_description(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_VERSION)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMLNS_XMI)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMLNS_XSI)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMLNS_ECORE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        token = self.get_token()
        if token.token_type == TokenType.T_XMLNS_NOTATION:
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_EQUALS)
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_STRING_VALUE)
            token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMLNS_UML)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMLNS_UML_NOTATION)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        # model id
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_NAME)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        # model name
        name = token.value
        return [id, name]

    # parse everything after the model description
    # model contents = file description,{ package import },{ packaged element },{ profile application };
    def parse_model_content(self):
        file_description = self.parse_file_description()
        imports = []
        pckg_import = self.parse_package_import()
        while pckg_import is not None:
            imports.append(pckg_import)
            pckg_import = self.parse_package_import()

        elements = []
        pckg_element = self.parse_packaged_element()
        while pckg_element is not None:
            elements.append(pckg_element)
            pckg_element = self.parse_packaged_element()

        profiles = []
        profile_application = self.parse_profile_application()
        while profile_application is not None:
            profiles.append(profile_application)
            profile_application = self.parse_profile_application()

        return [file_description, imports, elements, profiles]

    # file description = "<eAnnotations xmi:id=", string value, 'source="uml2.diagrams"', ['references=', string value]
    # ,'>', file name, "</eAnnotations>";
    def parse_file_description(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EANNOTATIONS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SOURCE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        source = token.value
        token = self.get_token()
        if token.token_type == TokenType.T_REFERENCES:
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_EQUALS)
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_STRING_VALUE)
            token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)

        graphics = self.parse_file_name()

        file_description = parser_objects.FileDescription(graphics.graphic, id, source)
        return file_description

    # file name = "<contents xmi:type="umlnotation:UMLDiagram" xmi:id=", string value, ' type="Class" name=',
    # string value, ">", graphic description, "</contents>";
    def parse_file_name(self):
        token = self.get_token()
        graphics = parser_objects.GraphicDescription()
        while type(token) is not Token:
            # skip the graphic description:
            # append all its tokens to the GraphicDescription object
            graphics.graphic.append(token)
            token = self.get_token()
        self.current_token -= 1
        return graphics

    # package import = "<packageImport xmi:id=", id, ">", package, "</packageImport>";
    def parse_package_import(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()

        if token.token_type == TokenType.T_PACKAGED_ELEMENT:
            self.current_token -= 2
            return None

        self.compare_tokens(token, TokenType.T_PACKAGE_IMPORT)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)

        type, href = self.parse_package()

        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SLASH)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_PACKAGE_IMPORT)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        package = parser_objects.ImportedPackage(id, type, href)
        return package

    # package = '<importedPackage xmi:type="uml:Model" href=', string value, "/>";
    def parse_package(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_IMPORTED_PACKAGE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_TYPE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        type = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_HREF)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        href = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SLASH)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        return type, href

    # packaged element = "<packagedElement xmi:type=", ( class | association ), "</packagedElement>";
    def parse_packaged_element(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()

        if token.token_type != TokenType.T_PACKAGED_ELEMENT:
            self.current_token -= 2
            return None

        self.compare_tokens(token, TokenType.T_PACKAGED_ELEMENT)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_TYPE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        if token.value == '"uml:Class"':
            return self.parse_class()
        elif token.value == '"uml:Association"':
            return self.parse_association()
        else:
            raise SyntaxError(token, "Unrecognised packaged element, xmi:type expected: uml:Class or uml:Association")

    # class = '"uml:Class" xmi:id=', string value, " name=", string value, visibility, ['isLeaf="true"'],
    # ['isAbstract="true"'], ">", [ stereotype ], [ generalization ], {attribute}, {operation};
    def parse_class(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_NAME)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        name = token.value

        visibility = self.parse_visibility()
        is_abstract = '"false"'
        is_leaf = '"false"'
        token = self.get_token()
        if token.token_type != TokenType.T_RIGHT_BRACKET and token.token_type != TokenType.T_SLASH:
            while token.token_type == TokenType.T_IS_LEAF or token.token_type == TokenType.T_IS_ABSTRACT:
                option_type = token.token_type
                token = self.get_token()
                self.compare_tokens(token, TokenType.T_EQUALS)
                token = self.get_token()
                self.compare_tokens(token, TokenType.T_STRING_VALUE)
                if token.value != '"true"' and token.value != '"false"':
                    raise SyntaxError(token, "Unexpected value, expected true or false value")
                else:
                    if option_type == TokenType.T_IS_LEAF:
                        is_leaf = token.value
                    elif option_type == TokenType.T_IS_ABSTRACT:
                        is_abstract = token.value
                    token = self.get_token()
        if token.token_type == TokenType.T_SLASH:
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
            parsed_class = parser_objects.Class(id, name, visibility, is_leaf, is_abstract, None, [], [], [])
            return parsed_class
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        stereotype = self.parse_stereotype()
        generalizations = []
        generalization = self.parse_generalization()
        while generalization is not None:
            generalizations.append(generalization)
            generalization = self.parse_generalization()
        attributes = []
        attribute = self.parse_attribute()
        while attribute is not None:
            attributes.append(attribute)
            attribute = self.parse_attribute()
        operations = []
        operation = self.parse_operation()
        while operation is not None:
            operations.append(operation)
            operation = self.parse_operation()
        self.parse_packaged_element_end()
        parsed_class = parser_objects.Class(id, name, visibility, is_leaf, is_abstract, stereotype, generalizations, attributes, operations)
        return parsed_class

    # visibility = " visibility=", visibility type;
    # visibility type = "public" | "private" | "protected" | "package";
    def parse_visibility(self):
        token = self.get_token()
        if token.token_type != TokenType.T_VISIBILITY:
            self.current_token -= 1
            return '"public"'
        self.compare_tokens(token, TokenType.T_VISIBILITY)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        if token.value == '"public"' or token.value == '"private"' or token.value == '"protected"' \
                or token.value == '"package"':
            return token.value
        else:
            raise SyntaxError(token, "Unexpected visibility")

    # stereotype = "<eAnnotations xmi:id=", string value, " source=", string value, ">", stereotype description,
    #               {stereotype description}, "</eAnnotations>";
    def parse_stereotype(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        if token.token_type != TokenType.T_EANNOTATIONS:
            self.current_token -= 2
            return None
        self.compare_tokens(token, TokenType.T_EANNOTATIONS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SOURCE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        source = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        stereotypes = []
        stereotype = self.parse_stereotype_description()
        while stereotype is not None:
            stereotypes.append(stereotype)
            stereotype = self.parse_stereotype_description()
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SLASH)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EANNOTATIONS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        stereotypes_obj = parser_objects.Stereotype(id, source, stereotypes)
        return stereotypes_obj

    # stereotype description = "<details xmi:id=", string value, " key=", string value, "/>";
    def parse_stereotype_description(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        if token.token_type != TokenType.T_DETAILS:
            self.current_token -= 2
            return None
        self.compare_tokens(token, TokenType.T_DETAILS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_KEY)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        key = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SLASH)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        return [id, key]

    # generalization = "<generalization xmi:id=", string value, " general=", string value, "/>";
    def parse_generalization(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        if token.token_type != TokenType.T_GENERALIZATION:
            self.current_token -= 2
            return None
        self.compare_tokens(token, TokenType.T_GENERALIZATION)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_GENERAL)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        general = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SLASH)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        generalization = parser_objects.Generalization(id, general)
        return generalization

    # attribute = "<ownedAttribute xmi:id=", string value, " name=", string value, attribute parameters,
    #           ( "/>" | attribute description );
    # attribute description = ">", [type], [limit], [default value], "</ownedAttribute>";
    def parse_attribute(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        if token.token_type != TokenType.T_OWNED_ATTRIBUTE:
            self.current_token -= 2
            return None
        self.compare_tokens(token, TokenType.T_OWNED_ATTRIBUTE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_ID)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        id = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_NAME)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        name = token.value
        parameters = self.parse_attribute_parameters()
        token = self.get_token()
        if token.token_type == TokenType.T_SLASH:
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
            attribute = parser_objects.Attribute(id, name, parameters, None, None, None, None)
            return attribute
        elif token.token_type == TokenType.T_RIGHT_BRACKET:
            type = self.parse_type()
            upper_limit = self.parse_upper_limit()
            lower_limit = self.parse_lower_limit()
            default_value = self.parse_default_value()
            if type is None and default_value[1] is not None:
                type = default_value[1]
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_SLASH)
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_OWNED_ATTRIBUTE)
            token = self.get_token()
            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
            attribute = parser_objects.Attribute(id, name, parameters, type, upper_limit, lower_limit, default_value[0])
            return attribute
        else:
            raise SyntaxError(token, "Unexpected OwnedAttribute ending, expected '/' or '>'")

    # type = "<type xmi:type=", string value, "href=", string value,"/>";
    def parse_type(self):
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
        token = self.get_token()
        if token.token_type != TokenType.T_TYPE:
            self.current_token -= 2
            return None
        self.compare_tokens(token, TokenType.T_TYPE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_XMI_TYPE)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        type = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_HREF)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_EQUALS)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_STRING_VALUE)
        href = token.value
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_SLASH)
        token = self.get_token()
        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
        return type, href

532    # default value = "<defaultValue xmi:type=", string value, " xmi:id=", string value, " value=", string value,
533    # ( default value type | "/>" );
534    # default value type = type, "</defaultValue>";
535    def parse_default_value(self):
536        token = self.get_token()
537        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
538        token = self.get_token()
539        if token.token_type != TokenType.T_DEFAULT_VALUE:
540            self.current_token -= 2
541            return [None, None]
542        self.compare_tokens(token, TokenType.T_DEFAULT_VALUE)
543        token = self.get_token()
544        self.compare_tokens(token, TokenType.T_XMI_TYPE)
545        token = self.get_token()
546        self.compare_tokens(token, TokenType.T_EQUALS)
547        token = self.get_token()
548        self.compare_tokens(token, TokenType.T_STRING_VALUE)
549        type = token.value
550        token = self.get_token()
551        self.compare_tokens(token, TokenType.T_XMI_ID)
552        token = self.get_token()
553        self.compare_tokens(token, TokenType.T_EQUALS)
554        token = self.get_token()
555        self.compare_tokens(token, TokenType.T_STRING_VALUE)
556        id = token.value
557        token = self.get_token()
558        self.compare_tokens(token, TokenType.T_VALUE)
559        token = self.get_token()
560        self.compare_tokens(token, TokenType.T_EQUALS)
561        token = self.get_token()
562        self.compare_tokens(token, TokenType.T_STRING_VALUE)
563        value = token.value
564        token = self.get_token()
565        default_type = None
566        if token.token_type != TokenType.T_SLASH:
567            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
568            default_type = self.parse_type()
569            token = self.get_token()
570            self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
571            token = self.get_token()
572            self.compare_tokens(token, TokenType.T_SLASH)
573            token = self.get_token()
574            self.compare_tokens(token, TokenType.T_DEFAULT_VALUE)
575        token = self.get_token()
576        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
577        default_value = parser_objects.Limit(type, id, value)
578        return [default_value, default_type]
579
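    # The branch above distinguishes a self-closing <defaultValue .../> from an
    # expanded element that nests a <type .../> child before </defaultValue>;
    # returning the [default_value, default_type] pair lets the caller fall back
    # on the nested type when no standalone <type> element follows (see
    # parse_owned_parameter_description below).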
580    # attribute parameters = visibility, ['isLeaf="true"'], ['isStatic="true"'], ['isOrdered="true"'],
581    # ['isReadOnly="true"'], ['isDerived="true"'], ['isDerivedUnion="true"'], [short type], [association type];
582    # association type = [ 'aggregation="composite"' | 'aggregation="shared"' ], "association=", string value;
583    def parse_attribute_parameters(self):
584        visibility = self.parse_visibility()
585        token = self.get_token()
586        aggregation = None
587        association = None
588        type = None
589        options = ['"false"', '"false"', '"false"', '"true"', '"false"', '"false"', '"false"']
590        if token.token_type != TokenType.T_RIGHT_BRACKET and token.token_type != TokenType.T_SLASH:
591            while token.token_type == TokenType.T_IS_LEAF or token.token_type == TokenType.T_IS_STATIC \
592                    or token.token_type == TokenType.T_IS_ORDERED or token.token_type == TokenType.T_IS_READ_ONLY \
593                    or token.token_type == TokenType.T_IS_DERIVED or token.token_type == TokenType.T_IS_DERIVED_UNION \
594                    or token.token_type == TokenType.T_IS_UNIQUE:
595                type = token.token_type
596                token = self.get_token()
597                self.compare_tokens(token, TokenType.T_EQUALS)
598                token = self.get_token()
599                self.compare_tokens(token, TokenType.T_STRING_VALUE)
600                if type == TokenType.T_IS_LEAF:
601                    options[0] = token.value
602                elif type == TokenType.T_IS_STATIC:
603                    options[1] = token.value
604                elif type == TokenType.T_IS_ORDERED:
605                    options[2] = token.value
606                elif type == TokenType.T_IS_UNIQUE:
607                    options[3] = token.value
608                elif type == TokenType.T_IS_READ_ONLY:
609                    options[4] = token.value
610                elif type == TokenType.T_IS_DERIVED:
611                    options[5] = token.value
612                elif type == TokenType.T_IS_DERIVED_UNION:
613                    options[6] = token.value
614                type = None
615                token = self.get_token()
616            if token.token_type == TokenType.T_TYPE:
617                token = self.get_token()
618                self.compare_tokens(token, TokenType.T_EQUALS)
619                token = self.get_token()
620                self.compare_tokens(token, TokenType.T_STRING_VALUE)
621                type = token.value
622                token = self.get_token()
623            if token.token_type == TokenType.T_AGGREGATION:
624                token = self.get_token()
625                self.compare_tokens(token, TokenType.T_EQUALS)
626                token = self.get_token()
627                self.compare_tokens(token, TokenType.T_STRING_VALUE)
628                aggregation = token.value
629                token = self.get_token()
630            if token.token_type == TokenType.T_ASSOCIATION:
631                token = self.get_token()
632                self.compare_tokens(token, TokenType.T_EQUALS)
633                token = self.get_token()
634                self.compare_tokens(token, TokenType.T_STRING_VALUE)
635                association = token.value
636                token = self.get_token()
637
638        self.current_token -= 1
639        parameters = parser_objects.AttributeParameters(visibility, options[0], options[1], options[2], options[3], options[4], options[5], options[6], aggregation, association, type)
640        return parameters
641
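    # options[] carries the seven boolean attribute flags positionally
    # (isLeaf, isStatic, isOrdered, isUnique, isReadOnly, isDerived,
    # isDerivedUnion), with isUnique at index 3 defaulting to '"true"' as UML
    # prescribes. A dict keyed by token type would make the mapping
    # self-documenting -- a sketch, not the original code:
    #
    #     flags = {TokenType.T_IS_LEAF: '"false"', TokenType.T_IS_UNIQUE: '"true"'}
    #     flags[type] = token.value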
642    # operation = "<ownedOperation xmi:id=", string value, " name=", string value, [ operation parameters ],
643    #           ("/>" | parameter );
644    def parse_operation(self):
645        token = self.get_token()
646        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
647        token = self.get_token()
648        if token.token_type != TokenType.T_OWNED_OPERATION:
649            self.current_token -= 2
650            return None
651        self.compare_tokens(token, TokenType.T_OWNED_OPERATION)
652        token = self.get_token()
653        self.compare_tokens(token, TokenType.T_XMI_ID)
654        token = self.get_token()
655        self.compare_tokens(token, TokenType.T_EQUALS)
656        token = self.get_token()
657        self.compare_tokens(token, TokenType.T_STRING_VALUE)
658        id = token.value
659        token = self.get_token()
660        self.compare_tokens(token, TokenType.T_NAME)
661        token = self.get_token()
662        self.compare_tokens(token, TokenType.T_EQUALS)
663        token = self.get_token()
664        self.compare_tokens(token, TokenType.T_STRING_VALUE)
665        name = token.value
666        parameters = self.parse_operation_parameters()
667        token = self.get_token()
668        owned_parameters = []
669        if token.token_type != TokenType.T_SLASH:
670            owned_parameter = self.parse_owned_parameter()
671            while owned_parameter is not None:
672                owned_parameters.append(owned_parameter)
673                owned_parameter = self.parse_owned_parameter()
674            token = self.get_token()
675            self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
676            token = self.get_token()
677            self.compare_tokens(token, TokenType.T_SLASH)
678            token = self.get_token()
679            self.compare_tokens(token, TokenType.T_OWNED_OPERATION)
680        token = self.get_token()
681        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
682        operation = parser_objects.Operation(id, name, parameters[0], parameters[1], parameters[2], parameters[3], parameters[4], owned_parameters)
683        return operation
684
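    # The while loop above is the direct translation of the EBNF repetition
    # {owned parameter}: parse_owned_parameter() returns None (after rewinding
    # the cursor) as soon as the next element is not an <ownedParameter>, which
    # ends the loop without consuming input.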
685    # operation parameters = visibility, ['isLeaf="true"'], ['isStatic="true"'], ['isQuery="true"'];
686    def parse_operation_parameters(self):
687        visibility = self.parse_visibility()
688        isLeaf = '"false"'
689        isStatic = '"false"'
690        isAbstract = '"false"'
691        isQuery = '"false"'
692        token = self.get_token()
693        while token.token_type == TokenType.T_IS_LEAF or token.token_type == TokenType.T_IS_STATIC \
694                or token.token_type == TokenType.T_IS_QUERY or token.token_type == TokenType.T_IS_ABSTRACT:
695            type = token.token_type
696            token = self.get_token()
697            self.compare_tokens(token, TokenType.T_EQUALS)
698            token = self.get_token()
699            self.compare_tokens(token, TokenType.T_STRING_VALUE)
700            if type == TokenType.T_IS_LEAF:
701                isLeaf = token.value
702            elif type == TokenType.T_IS_STATIC:
703                isStatic = token.value
704            elif type == TokenType.T_IS_QUERY:
705                isQuery = token.value
706            elif type == TokenType.T_IS_ABSTRACT:
707                isAbstract = token.value
708            token = self.get_token()
709        self.current_token -= 1
710        return [visibility, isLeaf, isStatic, isAbstract, isQuery]
711
712    # parameter = owned parameter, {owned parameter}, "</ownedOperation>";
713    # owned parameter = "<ownedParameter xmi:id=", string value, " name=", string value, [owned parameter parameters],
714    #                   ("/>" | owned parameter description);
715    # owned parameter parameters = short type, ['isOrdered="true"'], ['isUnique="false"'], [parameter direction];
716    def parse_owned_parameter(self):
717        token = self.get_token()
718        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
719        token = self.get_token()
720        if token.token_type != TokenType.T_OWNED_PARAMETER:
721            self.current_token -= 2
722            return None
723        self.compare_tokens(token, TokenType.T_OWNED_PARAMETER)
724        token = self.get_token()
725        self.compare_tokens(token, TokenType.T_XMI_ID)
726        token = self.get_token()
727        self.compare_tokens(token, TokenType.T_EQUALS)
728        token = self.get_token()
729        self.compare_tokens(token, TokenType.T_STRING_VALUE)
730        id = token.value
731        token = self.get_token()
732        self.compare_tokens(token, TokenType.T_NAME)
733        token = self.get_token()
734        self.compare_tokens(token, TokenType.T_EQUALS)
735        token = self.get_token()
736        self.compare_tokens(token, TokenType.T_STRING_VALUE)
737        name = token.value
738        short_type = self.parse_short_type()
739        token = self.get_token()
740        isOrdered = '"false"'
741        isUnique = '"true"'
742        while token.token_type == TokenType.T_IS_ORDERED or token.token_type == TokenType.T_IS_UNIQUE:
743            token_type = token.token_type
744            token = self.get_token()
745            self.compare_tokens(token, TokenType.T_EQUALS)
746            token = self.get_token()
747            self.compare_tokens(token, TokenType.T_STRING_VALUE)
748            if token_type == TokenType.T_IS_ORDERED:
749                isOrdered = token.value
750            elif token_type == TokenType.T_IS_UNIQUE:
751                isUnique = token.value
752            token = self.get_token()
753        direction = self.parse_parameter_direction(token)
754        token = self.get_token()
755        if token.token_type != TokenType.T_SLASH:
756            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
757            description = self.parse_owned_parameter_description()
758            owned_parameter = parser_objects.OwnedParameter(id, name, description[0], isOrdered, isUnique, direction, description[1], description[2], description[3], short_type)
759            return owned_parameter
760        else:
761            token = self.get_token()
762            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
763            owned_parameter = parser_objects.OwnedParameter(id, name, None, isOrdered, isUnique, direction, None, None, None, short_type)
764            return owned_parameter
765
766    # owned parameter description = ">", [type], [upper limit], [lower limit], [default value], "</ownedParameter>";
767    def parse_owned_parameter_description(self):
768        type = self.parse_type()
769        upper_limit = self.parse_upper_limit()
770        lower_limit = self.parse_lower_limit()
771        default_value = self.parse_default_value()
772        if type is None and default_value[1] is not None:
773            type = default_value[1]
774        token = self.get_token()
775        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
776        token = self.get_token()
777        self.compare_tokens(token, TokenType.T_SLASH)
778        token = self.get_token()
779        self.compare_tokens(token, TokenType.T_OWNED_PARAMETER)
780        token = self.get_token()
781        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
782        return [type, upper_limit, lower_limit, default_value[0]]
783
784    # parameter direction = " direction=", direction type;
785    # direction type = "return" | "out" | "inout";
786    def parse_parameter_direction(self, tok):
787        if tok.token_type != TokenType.T_DIRECTION:
788            self.current_token -= 1
789            return '"in"'
790        token = self.get_token()
791        self.compare_tokens(token, TokenType.T_EQUALS)
792        token = self.get_token()
793        self.compare_tokens(token, TokenType.T_STRING_VALUE)
794        return token.value
795
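    # Unlike the other parse_* helpers, parse_parameter_direction() receives the
    # already-fetched token; when it is not 'direction=' the cursor is pushed
    # back by one (not two) and the UML default '"in"' is returned.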
796    # association = '"uml:Association"', " xmi:id=", string value, " memberEnd=", double string value, ">", owned end;
797    def parse_association(self):
798        token = self.get_token()
799        self.compare_tokens(token, TokenType.T_XMI_ID)
800        token = self.get_token()
801        self.compare_tokens(token, TokenType.T_EQUALS)
802        token = self.get_token()
803        self.compare_tokens(token, TokenType.T_STRING_VALUE)
804        id = token.value
805        token = self.get_token()
806        self.compare_tokens(token, TokenType.T_MEMBER_END)
807        token = self.get_token()
808        self.compare_tokens(token, TokenType.T_EQUALS)
809        token = self.get_token()
810        self.compare_tokens(token, TokenType.T_DOUBLE_STRING_VALUE)
811        member_end = token.value
812        token = self.get_token()
813        if token.token_type == TokenType.T_SLASH:
814            token = self.get_token()
815            self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
816            association = parser_objects.Association(id, member_end, None)
817            return association
818        elif token.token_type == TokenType.T_RIGHT_BRACKET:
819            owned_end = self.parse_owned_end()
820            association = parser_objects.Association(id, member_end, owned_end)
821            return association
822        else:
823            raise SyntaxError(token, "Invalid association ending, expected '/' or '>'")
824
825    # owned end = "<ownedEnd xmi:id=", string value, " name=", string value, visibility, short type, "association=",
826    #           string value, ">", upper limit, lower limit, "</ownedEnd>";
827    def parse_owned_end(self):
828        token = self.get_token()
829        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
830        token = self.get_token()
831        self.compare_tokens(token, TokenType.T_OWNED_END)
832        token = self.get_token()
833        self.compare_tokens(token, TokenType.T_XMI_ID)
834        token = self.get_token()
835        self.compare_tokens(token, TokenType.T_EQUALS)
836        token = self.get_token()
837        self.compare_tokens(token, TokenType.T_STRING_VALUE)
838        id = token.value
839        token = self.get_token()
840        self.compare_tokens(token, TokenType.T_NAME)
841        token = self.get_token()
842        self.compare_tokens(token, TokenType.T_EQUALS)
843        token = self.get_token()
844        self.compare_tokens(token, TokenType.T_STRING_VALUE)
845        name = token.value
846
847        visibility = self.parse_visibility()
848        type = self.parse_short_type()
849
850        token = self.get_token()
851        self.compare_tokens(token, TokenType.T_ASSOCIATION)
852        token = self.get_token()
853        self.compare_tokens(token, TokenType.T_EQUALS)
854        token = self.get_token()
855        self.compare_tokens(token, TokenType.T_STRING_VALUE)
856        association = token.value
857        token = self.get_token()
858        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
859
860        upper_limit = self.parse_upper_limit()
861        lower_limit = self.parse_lower_limit()
862
863        token = self.get_token()
864        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
865        token = self.get_token()
866        self.compare_tokens(token, TokenType.T_SLASH)
867        token = self.get_token()
868        self.compare_tokens(token, TokenType.T_OWNED_END)
869        token = self.get_token()
870        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
871        self.parse_packaged_element_end()
872        owned_end = parser_objects.OwnedEnd(id, name, visibility, type, association, upper_limit, lower_limit)
873        return owned_end
874
875    # upper limit = "<upperValue xmi:type=", string value, " xmi:id=", string value, [" value=", ("1" | "*")] ,"/>";
876    def parse_upper_limit(self):
877        token = self.get_token()
878        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
879        token = self.get_token()
880        if token.token_type != TokenType.T_UPPER_VALUE:
881            self.current_token -= 2
882            return None
883        self.compare_tokens(token, TokenType.T_UPPER_VALUE)
884        token = self.get_token()
885        self.compare_tokens(token, TokenType.T_XMI_TYPE)
886        token = self.get_token()
887        self.compare_tokens(token, TokenType.T_EQUALS)
888        token = self.get_token()
889        self.compare_tokens(token, TokenType.T_STRING_VALUE)
890        type = token.value
891        token = self.get_token()
892        self.compare_tokens(token, TokenType.T_XMI_ID)
893        token = self.get_token()
894        self.compare_tokens(token, TokenType.T_EQUALS)
895        token = self.get_token()
896        self.compare_tokens(token, TokenType.T_STRING_VALUE)
897        id = token.value
898        token = self.get_token()
899        value = None
900        if token.token_type == TokenType.T_VALUE:
901            token = self.get_token()
902            self.compare_tokens(token, TokenType.T_EQUALS)
903            token = self.get_token()
904            self.compare_tokens(token, TokenType.T_STRING_VALUE)
905            value = token.value
906            token = self.get_token()
907        self.compare_tokens(token, TokenType.T_SLASH)
908        token = self.get_token()
909        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
910        upper_limit = parser_objects.Limit(type, id, value)
911        return upper_limit
912
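    # parse_upper_limit() above and parse_lower_limit() below are identical
    # except for the tag token, so both could delegate to a single helper -- a
    # refactoring sketch with a hypothetical name, assuming the same
    # TokenType/Limit API:
    #
    #     def _parse_limit(self, tag):   # tag: T_UPPER_VALUE or T_LOWER_VALUE
    #         token = self.get_token()
    #         self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
    #         if self.get_token().token_type != tag:
    #             self.current_token -= 2
    #             return None
    #         # remaining body identical to parse_upper_limit above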
913    # lower limit = "<lowerValue xmi:type=", string value, " xmi:id=", string value, [" value=", ("1" | "*")] ,"/>";
914    def parse_lower_limit(self):
915        token = self.get_token()
916        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
917        token = self.get_token()
918        if token.token_type != TokenType.T_LOWER_VALUE:
919            self.current_token -= 2
920            return None
921        self.compare_tokens(token, TokenType.T_LOWER_VALUE)
922        token = self.get_token()
923        self.compare_tokens(token, TokenType.T_XMI_TYPE)
924        token = self.get_token()
925        self.compare_tokens(token, TokenType.T_EQUALS)
926        token = self.get_token()
927        self.compare_tokens(token, TokenType.T_STRING_VALUE)
928        type = token.value
929        token = self.get_token()
930        self.compare_tokens(token, TokenType.T_XMI_ID)
931        token = self.get_token()
932        self.compare_tokens(token, TokenType.T_EQUALS)
933        token = self.get_token()
934        self.compare_tokens(token, TokenType.T_STRING_VALUE)
935        id = token.value
936        token = self.get_token()
937        value = None
938        if token.token_type == TokenType.T_VALUE:
939            token = self.get_token()
940            self.compare_tokens(token, TokenType.T_EQUALS)
941            token = self.get_token()
942            self.compare_tokens(token, TokenType.T_STRING_VALUE)
943            value = token.value
944            token = self.get_token()
945        self.compare_tokens(token, TokenType.T_SLASH)
946        token = self.get_token()
947        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
948        lower_limit = parser_objects.Limit(type, id, value)
949        return lower_limit
950
951    # short type = " type=", string value;
952    def parse_short_type(self):
953        token = self.get_token()
954        if token.token_type != TokenType.T_TYPE:
955            self.current_token -= 1
956            return None
957        self.compare_tokens(token, TokenType.T_TYPE)
958        token = self.get_token()
959        self.compare_tokens(token, TokenType.T_EQUALS)
960        token = self.get_token()
961        self.compare_tokens(token, TokenType.T_STRING_VALUE)
962        return token.value
963
964    # "</packagedElement>"
965    def parse_packaged_element_end(self):
966        msg = "Packaged element ended incorrectly!"
967        token = self.get_token()
968        self.compare_tokens(token, TokenType.T_LEFT_BRACKET, msg)
969        token = self.get_token()
970        self.compare_tokens(token, TokenType.T_SLASH, msg)
971        token = self.get_token()
972        self.compare_tokens(token, TokenType.T_PACKAGED_ELEMENT, msg)
973        token = self.get_token()
974        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET, msg)
975
976    # profile application = "<profileApplication xmi:id=", id, ">", eannotation,
977    #                       applied profile, "</profileApplication>";
978    def parse_profile_application(self):
979        token = self.get_token()
980        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
981        token = self.get_token()
982
983        if token.token_type != TokenType.T_PROFILE_APPLICATION:
984            self.current_token -= 2
985            return None
986
987        self.compare_tokens(token, TokenType.T_PROFILE_APPLICATION)
988        token = self.get_token()
989        self.compare_tokens(token, TokenType.T_XMI_ID)
990        token = self.get_token()
991        self.compare_tokens(token, TokenType.T_EQUALS)
992        token = self.get_token()
993        self.compare_tokens(token, TokenType.T_STRING_VALUE)
994        id = token.value
995        token = self.get_token()
996        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
997        eannotation = self.parse_eannotation()
998        href = self.parse_applied_profile()
999        token = self.get_token()
1000        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
1001        token = self.get_token()
1002        self.compare_tokens(token, TokenType.T_SLASH)
1003        token = self.get_token()
1004        self.compare_tokens(token, TokenType.T_PROFILE_APPLICATION)
1005        token = self.get_token()
1006        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
1007        profile_application = parser_objects.ProfileApplication(eannotation, id, href)
1008        return profile_application
1009
1010    # eannotation = "<eAnnotations xmi:id=", string value, " source=", string value, ">", references, "</eAnnotations>";
1011    def parse_eannotation(self):
1012        token = self.get_token()
1013        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
1014        token = self.get_token()
1015        self.compare_tokens(token, TokenType.T_EANNOTATIONS)
1016        token = self.get_token()
1017        self.compare_tokens(token, TokenType.T_XMI_ID)
1018        token = self.get_token()
1019        self.compare_tokens(token, TokenType.T_EQUALS)
1020        token = self.get_token()
1021        self.compare_tokens(token, TokenType.T_STRING_VALUE)
1022        id = token.value
1023        token = self.get_token()
1024        self.compare_tokens(token, TokenType.T_SOURCE)
1025        token = self.get_token()
1026        self.compare_tokens(token, TokenType.T_EQUALS)
1027        token = self.get_token()
1028        self.compare_tokens(token, TokenType.T_STRING_VALUE)
1029        source = token.value
1030        token = self.get_token()
1031        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
1032
1033        type, href = self.parse_references()
1034
1035        token = self.get_token()
1036        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
1037        token = self.get_token()
1038        self.compare_tokens(token, TokenType.T_SLASH)
1039        token = self.get_token()
1040        self.compare_tokens(token, TokenType.T_EANNOTATIONS)
1041        token = self.get_token()
1042        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
1043        eannotation = parser_objects.EAnnotation(id, source, type, href)
1044        return eannotation
1045
1046    # references = '<references xmi:type="ecore:EPackage" href=', string value, "/>";
1047    def parse_references(self):
1048        token = self.get_token()
1049        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
1050        token = self.get_token()
1051        self.compare_tokens(token, TokenType.T_REFERENCES)
1052        token = self.get_token()
1053        self.compare_tokens(token, TokenType.T_XMI_TYPE)
1054        token = self.get_token()
1055        self.compare_tokens(token, TokenType.T_EQUALS)
1056        token = self.get_token()
1057        self.compare_tokens(token, TokenType.T_STRING_VALUE)
1058        type = token.value
1059        token = self.get_token()
1060        self.compare_tokens(token, TokenType.T_HREF)
1061        token = self.get_token()
1062        self.compare_tokens(token, TokenType.T_EQUALS)
1063        token = self.get_token()
1064        self.compare_tokens(token, TokenType.T_STRING_VALUE)
1065        href = token.value
1066        token = self.get_token()
1067        self.compare_tokens(token, TokenType.T_SLASH)
1068        token = self.get_token()
1069        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
1070        return type, href
1071
1072    # applied profile = "<appliedProfile href=", path, "/>";
1073    def parse_applied_profile(self):
1074        token = self.get_token()
1075        self.compare_tokens(token, TokenType.T_LEFT_BRACKET)
1076        token = self.get_token()
1077        self.compare_tokens(token, TokenType.T_APPLIED_PROFILE)
1078        token = self.get_token()
1079        self.compare_tokens(token, TokenType.T_HREF)
1080        token = self.get_token()
1081        self.compare_tokens(token, TokenType.T_EQUALS)
1082        token = self.get_token()
1083        self.compare_tokens(token, TokenType.T_STRING_VALUE)
1084        href = token.value
1085        token = self.get_token()
1086        self.compare_tokens(token, TokenType.T_SLASH)
1087        token = self.get_token()
1088        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET)
1089        return href
1090
1091    # parse </uml:Model> expression
1092    def parse_model_end(self):
1093        msg = "Model ended incorrectly!"
1094        token = self.get_token()
1095        self.compare_tokens(token, TokenType.T_LEFT_BRACKET, msg)
1096        token = self.get_token()
1097        self.compare_tokens(token, TokenType.T_SLASH, msg)
1098        token = self.get_token()
1099        self.compare_tokens(token, TokenType.T_UML_MODEL, msg)
1100        token = self.get_token()
1101        self.compare_tokens(token, TokenType.T_RIGHT_BRACKET, msg)
1102        token = self.get_token()
1103        self.compare_tokens(token, TokenType.T_EOF, msg)
1104
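The listing above calls get_token() throughout but never shows it. Below is a self-contained sketch of the same cursor-over-a-token-list pattern, runnable as-is; Token, MiniParser and the tag strings are illustrative stand-ins, not the project's real classes:

from collections import namedtuple

Token = namedtuple('Token', 'token_type value')

class MiniParser:
    def __init__(self, tokens):
        self.tokens = tokens
        self.current_token = 0            # index of the next token to hand out

    def get_token(self):
        token = self.tokens[self.current_token]
        self.current_token += 1
        return token

    def try_element(self, tag):
        # two-token lookahead with rewind, as in parse_type() above
        start = self.current_token
        if (self.get_token().token_type == 'LEFT_BRACKET'
                and self.get_token().token_type == tag):
            return True
        self.current_token = start        # element absent: restore the cursor
        return False

parser = MiniParser([Token('LEFT_BRACKET', '<'), Token('TYPE', 'type')])
print(parser.try_element('LOWER_VALUE'))  # False -- cursor rewound to 0
print(parser.try_element('TYPE'))         # True  -- cursor advanced past both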

analisador_sintatico.py

Source: analisador_sintatico.py Github

copy
1from os import linesep, pipe
2import sys
3from analisador_lexico import *
4import re
5
6# Input validation. Note: the diagnostics printed below are in Portuguese; "Erro sintático na linha {}: X esperado" reads "Syntax error at line {}: X expected".
7if(len(sys.argv) != 2):
8    print("Numero de argumentos invalido!")
9    sys.exit(1)
10
11# Check that the input file can be opened
12try:
13    f = open(sys.argv[1], "r")
14except:
15    print("Nao foi possivel abrir o arquivo!")
16    sys.exit(2)
17
18output = open('output.txt', 'w')
19token = ''
20linha = f.readline()
21cont_linha = 1
22
23def error_func(S):  # panic-mode recovery: skip tokens until one in the sync set S appears
24    while(token not in S):
25        get_token()
26
27
28erro_regex = 'erro\\(\\"(.*)\\"\\)'
29def get_token():
30    global linha
31    global cont_linha
32    global token
33    
34    fim = False
35    while(True):
36        cadeia = analisador_lexico(linha)
37        if(cadeia != ''):
38            break
39        linha = f.readline()
40        cont_linha += 1
41        if(len(linha) == 0):
42            exit()
43
44    cadeia = cadeia.split(', ')
45
46    token = cadeia[1].replace('\n', '')
47    var = re.match(erro_regex, cadeia[1])
48    if(var):
49        print('Erro léxico na linha {}: '.format(cont_linha), var.groups()[0])
50        if(var.groups()[0] == 'comentario nao finalizado'):
51            exit()
52        elif(var.groups()[0] == 'numero real mal formado'):
53            token = 'num_real'
54    linha = linha.replace(cadeia[0],'', 1)
55    linha = linha.strip()
56'''    if(linha):
57        if(linha[0]=='{' and linha[len(linha)-1]=='}'):
58            linha=''
59    if(not linha):
60        linha = f.readline()
61        cont_linha += 1
62        if(not linha):
63            fim = True'''
64
65
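# Unlike the class-based parser above, this get_token() drives a global cursor:
# it asks analisador_lexico() for the next lexeme of the current line, refills
# 'linha' from the file when the line is exhausted, strips the consumed lexeme
# off the front of 'linha', and leaves the token class in the global 'token'.
# Lexical errors travel through the stream as erro("...") strings and are
# unpacked with erro_regex; a malformed real number is repaired to 'num_real'
# so parsing can continue.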
66def programa(S):
67    if(token == 'simb_program'):
68        get_token()
69    else:
70        print("Erro sintático na linha {}: 'program' esperado".format(cont_linha), file=output)
71        error_func(['id'] + S)
72    if(token == 'id'):
73        get_token()
74    else:
75        print("Erro sintático na linha {}: identificador esperado".format(cont_linha), file=output)
76        error_func(['simb_pv'] + S)
77    if(token == 'simb_pv'):
78        get_token()
79    else:
80        print("Erro sintático na linha {}: ';' esperado".format(cont_linha), file=output)
81        error_func(P['corpo'] + ['simb_p'] + S)
82    corpo(['simb_p'] + S)
83    
84    if(token == 'simb_p'):
85        get_token()
86    else:
87        print("Erro sintático na linha {}: '.' esperado".format(cont_linha), file=output)
88        error_func(S)
89    
90
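# Every nonterminal routine below receives S, its synchronization set: on a
# syntax error it reports what was expected and calls error_func(S), which
# discards tokens until one in S shows up, so parsing resumes at a safe point
# (classic panic-mode recovery). Call sites widen S with FIRST sets from the
# table P defined at the bottom, e.g. error_func(P['corpo'] + ['simb_p'] + S).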
91def corpo(S):
92    dc(['simb_begin'] + S)
93    if(token == 'simb_begin'):
94        get_token()
95    else:
96        print("Erro sintático na linha {}: 'begin' esperado".format(cont_linha), file=output)
97        error_func(P['comandos'] + ['simb_end'] + S)
98    comandos(['simb_end'] + S)
99    
100    if(token == 'simb_end'):
101        get_token()
102    else:
103        print("Erro sintático na linha {}: 'end' esperado".format(cont_linha), file=output)
104        error_func(S)
105    
106
107def dc(S):
108    dc_c(P['dc_v'] + P['dc_p'] + S)
109    dc_v(P['dc_p'] + S)
110    dc_p(S)
111
112
113def dc_c(S):
114    if(token == 'simb_const'):
115        get_token()
116        if(token == 'id'):
117            get_token()
118        else:
119            print("Erro sintático na linha {}: identificador esperado".format(cont_linha), file=output)
120            error_func(['simb_igual'] + S)
121        if(token == 'simb_igual'):
122            get_token()
123        else:
124            print("Erro sintático na linha {}: '=' esperado".format(cont_linha), file=output)
125            error_func(P['numero'] + S)
126        numero(['simb_pv'] + S)
127        
128        if(token == 'simb_pv'):
129            get_token()
130        else:
131            print("Erro sintático na linha {}: ';' esperado".format(cont_linha), file=output)
132            error_func(P['dc_c'] + S)
133        dc_c(S)
134        
135    else:
136        return
137
138def dc_v(S):
139    if(token == 'simb_var'):
140        get_token()
141        variaveis(['simb_dp'] + S)
142        if(token == 'simb_dp'):
143            get_token()
144        else:
145            if(token == 'id'):
146                print("Erro sintático na linha {}: ',' esperado".format(cont_linha), file=output)
147            else:
148                print("Erro sintático na linha {}: ':' esperado".format(cont_linha), file=output)
149            error_func(P['tipo_var'] + S)
150
151        tipo_var(['simb_pv'] + S)
152        
153        if(token == 'simb_pv'):
154            get_token()
155        else:
156            print("Erro sintático na linha {}: ';' esperado".format(cont_linha), file=output)
157            error_func(P['dc_v'] + S)
158        dc_v(S)
159    else:
160        return
161
162
163def tipo_var(S):
164    if(token == 'simb_tipo'):
165        get_token()
166    else:
167        print("Erro sintático na linha {}: tipo da variável esperado".format(cont_linha), file=output)
168        error_func(S)
169
170
171def variaveis(S):
172    if(token == 'id'):
173        get_token()
174    else:
175        print("Erro sintático na linha {}: identificador esperado".format(cont_linha), file=output)
176        error_func(P['mais_var'] + S)
177    mais_var(S)
178
179
180def mais_var(S):
181    if(token == 'simb_virg'):
182        get_token()
183    else:
184        return
185    variaveis(S)
186
187
188def dc_p(S):
189    if(token == 'simb_procedure'):
190        get_token()
191        if(token == 'id'):
192            get_token()
193        else:
194            print("Erro sintático na linha {}: identificador esperado".format(cont_linha), file=output)
195            error_func(P['parametros'] + ['simb_pv'] + S)
196        parametros(['simb_pv'] + S)
197        
198        if(token == 'simb_pv'):
199            get_token()
200        else:
201            print("Erro sintático na linha {}: ';' esperado".format(cont_linha), file=output)
202            error_func(P['corpo_p'] + P['dc_p'] + S)
203        corpo_p(P['dc_p'] + S)
204        dc_p(S)
205
206    else:
207        return
208
209
210def parametros(S):
211    if(token == 'simb_apar'):
212        get_token()
213        lista_par(['simb_fpar'] + S)
214        if(token == 'simb_fpar'):
215            get_token()
216        else:
217            print("Erro sintático na linha {}: ')' esperado".format(cont_linha), file=output)
218            error_func(S)
219        
220    else:
221        return
222
223def lista_par(S):
224    variaveis(['simb_dp'] + S)
225    if(token == 'simb_dp'):
226        get_token()
227    else:
228        print("Erro sintático na linha {}: ':' esperado".format(cont_linha), file=output)
229        error_func(P['tipo_var'] + S)
230    tipo_var(P['mais_par'] + S)
231    mais_par(S)
232
233
234def mais_par(S):
235    if(token == 'simb_pv'):
236        get_token()
237        lista_par(S)
238    else:
239        return
240
241
242def corpo_p(S):
243    dc_loc(['simb_begin'] + S)
244    if(token == 'simb_begin'):
245        get_token()
246    else:
247        print("Erro sintático na linha {}: 'begin' esperado".format(cont_linha), file=output)
248        error_func(P['comandos'] + ['simb_end'] + S)
249    comandos(['simb_end'] + S)
250    
251    if(token == 'simb_end'):
252        get_token()
253    else:
254        print("Erro sintático na linha {}: 'end' esperado".format(cont_linha), file=output)
255        error_func(['simb_pv'] + S)
256    if(token == 'simb_pv'):
257        get_token()
258    else:
259        print("Erro sintático na linha {}: ';' esperado".format(cont_linha), file=output)
260        error_func(S)
261        
262
263def dc_loc(S):
264    dc_v(S)
265
266
267def lista_arg(S):
268    if(token == 'simb_apar'):
269        get_token()
270        argumentos(['simb_fpar'] + S)
271        if(token == 'simb_fpar'):
272            get_token()
273        else:
274            print("Erro sintático na linha {}: ')' esperado".format(cont_linha), file=output)
275            error_func(S)
276    else:
277        return
278
279def argumentos(S):
280    if(token == 'id'):
281        get_token()
282    else:
283        print("Erro sintático na linha {}: identificador esperado".format(cont_linha), file=output)
284        error_func(P['mais_ident'] + S)
285    mais_ident(S)
286
287
288def mais_ident(S):
289    if(token == 'simb_pv'):
290        get_token()
291        argumentos(S)
292    else:
293        return
294
295def pfalsa(S):
296    if(token == 'simb_else'):
297        get_token()
298        cmd(S)
299    else:
300        return
301
302def comandos(S):
303    if(token in P['cmd']):
304        cmd(['simb_pv'] + S)
305        if(token == 'simb_pv'):
306            get_token()
307        else:
308            print("Erro sintático na linha {}: ';' esperado".format(cont_linha), file=output)
309            error_func(P['comandos'] + S)
310        comandos(S)
311    else:
312        return
313
314def cmd(S):
315    if(token == 'simb_read'):
316        get_token()
317        if(token == 'simb_apar'):
318            get_token()
319        else:
320            print("Erro sintático na linha {}: '(' esperado".format(cont_linha), file=output)
321            error_func(P['variaveis'] + S)
322        variaveis(['simb_fpar'] + S)
323
324        if(token == 'simb_fpar'):
325            get_token()
326        else:
327            print("Erro sintático na linha {}: ')' esperado".format(cont_linha), file=output)
328            error_func(S)
329
330    elif(token == 'simb_write'):
331        get_token()
332        if(token == 'simb_apar'):
333            get_token()
334        else:
335            print("Erro sintático na linha {}: '(' esperado".format(cont_linha), file=output)
336            error_func(P['variaveis'] + S)
337        variaveis(['simb_fpar'] + S)
338        
339        if(token == 'simb_fpar'):
340            get_token()
341        else:
342            print("Erro sintático na linha {}: ')' esperado".format(cont_linha), file=output)
343            error_func(S)
344
345    elif(token == 'simb_while'):
346        get_token()
347        if(token == 'simb_apar'):
348            get_token()
349        else:
350            print("Erro sintático na linha {}: '(' esperado".format(cont_linha), file=output)
351            error_func(P['condicao'] + S)
352        condicao(['simb_fpar'] + S)
353
354        if(token == 'simb_fpar'):
355            get_token()
356        else:
357            print("Erro sintático na linha {}: ')' esperado".format(cont_linha), file=output)
358            error_func(['simb_do'] + S)
359        if(token == 'simb_do'):
360            get_token()
361        else:
362            print("Erro sintático na linha {}: 'do' esperado".format(cont_linha), file=output)
363            error_func(P['cmd'] + S)
364        cmd(S)
365        
366    elif(token == 'simb_if'):
367        get_token()
368        condicao(['simb_then'] + S)
369        if(token == 'simb_then'):
370            get_token()
371        else:
372            print("Erro sintático na linha {}: 'then' esperado".format(cont_linha), file=output)
373            error_func(P['cmd'] + S)
374        cmd(P['pfalsa'] + S)
375        pfalsa(S)
376
377    elif(token == 'id'):
378        get_token()
379        ident(S)
380
381    elif(token == 'simb_begin'):
382        get_token()
383        comandos(['simb_end'] + S)
384        if(token == 'simb_end'):
385            get_token()
386        else:
387            print("Erro sintático na linha {}: 'end' esperado".format(cont_linha), file=output)
388            error_func(S)
389
390    elif(token == 'simb_for'):
391        get_token()
392        if(token == 'id'):
393            get_token()
394        else:
395            print("Erro sintático na linha {}: identificador esperado".format(cont_linha), file=output)
396            error_func(['simb_atrib'] + S)
397        if(token == 'simb_atrib'):
398            get_token()
399        else:
400            print("Erro sintático na linha {}: ':=' esperado".format(cont_linha), file=output)
401            error_func(P['expressao'] + S)
402        expressao(['simb_to'] + S)
403        
404        if(token == 'simb_to'):
405            get_token()
406        else:
407            print("Erro sintático na linha {}: 'to' esperado".format(cont_linha), file=output)
408            error_func(P['expressao'] + S)
409        expressao(['simb_do'] + S)
410
411        if(token == 'simb_do'):
412            get_token()
413        else:
414            print("Erro sintático na linha {}: 'do' esperado".format(cont_linha), file=output)
415            error_func(P['cmd'] + S)
416        cmd(S)
417
418    else:
419        print("Erro sintático na linha {}: comando esperado".format(cont_linha), file=output)
420        error_func(S)
421
422def ident(S):
423    if(token == 'simb_atrib'):
424        get_token()
425        expressao(S)
426    elif(token in P['lista_arg']):
427        lista_arg(S)
428    else:
429        print("Erro sintático na linha {}: atribuição ou lista de argumentos esperada".format(cont_linha), file=output)
430        error_func(S)
431
432
433def condicao(S):
434    expressao(P['relacao'] + S) 
435    relacao(P['expressao'] + S)
436    expressao(S)
437
438def relacao(S):
439    if(token == 'simb_igual'):
440        get_token()
441    elif(token == 'simb_dif'):
442        get_token()
443    elif(token == 'simb_maior_igual'):
444        get_token()
445    elif(token == 'simb_menor_igual'):
446        get_token()
447    elif(token == 'simb_maior'):
448        get_token()
449    elif(token == 'simb_menor'):
450        get_token()
451    else:
452        print("Erro sintático na linha {}: comparação esperada".format(cont_linha), file=output)
453        error_func(S)
454
455def expressao(S):
456    termo(P['outros_termos'] + S)
457    outros_termos(S)
458    return
459
460def op_un(S):
461    if(token == 'simb_mais'):
462        get_token()
463    elif(token == 'simb_menos'):
464        get_token()
465    else:
466        return
467
468def outros_termos(S):
469    if(token in P['op_ad']):
470        op_ad(P['termo'] + S)
471        termo(P['outros_termos'] + S)
472        outros_termos(S)
473    else:
474        return
475
476def op_ad(S):
477    if(token == 'simb_mais'):
478        get_token()
479    elif(token == 'simb_menos'):
480        get_token()
481    else:
482        print("Erro sintático na linha {}: operador '+' ou '-' esperado".format(cont_linha), file=output)
483        error_func(S)
484
485def termo(S):
486    op_un(P['fator'] + S)
487    fator(P['mais_fatores'] + S)
488    mais_fatores(S)
489
490def mais_fatores(S):
491    if(token in P['op_mul']):
492        op_mul(P['fator'] + S)
493        fator(P['mais_fatores'] + S)
494        mais_fatores(S)
495    else:
496        return
497
498
499def op_mul(S):
500    if(token == 'simb_mult'):
501        get_token()
502    elif(token == 'simb_div'):
503        get_token()
504    else:
505        print("Erro sintático na linha {}: operador '*' ou '/' esperado".format(cont_linha), file=output)
506        error_func(S)
507
508
509def fator(S):
510    if(token == 'id'):
511        get_token()
512    elif(token == 'simb_apar'):
513        get_token()
514        expressao(['simb_fpar'] + S)
515        if(token == 'simb_fpar'):
516            get_token()
517        else:
518            error_func(S)
519    elif(token in P['numero']):
520        numero(S)
521    else:
522        print("Erro sintático na linha {}: fator esperado".format(cont_linha), file=output)
523        error_func(S)
524
525def numero(S):
526    if(token == 'num_int'):
527        get_token()
528    elif(token == 'num_real'):
529        get_token()
530    else:
531        print("Erro sintático na linha {}: valor numérico esperado".format(cont_linha), file=output)
532        error_func(S)
533
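# P maps each nonterminal to its FIRST set: the tokens that may begin one of
# its derivations. It feeds both the predictive checks (e.g. token in P['cmd']
# in comandos) and the synchronization sets handed to error_func above.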
534P = {
535    'programa': ['simb_program'],
536    'corpo': ['simb_const', 'simb_var', 'simb_procedure'],
537    'dc': ['simb_const', 'simb_var', 'simb_procedure'],
538    'dc_c': ['simb_const'],
539    'dc_v': ['simb_var'],
540    'tipo_var': ['simb_tipo'],
541    'variaveis': ['id'],
542    'mais_var': ['simb_virg'],
543    'dc_p': ['simb_procedure'],
544    'parametros': ['simb_apar'],
545    'lista_par': ['id'],
546    'mais_par': ['simb_pv'],
547    'corpo_p': ['simb_var'],
548    'dc_loc': ['simb_var'],
549    'lista_arg': ['simb_apar'],
550    'argumentos': ['id'],
551    'mais_ident': ['simb_pv'],
552    'pfalsa': ['simb_else'],
553    'comandos': ['simb_read', 'simb_write', 'simb_while', 'simb_if', 'id', 'simb_begin', 'simb_for'],
554    'cmd': ['simb_read', 'simb_write', 'simb_while', 'simb_if', 'id', 'simb_begin', 'simb_for'],
555    'ident': ['simb_atrib', 'simb_apar'],
556    'relacao': ['simb_igual', 'simb_dif', 'simb_maior_igual', 'simb_menor_igual', 'simb_maior', 'simb_menor'],
557    'expressao': ['simb_mais', 'simb_menos', 'id', 'simb_apar', 'num_int', 'num_real'],
558    'op_un': ['simb_mais', 'simb_menos'],
559    'outros_termos': ['simb_mais', 'simb_menos'],
560    'op_ad': ['simb_mais', 'simb_menos'],
561    'termo': ['simb_mais', 'simb_menos', 'simb_apar', 'id', 'num_int', 'num_real'],
562    'mais_fatores': ['simb_mult', 'simb_div'],
563    'op_mul': ['simb_mult', 'simb_div'],
564    'fator': ['id', 'num_int', 'num_real', 'simb_fpar'],
565    'numero': ['num_int', 'num_real']
566}   
567
568get_token()
569programa([])
570f.close()
571output.close()
572
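Stripped of the grammar, the get_token()/error_func() pair above reduces to a few lines. A runnable sketch with a made-up token stream (the real tokens come from analisador_lexico):

tokens = iter(['simb_program', 'simb_pv', 'id', 'simb_pv', 'simb_p'])
token = ''

def get_token():
    global token
    token = next(tokens, 'eof')       # 'eof' stands in for end of input

def error_func(S):
    # panic mode: discard tokens until one in the synchronization set S;
    # keep 'eof' in S so the loop always terminates
    while token not in S:
        get_token()

get_token()                # token = 'simb_program'
get_token()                # token = 'simb_pv' -- suppose an 'id' was expected
error_func(['id', 'eof'])  # skip ahead to the next identifier
print(token)               # id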
