# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------

__version__ = '3.11'
__tabversion__ = '3.10'

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Exception thrown when an invalid token is encountered and no default error
# handler is defined.
class LexError(Exception):
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s

# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    def __str__(self):
        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

    def __repr__(self):
        return str(self)

# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self.f.write((msg % args) + '\n')

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: ' + (msg % args) + '\n')

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: ' + (msg % args) + '\n')

    info = critical
    debug = critical

# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self

# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
#    input()   - Store a new string in the lexer
#    token()   - Get the next token
#    clone()   - Clone the lexer
#
#    lineno    - Current line number
#    lexpos    - Current position in the input string
# -----------------------------------------------------------------------------
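#
# A minimal usage sketch of this runtime API (the token rules are hypothetical
# and would normally be defined in the calling module and built via lex()):
#
#     import ply.lex as lex
#     lexer = lex.lex()              # build a Lexer from t_* rules in the caller
#     lexer.input('3 + 4')           # store a new string in the lexer
#     while True:
#         tok = lexer.token()        # next LexToken, or None at end of input
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)
#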
class Lexer:
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode

    def clone(self, object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion = %s\n' % repr(__tabversion__))
            tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
            tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags)))
            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore  # This is here in case there was a state change
                    break

                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next

# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
    return getattr(func, 'regex', func.__doc__)

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    f = sys._getframe(levels)
    ldict = f.f_globals.copy()
    if f.f_globals != f.f_locals:
        ldict.update(f.f_locals)
    return ldict

# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
    result = []
    for f, name in zip(funclist, namelist):
        if f and f[0]:
            result.append((name, f[1]))
        else:
            result.append(f)
    return result

# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
    result = []
    for n in namelist:
        if n and n[0]:
            result.append((fdict[n[0]], n[1]))
        else:
            result.append(n)
    return result

# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
    if not relist:
        return []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]

        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find('ignore_') > 0:
                    lexindexfunc[i] = (None, None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        m = int(len(relist)/2)
        if m == 0:
            m = 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)

# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
    parts = s.split('_')
    for i, part in enumerate(parts[1:], 1):
        if part not in names and part != 'ANY':
            break

    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)

    if 'ANY' in states:
        states = tuple(names)

    tokenname = '_'.join(parts[i:])
    return (states, tokenname)

# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True
        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1

                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                    if c.match(''):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1

                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer input file. This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------
    def validate_module(self, module):
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            return

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1

# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):

    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # If an object instance was supplied, use it in place of the module
    if object:
        module = object

    # Get the module dictionary used for the lexer
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is part of a package.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
            if lextab in sys.modules:
                del sys.modules[lextab]
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj

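# A minimal sketch of the kind of module lex() reflects over (the module and
# rule names below are illustrative, not part of this file):
#
#     # calclex.py
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'                 # simple rules are plain regex strings
#     t_ignore = ' \t'               # characters skipped between tokens
#
#     def t_NUMBER(t):               # function rules carry the regex in their docstring
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):                # called on illegal characters
#         t.lexer.skip(1)
#
#     lexer = lex.lex()              # builds a Lexer from the definitions above
#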
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)

    if lexer:
        _token = lexer.token
    else:
        _token = token

    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    def set_regex(f):
        if hasattr(r, '__call__'):
            f.regex = _get_regex(r)
        else:
            f.regex = r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
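
# A short sketch of how @TOKEN is typically applied (the 'identifier' pattern
# and the t_ID rule below are illustrative, not part of this module):
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         # The pattern comes from the rule's .regex attribute set by the
#         # decorator; _get_regex() checks that attribute before the docstring.
#         return t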