# pyshlex.py - PLY compatible lexer for pysh.
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

# TODO:
# - review all "char in 'abc'" snippets: the empty string can be matched
# - test line continuations within quoted/expansion strings
# - eof is buggy wrt sublexers
# - the lexer cannot really work in pull mode as it would be required to run
#   PLY in pull mode. It was designed to work incrementally and it would not
#   be that hard to enable pull mode.
import re

try:
    s = set()
    del s
except NameError:
    # Python < 2.4 compatibility: fall back to the sets module
    from sets import Set as set

from ply import lex

from bb.pysh.sherrors import *

class NeedMore(Exception):
    pass

def is_blank(c):
    return c in (' ', '\t')

_RE_DIGITS = re.compile(r'^\d+$')

def are_digits(s):
    return _RE_DIGITS.search(s) is not None

_OPERATORS = dict([
    ('&&', 'AND_IF'),
    ('||', 'OR_IF'),
    (';;', 'DSEMI'),
    ('<<', 'DLESS'),
    ('>>', 'DGREAT'),
    ('<&', 'LESSAND'),
    ('>&', 'GREATAND'),
    ('<>', 'LESSGREAT'),
    ('<<-', 'DLESSDASH'),
    ('>|', 'CLOBBER'),
    ('&', 'AMP'),
    (';', 'COMMA'),
    ('<', 'LESS'),
    ('>', 'GREATER'),
    ('(', 'LPARENS'),
    (')', 'RPARENS'),
])

#Make a function to silence pychecker "Local variable shadows global"
def make_partial_ops():
    partials = {}
    for k in _OPERATORS:
        for i in range(1, len(k)+1):
            partials[k[:i]] = None
    return partials

_PARTIAL_OPERATORS = make_partial_ops()

def is_partial_op(s):
    """Return True if s matches a non-empty subpart of an operator starting
    at its first character.
    """
    return s in _PARTIAL_OPERATORS

def is_op(s):
    """If s matches an operator, return the operator identifier. Return None
    otherwise.
    """
    return _OPERATORS.get(s)
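# Illustrative values, derived from the tables above: is_partial_op('<') and
# is_partial_op('<<') are both True because each is a prefix of '<<-', while
# is_op('<<') returns 'DLESS' and is_op('<=') returns None.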
_RESERVEDS = dict([
    ('if', 'If'),
    ('then', 'Then'),
    ('else', 'Else'),
    ('elif', 'Elif'),
    ('fi', 'Fi'),
    ('do', 'Do'),
    ('done', 'Done'),
    ('case', 'Case'),
    ('esac', 'Esac'),
    ('while', 'While'),
    ('until', 'Until'),
    ('for', 'For'),
    ('{', 'Lbrace'),
    ('}', 'Rbrace'),
    ('!', 'Bang'),
    ('in', 'In'),
    ('|', 'PIPE'),
])

def get_reserved(s):
    return _RESERVEDS.get(s)

_RE_NAME = re.compile(r'^[0-9a-zA-Z_]+$')

def is_name(s):
    return _RE_NAME.search(s) is not None

def find_chars(seq, chars):
    for i,v in enumerate(seq):
        if v in chars:
            return i,v
    return -1, None
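# find_chars scans left to right and reports the first match, e.g.
# find_chars('foo$bar', '$`') == (3, '$') and find_chars('foo', '$') == (-1, None).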
class WordLexer:
    """WordLexer parses quoted or expansion expressions and returns an
    expression tree. The input string can be any well-formed sequence
    beginning with a quoting or expansion character. Embedded expressions are
    handled recursively. The resulting tree is made of lists and strings:
    lists represent quoted or expansion expressions, with the opening
    separator as first element and the closing separator as last. In between
    can be any number of strings, or lists for sub-expressions. Non-quoted,
    non-expansion expressions can be written as plain strings or as lists
    with empty strings as starting and ending delimiters.
    """
    NAME_CHARSET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    NAME_CHARSET = dict(zip(NAME_CHARSET, NAME_CHARSET))

    SPECIAL_CHARSET = '@*#?-$!0'

    #Characters which can be escaped depend on the current delimiter
    ESCAPABLE = {
        '`': set(['$', '\\', '`']),
        '"': set(['$', '\\', '`', '"']),
        "'": set(),
    }

    def __init__(self, heredoc = False):
        # _buffer is the unprocessed input characters buffer
        self._buffer = []
        # _stack is empty or contains a quoted list being processed
        # (this is the DFS path to the quoted expression being evaluated).
        self._stack = []
        self._escapable = None
        # True when parsing unquoted here documents
        self._heredoc = heredoc

    def add(self, data, eof=False):
        """Feed the lexer with more data. If the quoted expression can be
        delimited, return a tuple (expr, remaining) containing the expression
        tree and the unconsumed data.
        Otherwise, raise NeedMore.
        """
        self._buffer += list(data)
        self._parse(eof)
        result = self._stack[0]
        remaining = ''.join(self._buffer)
        self._stack = []
        self._buffer = []
        return result, remaining

    def _is_escapable(self, c, delim=None):
        if delim is None:
            if self._heredoc:
                # Backslashes work as if they were double quoted in unquoted
                # here-documents
                delim = '"'
            else:
                if len(self._stack)<=1:
                    return True
                delim = self._stack[-2][0]

        escapables = self.ESCAPABLE.get(delim, None)
        return escapables is None or c in escapables

    def _parse_squote(self, buf, result, eof):
        if not buf:
            raise NeedMore()
        try:
            pos = buf.index("'")
        except ValueError:
            raise NeedMore()
        result[-1] += ''.join(buf[:pos])
        result += ["'"]
        return pos+1, True

    def _parse_bquote(self, buf, result, eof):
        if not buf:
            raise NeedMore()

        if buf[0]=='\n':
            #Remove line continuations
            result[:] = ['', '', '']
        elif self._is_escapable(buf[0]):
            result[-1] += buf[0]
            result += ['']
        else:
            #Keep as such
            result[:] = ['', '\\'+buf[0], '']

        return 1, True

    def _parse_dquote(self, buf, result, eof):
        if not buf:
            raise NeedMore()
        pos, sep = find_chars(buf, '$\\`"')
        if pos==-1:
            raise NeedMore()

        result[-1] += ''.join(buf[:pos])
        if sep=='"':
            result += ['"']
            return pos+1, True
        else:
            #Keep everything until the separator and defer processing
            return pos, False

    def _parse_command(self, buf, result, eof):
        if not buf:
            raise NeedMore()

        chars = '$\\`"\''
        if result[0] == '$(':
            chars += ')'
        pos, sep = find_chars(buf, chars)
        if pos == -1:
            raise NeedMore()

        result[-1] += ''.join(buf[:pos])
        if (result[0]=='$(' and sep==')') or (result[0]=='`' and sep=='`'):
            result += [sep]
            return pos+1, True
        else:
            return pos, False

    def _parse_parameter(self, buf, result, eof):
        if not buf:
            raise NeedMore()

        pos, sep = find_chars(buf, '$\\`"\'}')
        if pos==-1:
            raise NeedMore()

        result[-1] += ''.join(buf[:pos])
        if sep=='}':
            result += [sep]
            return pos+1, True
        else:
            return pos, False

    def _parse_dollar(self, buf, result, eof):
        sep = result[0]
        if sep=='$':
            if not buf:
                #TODO: handle empty $
                raise NeedMore()
            if buf[0]=='(':
                if len(buf)==1:
                    raise NeedMore()

                if buf[1]=='(':
                    result[0] = '$(('
                    buf[:2] = []
                else:
                    result[0] = '$('
                    buf[:1] = []
            elif buf[0]=='{':
                result[0] = '${'
                buf[:1] = []
            else:
                if buf[0] in self.SPECIAL_CHARSET:
                    result[-1] = buf[0]
                    read = 1
                else:
                    for read,c in enumerate(buf):
                        if c not in self.NAME_CHARSET:
                            break
                    else:
                        if not eof:
                            raise NeedMore()
                        read += 1

                    result[-1] += ''.join(buf[0:read])

                if not result[-1]:
                    result[:] = ['', result[0], '']
                else:
                    result += ['']
                return read,True

        sep = result[0]
        if sep=='$(':
            parsefunc = self._parse_command
        elif sep=='${':
            parsefunc = self._parse_parameter
        else:
            raise NotImplementedError(sep)

        pos, closed = parsefunc(buf, result, eof)
        return pos, closed

    def _parse(self, eof):
        buf = self._buffer
        stack = self._stack
        recurse = False

        while 1:
            if not stack or recurse:
                if not buf:
                    raise NeedMore()
                if buf[0] not in ('"\\`$\''):
                    raise ShellSyntaxError('Invalid quoted string sequence')
                stack.append([buf[0], ''])
                buf[:1] = []
                recurse = False

            result = stack[-1]
            if result[0]=="'":
                parsefunc = self._parse_squote
            elif result[0]=='\\':
                parsefunc = self._parse_bquote
            elif result[0]=='"':
                parsefunc = self._parse_dquote
            elif result[0]=='`':
                parsefunc = self._parse_command
            elif result[0][0]=='$':
                parsefunc = self._parse_dollar
            else:
                raise NotImplementedError()

            read, closed = parsefunc(buf, result, eof)
            buf[:read] = []
            if closed:
                if len(stack)>1:
                    #Merge in parent expression
                    parsed = stack.pop()
                    stack[-1] += [parsed]
                    stack[-1] += ['']
                else:
                    break
            else:
                recurse = True
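# A sketch of the tree shape produced by WordLexer, worked out from the rules
# above: feeding a double-quoted string containing a parameter expansion
# returns the expression tree plus whatever input follows the closing quote.
#
#     >>> WordLexer().add('"foo $bar" rest', eof=True)
#     (['"', 'foo ', ['$', 'bar', ''], '', '"'], ' rest')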
def normalize_wordtree(wtree):
    """Fold back every literal sequence (delimited with empty strings) into
    the parent sequence.
    """
    def normalize(wtree):
        result = []
        for part in wtree[1:-1]:
            if isinstance(part, list):
                part = normalize(part)
                if part[0]=='':
                    #Move the part content back at current level
                    result += part[1:-1]
                    continue
            elif not part:
                #Remove empty strings
                continue
            result.append(part)
        if not result:
            result = ['']
        return [wtree[0]] + result + [wtree[-1]]

    return normalize(wtree)
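# Example: a literal sub-sequence delimited by empty strings is folded into
# its parent, so normalize_wordtree(['', 'a', ['', 'b', ''], '']) returns
# ['', 'a', 'b', ''].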
def make_wordtree(token, here_document=False):
    """Parse a delimited token and return a tree similar to the ones returned
    by WordLexer. token may contain any combination of expansion/quoted fields
    and plain ones.
    """
    tree = ['']
    remaining = token
    delimiters = '\\$`'
    if not here_document:
        delimiters += '\'"'

    while 1:
        pos, sep = find_chars(remaining, delimiters)
        if pos==-1:
            tree += [remaining, '']
            return normalize_wordtree(tree)
        tree.append(remaining[:pos])
        remaining = remaining[pos:]

        try:
            result, remaining = WordLexer(heredoc = here_document).add(remaining, True)
        except NeedMore:
            raise ShellSyntaxError('Invalid token "%s"' % token)
        tree.append(result)
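# Combined example, derived from the helpers above: make_wordtree('a="$x"')
# yields ['', 'a=', ['"', ['$', 'x', ''], '"'], ''] once normalized.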
def wordtree_as_string(wtree):
    """Rewrite an expression tree generated by make_wordtree as string."""
    def visit(node, output):
        for child in node:
            if isinstance(child, list):
                visit(child, output)
            else:
                output.append(child)

    output = []
    visit(wtree, output)
    return ''.join(output)
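# Since every separator is kept in the tree, this is the inverse of
# make_wordtree for tokens without line continuations (backslash-newline
# pairs are dropped by _parse_bquote):
# wordtree_as_string(make_wordtree(t)) == t.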
def unquote_wordtree(wtree):
    """Fold the word tree while removing quotes everywhere. Other expansion
    sequences are joined as such.
    """
    def unquote(wtree):
        unquoted = []
        if wtree[0] in ('', "'", '"', '\\'):
            wtree = wtree[1:-1]

        for part in wtree:
            if isinstance(part, list):
                part = unquote(part)
            unquoted.append(part)
        return ''.join(unquoted)

    return unquote(wtree)
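# Example: unquote_wordtree(make_wordtree('a="$x"')) == 'a=$x' -- the quotes
# are stripped while the expansion text is preserved verbatim.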
class HereDocLexer:
    """HereDocLexer delimits here-document content: everything from just
    after the starting newline (not included) up to and including the closing
    delimiter line.
    """
    def __init__(self, op, delim):
        assert op in ('<<', '<<-')
        if not delim:
            raise ShellSyntaxError('invalid here document delimiter %s' % str(delim))

        self._op = op
        self._delim = delim
        self._buffer = []
        self._token = []

    def add(self, data, eof):
        """If the here-document was delimited, return a tuple (content, remaining).
        Raise NeedMore() otherwise.
        """
        self._buffer += list(data)
        self._parse(eof)
        token = ''.join(self._token)
        remaining = ''.join(self._buffer)
        self._token, self._buffer = [], []
        return token, remaining

    def _parse(self, eof):
        while 1:
            #Look for the first unescaped newline. Quotes may be ignored
            escaped = False
            for i,c in enumerate(self._buffer):
                if escaped:
                    escaped = False
                elif c=='\\':
                    escaped = True
                elif c=='\n':
                    break
            else:
                i = -1

            if i==-1 or self._buffer[i]!='\n':
                if not eof:
                    raise NeedMore()
                #No more data, maybe the last line is the closing delimiter
                line = ''.join(self._buffer)
                eol = ''
                self._buffer[:] = []
            else:
                line = ''.join(self._buffer[:i])
                eol = self._buffer[i]
                self._buffer[:i+1] = []

            if self._op=='<<-':
                line = line.lstrip('\t')

            if line==self._delim:
                break

            self._token += [line, eol]
            if i==-1:
                break
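# A small worked example, assuming the content and the delimiter line arrive
# in a single chunk:
#
#     >>> HereDocLexer('<<', 'EOF').add('foo\nEOF\nrest', eof=False)
#     ('foo\n', 'rest')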
class Token:
    #TODO: check this is still in use
    OPERATOR = 'OPERATOR'
    WORD = 'WORD'

    def __init__(self):
        self.value = ''
        self.type = None

    def __getitem__(self, key):
        #Behave like a two elements tuple
        if key==0:
            return self.type
        if key==1:
            return self.value
        raise IndexError(key)

class HereDoc:
    def __init__(self, op, name=None):
        self.op = op
        self.name = name
        self.pendings = []

TK_COMMA = 'COMMA'
TK_AMPERSAND = 'AMP'
TK_OP = 'OP'
TK_TOKEN = 'TOKEN'
TK_COMMENT = 'COMMENT'
TK_NEWLINE = 'NEWLINE'
TK_IONUMBER = 'IO_NUMBER'
TK_ASSIGNMENT = 'ASSIGNMENT_WORD'
TK_HERENAME = 'HERENAME'

class Lexer:
    """Main lexer.

    Feed it with add(); delimited tokens are emitted through the on_token()
    callback.
    """
    # Here-document handling makes the whole thing more complex because they
    # basically force tokens to be reordered: here-content must come right
    # after the operator and the here-document name, while some other tokens
    # might be following the here-document expression on the same line.
    #
    # So, here-doc states are basically:
    #   * self._state==ST_NORMAL
    #       - self._heredoc.op is None: no here-document
    #       - self._heredoc.op is not None but name is: here-document operator
    #           matched, waiting for the document name/delimiter
    #       - self._heredoc.op and name are not None: here-document is ready,
    #           following tokens are being stored and will be pushed again
    #           when the document is completely parsed.
    #   * self._state==ST_HEREDOC
    #       - The here-document is being delimited by self._herelexer. Once it
    #           is done the content is pushed in front of the pending token
    #           list, then all these tokens are pushed once again.
    ST_NORMAL = 'ST_NORMAL'
    ST_OP = 'ST_OP'
    ST_BACKSLASH = 'ST_BACKSLASH'
    ST_QUOTED = 'ST_QUOTED'
    ST_COMMENT = 'ST_COMMENT'
    ST_HEREDOC = 'ST_HEREDOC'

    #Match end of backquote strings
    RE_BACKQUOTE_END = re.compile(r'(?<!\\)(`)')

    def __init__(self, parent_state = None):
        self._input = []
        self._pos = 0

        self._token = ''
        self._type = TK_TOKEN

        self._state = self.ST_NORMAL
        self._parent_state = parent_state
        self._wordlexer = None

        self._heredoc = HereDoc(None)
        self._herelexer = None

        ### Following attributes are not used for delimiting tokens and can
        ### safely be changed after here-document detection (see _push_token)

        # Count the number of tokens following a 'For' reserved word. Needed to
        # return an 'In' reserved word if it comes in third place.
        self._for_count = None

    def add(self, data, eof=False):
        """Feed the lexer with data.

        When eof is set to True, return unconsumed data, or raise if the
        lexer is in the middle of a delimiting operation.
        Raise NeedMore otherwise.
        """
        self._input += list(data)
        self._parse(eof)
        self._input[:self._pos] = []
        return ''.join(self._input)
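    # Incremental feeding sketch (using the PLYLexer subclass defined below,
    # which implements on_token): the first call raises NeedMore because the
    # double quote is still open, the second call completes the word.
    #
    #     >>> lexer = PLYLexer()
    #     >>> lexer.add('echo "unterminated')   # raises NeedMore
    #     >>> lexer.add('"\n', eof=True)        # delimits 'echo', '"unterminated"'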
    def _parse(self, eof):
        while self._state:
            if self._pos>=len(self._input):
                if not eof:
                    raise NeedMore()
                elif self._state not in (self.ST_OP, self.ST_QUOTED, self.ST_HEREDOC):
                    #Delimit the current token and leave cleanly
                    self._push_token('')
                    break
                else:
                    #Let the sublexers handle the eof themselves
                    pass

            if self._state==self.ST_NORMAL:
                self._parse_normal()
            elif self._state==self.ST_COMMENT:
                self._parse_comment()
            elif self._state==self.ST_OP:
                self._parse_op(eof)
            elif self._state==self.ST_QUOTED:
                self._parse_quoted(eof)
            elif self._state==self.ST_HEREDOC:
                self._parse_heredoc(eof)
            else:
                assert False, "Unknown state " + str(self._state)

        if self._heredoc.op is not None:
            raise ShellSyntaxError('missing here-document delimiter')

    def _parse_normal(self):
        c = self._input[self._pos]
        if c=='\n':
            self._push_token(c)
            self._token = c
            self._type = TK_NEWLINE
            self._push_token('')
            self._pos += 1
        elif c in ('\\', '\'', '"', '`', '$'):
            self._state = self.ST_QUOTED
        elif is_partial_op(c):
            self._push_token(c)

            self._type = TK_OP
            self._token += c
            self._pos += 1
            self._state = self.ST_OP
        elif is_blank(c):
            self._push_token(c)

            #Discard blanks
            self._pos += 1
        elif self._token:
            self._token += c
            self._pos += 1
        elif c=='#':
            self._state = self.ST_COMMENT
            self._type = TK_COMMENT
            self._pos += 1
        else:
            self._pos += 1
            self._token += c

    def _parse_op(self, eof):
        assert self._token

        while 1:
            if self._pos>=len(self._input):
                if not eof:
                    raise NeedMore()
                c = ''
            else:
                c = self._input[self._pos]

            op = self._token + c
            if c and is_partial_op(op):
                #Still parsing an operator
                self._token = op
                self._pos += 1
            else:
                #End of operator
                self._push_token(c)
                self._state = self.ST_NORMAL
                break

    def _parse_comment(self):
        while 1:
            if self._pos>=len(self._input):
                raise NeedMore()

            c = self._input[self._pos]
            if c=='\n':
                #End of comment, do not consume the end of line
                self._state = self.ST_NORMAL
                break
            else:
                self._token += c
                self._pos += 1

    def _parse_quoted(self, eof):
        """Precondition: the starting backquote/dollar is still in the input queue."""
        if not self._wordlexer:
            self._wordlexer = WordLexer()

        if self._pos<len(self._input):
            #Transfer input queue characters into the subparser
            input = self._input[self._pos:]
            self._pos += len(input)

        wtree, remaining = self._wordlexer.add(input, eof)
        self._wordlexer = None
        self._token += wordtree_as_string(wtree)

        #Put unparsed characters back in the input queue
        if remaining:
            self._input[self._pos:self._pos] = list(remaining)
        self._state = self.ST_NORMAL

    def _parse_heredoc(self, eof):
        assert not self._token

        if self._herelexer is None:
            self._herelexer = HereDocLexer(self._heredoc.op, self._heredoc.name)

        if self._pos<len(self._input):
            #Transfer input queue characters into the subparser
            input = self._input[self._pos:]
            self._pos += len(input)

        self._token, remaining = self._herelexer.add(input, eof)

        #Reset here-document state
        self._herelexer = None
        heredoc, self._heredoc = self._heredoc, HereDoc(None)
        if remaining:
            self._input[self._pos:self._pos] = list(remaining)
        self._state = self.ST_NORMAL

        #Push pending tokens
        heredoc.pendings[:0] = [(self._token, self._type, heredoc.name)]
        for token, type, delim in heredoc.pendings:
            self._token = token
            self._type = type
            self._push_token(delim)

    def _push_token(self, delim):
        if not self._token:
            return 0

        if self._heredoc.op is not None:
            if self._heredoc.name is None:
                #Here-document name
                if self._type!=TK_TOKEN:
                    raise ShellSyntaxError("expecting here-document name, got '%s'" % self._token)
                self._heredoc.name = unquote_wordtree(make_wordtree(self._token))
                self._type = TK_HERENAME
            else:
                #Capture all tokens until the newline starting the here-document
                if self._type==TK_NEWLINE:
                    assert self._state==self.ST_NORMAL
                    self._state = self.ST_HEREDOC

                self._heredoc.pendings.append((self._token, self._type, delim))
                self._token = ''
                self._type = TK_TOKEN
                return 1

        # BEWARE: do not change the parser state from here to the end of the
        # function: when parsing between a here-document operator and the end
        # of the line, tokens are stored in self._heredoc.pendings. Therefore,
        # they will not reach the section below.

        #Check operators
        if self._type==TK_OP:
            #False positive because of partial op matching
            op = is_op(self._token)
            if not op:
                self._type = TK_TOKEN
            else:
                #Map to the specific operator
                self._type = op
                if self._token in ('<<', '<<-'):
                    #Done here rather than in _parse_op because there is no need
                    #to change the parser state since we are still waiting for
                    #the here-document name
                    if self._heredoc.op is not None:
                        raise ShellSyntaxError("syntax error near token '%s'" % self._token)
                    assert self._heredoc.op is None
                    self._heredoc.op = self._token

        if self._type==TK_TOKEN:
            if '=' in self._token and not delim:
                if self._token.startswith('='):
                    #Token is a WORD... a TOKEN that is.
                    pass
                else:
                    prev = self._token[:self._token.find('=')]
                    if is_name(prev):
                        self._type = TK_ASSIGNMENT
                    else:
                        #Just a token (unspecified)
                        pass
            else:
                reserved = get_reserved(self._token)
                if reserved is not None:
                    if reserved=='In' and self._for_count!=2:
                        #Sorry, not a reserved word after all
                        pass
                    else:
                        self._type = reserved
                        if reserved in ('For', 'Case'):
                            self._for_count = 0
                elif are_digits(self._token) and delim in ('<', '>'):
                    #Detect IO_NUMBER
                    self._type = TK_IONUMBER
                elif self._token==';':
                    self._type = TK_COMMA
                elif self._token=='&':
                    self._type = TK_AMPERSAND
        elif self._type==TK_COMMENT:
            #Comments are not part of sh grammar, ignore them
            self._token = ''
            self._type = TK_TOKEN
            return 0

        if self._for_count is not None:
            #Track token count in 'For' expression to detect 'In' reserved words.
            #Can only be in third position, no need to go beyond
            self._for_count += 1
            if self._for_count==3:
                self._for_count = None

        self.on_token((self._token, self._type))
        self._token = ''
        self._type = TK_TOKEN
        return 1

    def on_token(self, token):
        raise NotImplementedError
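    # Classification sketch, following _push_token above: '>' arrives as
    # TK_OP and maps to GREATER; 'if' delimited as a plain token becomes the
    # reserved word If; a digits-only token directly followed by '<' or '>'
    # (e.g. the '2' in '2>err.log') becomes IO_NUMBER.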
tokens = [
    TK_TOKEN,
    # To silence yacc unused token warnings
    # TK_COMMENT,
    TK_NEWLINE,
    TK_IONUMBER,
    TK_ASSIGNMENT,
    TK_HERENAME,
]

#Add specific operators
tokens += _OPERATORS.values()
#Add reserved words
tokens += _RESERVEDS.values()

class PLYLexer(Lexer):
    """Bridge Lexer and PLY lexer interface."""
    def __init__(self):
        Lexer.__init__(self)
        self._tokens = []
        self._current = 0
        self.lineno = 0

    def on_token(self, token):
        value, type = token

        self.lineno = 0
        t = lex.LexToken()
        t.value = value
        t.type = type
        t.lexer = self
        t.lexpos = 0
        t.lineno = 0

        self._tokens.append(t)

    def is_empty(self):
        return not bool(self._tokens)

    #PLY compliant interface
    def token(self):
        if self._current>=len(self._tokens):
            return None
        t = self._tokens[self._current]
        self._current += 1
        return t

def get_tokens(s):
    """Parse the input string and return a tuple (tokens, unprocessed) where
    tokens is a list of parsed tokens and unprocessed is the part of the input
    string left untouched by the lexer.
    """
    lexer = PLYLexer()
    untouched = lexer.add(s, True)
    tokens = []
    while 1:
        token = lexer.token()
        if token is None:
            break
        tokens.append(token)

    tokens = [(t.value, t.type) for t in tokens]
    return tokens, untouched
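
if __name__ == '__main__':
    # Minimal usage sketch: tokenize a one-line script. The expected values
    # below were worked out from the lexer rules above, not captured from a
    # run.
    demo_tokens, demo_remaining = get_tokens('echo hello >out.txt\n')
    assert demo_tokens == [
        ('echo', 'TOKEN'), ('hello', 'TOKEN'), ('>', 'GREATER'),
        ('out.txt', 'TOKEN'), ('\n', 'NEWLINE')], demo_tokens
    assert demo_remaining == ''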