
.RP
.TL
Top-down Non-Correcting Error Recovery
in LLgen
.AU
Arthur van Deudekom
Peter Kooiman
.AI
Department of Mathematics and Computer Science
Vrije Universiteit
Amsterdam
Supervised by
.AU
dr. D. Grune
.AI
Department of Mathematics and Computer Science
Vrije Universiteit
Amsterdam
.AB
This paper describes the design and implementation of a parser
generator with non-correcting error recovery, based on the extended LL(1)
parser generator LLgen. It describes a top-down algorithm for implementing
this error recovery technique that can handle left-recursive grammars.
The parser generator has been tested with several existing ACK compilers,
among them C and Modula-2. Various optimizations have been tried and are
discussed in this paper.
.AE
.LP
.nr PS 12
.nr VS 14
.NH
Introduction
.EQ
delim $$
.EN
.nr PS 10
.nr VS 12
.RS
.LP
One of the trickier problems in constructing parser generators is what
to do when the input to the generated parser is not well formed. Several
approaches are known, most of which are `correcting', meaning that they
modify the input to make it correct. However, in most cases there are
several possible corrections, and often the one chosen will turn out
to be the wrong one. As a result of such an incorrect choice, spurious error
messages can occur. Every programmer knows from experience how the omission
of a single `)' can on occasion lead to pages of error messages.
.LP
A radically different approach is to just discard all the input up to
and including the offending token, and start with a clean slate at the
token following the offending one. [RICHTER] describes how
this idea can be used to construct a non-correcting error recovery system
that will never introduce spurious error messages. It is, however,
possible that errors are overlooked.
.LP
In this paper we describe the incorporation of this non-correcting error
recovery into LLgen, an existing LL(1) parser generator.
In this introduction, we will describe in detail this non-correcting error
recovery technique, give an overview of LLgen and how it handles
errors, and finally describe how we have incorporated non-correcting
error recovery in LLgen.
.RE
.NH 2
Non-correcting syntax error recovery
.LP
Richter describes how syntax error recovery can be done
without making any corrections to the input text. Richter gives three
reasons why recovery without correction is desirable:
.IP 1
In most cases there are many possible corrections, the choice among which
will severely influence the further processing of the input. Thus, the
probability of selecting the right correction is not high.
.IP 2
The harm done by selecting the wrong correction is often unlimited.
.IP 3
The loss of information to the user of a non-correcting recovery technique
need not be grave.
.LP
The non-correcting technique described by Richter can be summarized as
follows: when a syntax error has occurred, the input up to and including the
erroneous symbol is discarded; the remainder of the
input is processed by a substring parser of the input
language, that is, a parser that recognizes any substring of a string in the input
language. When the substring parser detects a syntax error, the offending
symbol is reported as another error, and the input up to and including the
erroneous symbol is discarded. The process is then repeated with the remaining input, possibly
finding other syntax errors, until all the input is scanned.
This process yields what Richter calls a
.I
suffix analysis
.R
of an input string. Formally, given an input string
.I x ,
suffix analysis produces a set of strings $w sub k$ and a set of symbols
$a sub k$ such that
.br
.IP
$x~ =~ w sub 0 a sub 0 w sub 1 a sub 1~...w sub n-1 a sub n-1 w sub n$
.LP
and such that:
.br
.IP
$w sub 0$ is the longest prefix of $x$ that is a prefix of
a string in the input language L, formally: there is a string $y$ such that
$w sub 0 y$ is in L, but there is no string $z$ such that $w sub 0 a sub 0 z$
is in L;
.IP
For $0 < k < n$, $w sub k$ is a longest substring of $x$ that is also a
substring of a string in L, formally: there are strings $u$ and $v$ such that
$u w sub k v$ is in L, but there are no strings $y$ and $z$ such that
$y w sub k a sub k z$ is in L;
.IP
$w sub n$ is a substring of $x$
that is a substring of a string in L, formally:
there exist $u$ and $v$ such that $u w sub n v$ is in L. Note that
$w sub n$ need not be a suffix of a string in L; if $x$ represents incomplete
input, $w sub n$ is not a suffix of any string in L.
.LP
Now, the $a sub k$ indicate points at which an error is detected. The
"real" error need not be at $a sub k$; it can have occurred anywhere
within $w sub k a sub k$.
In his paper, Richter shows that, although this method may miss errors, it
will never introduce spurious errors.
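.LP
The process is easily expressed as a driver loop. The following C
sketch shows the structure of such a suffix analysis; all names are
invented here for illustration, and substring_parse() stands for a
parser that consumes a maximal substring $w sub k$ and returns the
offending symbol $a sub k$:
.DS
.ft CW
#include <stdio.h>

#define EOFILE 0                  /* hypothetical end-of-input token  */

extern int next_token(void);      /* the lexical analyzer             */
extern int substring_parse(int);  /* consume a maximal substring w_k;
                                     return the offending symbol a_k,
                                     or EOFILE at end of input        */

void suffix_analysis(void)
{
    int tok = next_token();

    while (tok != EOFILE) {
        tok = substring_parse(tok);
        if (tok == EOFILE)
            break;                /* w_n reached the end of the input */
        fprintf(stderr, "syntax error at symbol %d\en", tok);
        tok = next_token();       /* discard a_k; w_k+1 starts here   */
    }
}
.ft R
.DE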
.LP
For implementing the technique, a parser that recognizes any
substring of the input language is needed. If we confine ourselves to
syntactical analysis, it is sufficient to construct a substring
recognizer. Richter himself does not give a practical construction, but
[CORMACK] describes how an LR substring parser can be constructed
that handles BC-LR(1,1) grammars. In this paper, we describe the construction
of an LL substring recognizer that can handle any grammar. Furthermore,
our recognizer is actually a suffix recognizer, that is, a recognizer that
recognizes any suffix of a string in the input language. Our suffix recognizer has the
correct-prefix property,
meaning that it detects the first syntax error as early as possible
in a left-to-right scan of the input. Specifically, if the input language
is L and the invalid input is $x$, it finds a string $w$ and an input symbol
$a$ such that $x ~=~ w a y$, there is a string $z$ such that $wz$
is in L, and there is no string $z$ such that $waz$ is in L.
Because the suffix parser has this correct-prefix property, it can be
used as a substring parser, because it will detect the first input symbol that
is not part of a substring of the language. Because it is a suffix recognizer,
it additionally will detect incomplete input, because in that case
at the end of the input the parser will not be in an accepting state.
.NH 2
Overview of LLgen
.LP
LLgen is an extended LL(1) parser generator. For a complete description,
see [GRUNE].
LLgen can actually handle grammars that are not LL(1), because it allows
the use of conflict resolvers. In case of an LL(1) conflict, these resolvers
are used to statically or dynamically decide which rule to use. As we will see
later, this feature makes it necessary for the suffix recognizer to
handle grammars that are not LL(1). Semantic actions can occur anywhere
in the grammar rules, and they are executed when their position is
reached during parsing. A typical LLgen rule looks like
.br
.IP
S: A {
.I action
} B
.LP
where the action is a piece of C code that will be executed
when the parser is using the rule for S and has recognized A.
.LP
LLgen-generated parsers use correcting syntax error recovery, based on a
scheme designed by R\*:ohrich [ROEHRICH], inserting or deleting symbols at the point of error detection
until correct input results. This means that actions in the parser will
always be executed in an order that could also have resulted from
syntactically correct input, and most importantly, once a grammar rule
is started it is guaranteed to be completed. This means that syntactic
errors can never result in inconsistencies for the actions. Actions
only have to deal with syntactically correct input. In a nutshell, the
error recovery in LLgen parsers works as follows: suppose the parser is
presented with correct input that breaks off before the end. The error
recovery mechanism now provides a continuation path, chosen in such a
way that all active rules are left as soon as possible. Effectively, the
continuation path is the `shortest way out'. The symbols on this path are
called `acceptable', and end-of-file is also `acceptable'. Furthermore, at
each point along this `shortest path' there can be other terminals that
would be correct; these are `acceptable' as well. Now, when an
error occurs, all symbols that are not acceptable are discarded, until
an acceptable symbol appears in the input. The tokens on the path up to
but not including the acceptable input symbol are inserted.
From then on, normal parsing resumes.
.NH 2
Incorporation of non-correcting error recovery in LLgen
.LP
An important consideration in incorporating the non-correcting recovery
in LLgen was that correct programs should suffer as little as possible
as regards compilation speed. Furthermore, the existing error
recovery method has the highly desirable property that rules that are
started will be finished too, thus ensuring that errors in the
input text will not cause inconsistencies in the semantic actions. We have
implemented the non-correcting error recovery in such a way that this
property is preserved.
.LP
The way we have achieved these goals is by actually including
the suffix recognizer as a `second recognizer' in the generated parser.
Correct programs are handled in the usual way by the parser, but if an error
occurs the following happens: instead of going to the standard error
recovery routine, the parser starts executing the non-correcting error
handler. This process continues, reporting all errors, until the
end of the input text is reached. Then, control is handed back to
the standard error recovery routine. This routine will now think
there is no more input, and thus start inserting tokens so as to construct
a `shortest way out'. This ensures that all rules that were started are
also finished, and no inconsistencies can occur in the semantic actions.
However, this method does require some modifications to the error reporting
routine. Normally, if the generated parser inserts a token, it reports
this to the user, but in this case this is undesirable. The insertions only
serve to maintain consistency in the semantic actions
and do not signify errors, so reporting of insertions should be suppressed.
.bp
.nr PS 12
.nr VS 14
.PS
boxwid = boxwid / 1.5
boxht = boxht / 1.5
arcrad = arcrad / 1.5
movewid = movewid / 1.5
moveht = moveht / 1.5
arrowwid = arrowwid / 1.5
arrowht = arrowht / 1.5
arrowhead = arrowhead / 1.5
linewid = linewid / 1.5
lineht = lineht / 1.5
.PE
.NH
The LL suffix parser
.nr PS 10
.nr VS 12
.RS
.LP
In this chapter, we describe the construction of the LL suffix parser.
The described parser is not restricted to LL(1) grammars, because the
presence of conflict resolvers in LLgen allows for more general grammars,
which may even be left-recursive. We start this chapter with a discussion
of the implications of conflict resolvers, and continue with descriptions
of the parser algorithm, the data structures used,
the handling of left and right recursion, and some possible optimizations.
.RE
.NH 2
LLgen conflict resolvers and their implications
.LP
In grammars that are nearly but not completely LL(1), conflicts
will arise in the two places where parsing decisions are made: the choice
of which alternative to start (`alternation conflicts') and the decision
to stop or continue a repeated item (`repetition conflicts'). In order to
allow LLgen to handle this type of grammar, the user can
specify conflict resolvers in those places where conflicts arise.
These resolvers are Boolean expressions labeling an alternative,
and are evaluated when a conflict arises during parsing. If the
expression evaluates to `true' the labeled alternative will be taken.
The Boolean expressions are expressions in C, and can consult
any information available at the point they occur.
However, if a syntactic error has occurred in the input, and the non-correcting
error recovery starts, we can no longer rely on the conflict resolvers to
guide parsing decisions. The suffix recognizer is only concerned with
syntax, and will not execute any semantic actions. It recognizes suffixes
of correct input, but does not know or care what prefix would make
the suffix a correct program; as a result, the information that conflict
resolvers could use is not available, because the semantic actions
that would build this information have not been executed.
Therefore, the information used by the conflict resolvers is no longer
reliable, and the suffix parser needs to be able to handle the underlying
grammar without their help. In particular, it has to be able to handle
left-recursive grammars.
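.LP
As an illustration, an alternation conflict could be resolved as
follows; this is a contrived fragment, with looks_like_declaration()
standing for whatever C function the user supplies:
.DS
.ft CW
declaration_or_statement
        : %if (looks_like_declaration())
          declaration
        | expression_statement
        ;
.ft R
.DE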
.NH 2
The suffix parser algorithm
.LP
Our algorithm needs easy access to the grammar rules; in the description
we assume there is an efficient way to access the grammar rules. In
the next chapter we will describe the details of the actual implementation.
For the moment, we will only consider grammars that are not left- or
right-recursive. In the next section, we will discuss how the algorithm has to be adapted
to handle left and right recursion.
.LP
Suppose the grammar is G, and the input to the suffix recognizer is
$a sub 0 a sub 1 ... a sub n-1 a sub n$. Remember that parsing is
always started by the `normal' LLgen-generated parser. It is only after
a syntactic error has occurred that the suffix recognizer will be started.
The input to the suffix recognizer thus is the `tail' of the input, starting
at the first symbol after the position where the first syntax error was
found.
.LP
Now, in order to get parsing going again, the parser scans the grammar
for rules which contain symbol $a sub 0$ in the right-hand side:
.br
A: $alpha ~ a sub 0 ~ beta$
.br
.LP
where $alpha$ and $beta$ represent a string of terminals and nonterminals,
possibly empty. Now, for each of these rules found, and for any string
$b sub 0 b sub 1$...$b sub m$ that can be generated by $beta$, it holds that
$a sub 0 b sub 0 b sub 1$...$b sub m$ is a substring of some string in L.
This can be shown as follows, supposing that the start symbol is S and
S $-> sup * gamma$ A $delta$:
.br
S $-> sup * gamma$ A $delta$ $-> sup * gamma ~ alpha ~ a sub 0 beta ~ delta
-> sup * gamma ~ alpha ~ a sub 0 b sub 0 b sub 1$...$b sub m delta$
.br
Of course, there may very well be more than one such string
$b sub 0 b sub 1$...$b sub m$, and one of these strings can be empty as well, if
$beta$ can produce empty. Now, in what we will call the
.I
predicting phase
.R
the algorithm will
produce all possible symbols $b sub 0$. Then, in what we will call the
.I
accepting phase
.R
these symbols are matched against
the input, and those not matching are discarded. Then, entering the next
predicting phase, the algorithm will produce
all symbols $b sub 1$, and match them against the next input symbol in
the subsequent accepting phase,
etc. In case one of the strings $b sub 0$...$b sub m$ is empty, or
the end of one of the strings is reached, some way to continue is
needed; we will discuss this later. First let's see how the
algorithm produces the strings $b sub 0$...$b sub m$.
.LP
For each rule in the grammar of the form
.br
A: $alpha a sub 0 W sub 1 W sub 2$...$W sub p$
.br
with each $W sub k$ a terminal or nonterminal, a
.I
prediction graph
.R
is created that looks like this:
.PS
down; box "$a sub 0$"; arrow; box "$W sub 1$"; arrow
box "$W sub 2$"; arrow dashed; box "$W sub p$"
arrow; box "END" "$[A]$"
.PE
.LP
The bottom element of these prediction graphs is an end-marker containing the
left-hand side of the rule used. All these graphs have $a sub 0$ on top, and
this $a sub 0$ is matched against the $a sub 0$ in the input in the
accepting phase that follows, removing the
$a sub 0$ from the graph. If the prediction graph is now empty, we have to find a way
to continue; this case is treated later. First we will consider what to do if
the prediction graph is not empty. There are two possibilities: either $W sub 1$ is a
terminal, or it is a nonterminal. If it is a terminal, we are finished for
the moment; if not, the algorithm scans for rules of the form
.br
$W sub 1$: $U sub 1 U sub 2$...$U sub i$
.br
.LP
with each $U sub k$ a terminal or nonterminal. Now, the algorithm substitutes
the top of the prediction graph with the right-hand sides
of all the rules found. Because there can be more than one rule, the
prediction graph can now become a DAG (Directed Acyclic Graph).
Supposing there are two rules with $W sub 1$ as their left-hand side:
.br
$W sub 1$: $U sub 1 U sub 2$...$U sub i$
.br
$W sub 1$: $V sub 1 V sub 2$...$V sub j$
.LP
the prediction graph will now look like this:
.PS
B1: box "$U sub 1$"
move
B2: box "$V sub 1$"
arrow dashed down from bottom of B1
B3: box "$U sub i$"
arrow dashed down from bottom of B2
B4:box "$V sub j$"
move to 0.5 <B3.se, B4.sw>
down;move
B5:box "$[W sub 1 ]$"
arrow dashed;
box "$W sub p$"
arrow;
box "END" "$[A]$"
arrow from B3.bottom to B5.top
arrow from B4.bottom to B5.top
.PE
.LP
The graph element representing $W sub 1$ is left in the graph; the
notation $[W sub 1 ]$ indicates it has been substituted. These substituted
elements will from now on be ignored by the algorithm. The elements
$U sub 1$ and $V sub 1$ are now `on top' of the prediction graph.
.LP
If $W sub 1$ can also produce empty, its successor in the prediction graph
has to be processed
as well; the algorithm walks down the graph to this successor, and
there the process is repeated; if it is a terminal we are finished, else we
substitute it with the right-hand sides of its grammar rules.
However, the element that we want to substitute now, say $W sub k$, cannot
be marked `substituted' just like that, because it can be on another
path, on which it cannot be substituted yet. Therefore, a copy of element
$W sub k$ is made, it is marked $[W sub k ]$, and an edge is created
from $[W sub k ]$ to the successor of $W sub k$. This produces graphs like
this:
.br
.PS
B1: box "$U sub 1$"
move
B2: box "$V sub 1$"
move
X1:box "$X sub 1$"
arrow dashed down from bottom of B1
B3: box "$U sub m$"
arrow dashed down from bottom of B2
B4:box "$V sub j$"
arrow dashed down from bottom of X1
Xj: box "$X sub j$"
move to 0.5 <B3.se, B4.sw>
down;move
B5:box "$[W sub 1 ]$"
arrow dashed;
B6: box "$W sub k$"
arrow
Wk1:box "$W sub k+1$"
arrow dashed
box "$W sub n$"
arrow;
box "END" "$[A]$"
arrow from B3.bottom to B5.top
arrow from B4.bottom to B5.top
move down from Xj.top;move;move;move
Wk: box "$[W sub k ]$"
arrow from Xj.bottom to Wk.top
arrow from Wk.bottom to Wk1.top
.PE
.LP
This process of substituting is repeated with all nonterminals that are
now on top of the prediction graph, until there are only terminals on top of
the graph.
This completes the prediction phase of the algorithm, not taking into account
what to do if an END marker appears on top of the graph.
Now, the algorithm enters its accepting phase, in which
the terminals on top are compared with the next symbol in the input.
If a terminal in the graph matches the input, its element is deleted
from the graph, and the substitution process will continue with its
successors, in the next prediction phase.
If a terminal on top of the graph does not
match the input, the path it is on represents a `dead end', which
does not need to be processed any further. The terminal is no longer
a `top', and the algorithm will not visit it again.
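.LP
The two phases can be sketched in C as follows. This is only an
outline in our own terms, not the actual implementation: it assumes a
work list holding the current tops, an iterator rules_for() over all
right-hand sides of a nonterminal, and a routine chain_of() that
builds the chain $U sub 1$...$U sub i$ on top of a given successor.
The continuation step for END markers is the subject of the
description below.
.DS
.ft CW
#include <stddef.h>

struct element {                   /* one prediction graph element     */
    int sym;                       /* terminal or nonterminal number   */
    int substituted;               /* already expanded in this phase   */
    struct element *succ;          /* the successor; NULL on END only  */
};

struct worklist;                   /* the current tops                 */
struct rhs;                        /* one right-hand side of a rule    */

extern struct element *take(struct worklist *);    /* NULL when empty  */
extern void add(struct worklist *, struct element *);
extern int is_terminal(int sym);
extern int is_end_marker(struct element *);
extern int derives_empty(int sym);      /* already computed by LLgen   */
extern struct rhs *rules_for(int sym);  /* all rules W: U1 ... Ui      */
extern struct rhs *next_rule(struct rhs *);
extern struct element *chain_of(struct rhs *, struct element *succ);
extern void continue_after(int lhs, struct worklist *); /* see below   */

/* Prediction phase: expand nonterminal tops until every top is a
 * terminal. Marking an element `substituted' before expanding it
 * keeps the phase from looping on nonterminals that derive empty.     */
void predict(struct worklist *tops, struct worklist *heads)
{
    struct element *e;

    while ((e = take(tops)) != NULL) {
        if (is_end_marker(e)) {            /* a whole rule matched:    */
            continue_after(e->sym, tops);  /* run the continuation     */
        } else if (is_terminal(e->sym)) {
            add(heads, e);                 /* a finished top           */
        } else if (e->substituted) {       /* entered twice: it must   */
            add(tops, e->succ);            /* derive empty, so walk on */
        } else {
            e->substituted = 1;            /* make it [W] first of all */
            for (struct rhs *r = rules_for(e->sym); r; r = next_rule(r))
                add(tops, chain_of(r, e->succ));
            if (derives_empty(e->sym))
                add(tops, e->succ);        /* its successor is a top   */
        }
    }
}

/* Accepting phase: heads that match the input symbol survive; their
 * successors become the tops of the next prediction phase.            */
int accept_heads(struct worklist *heads, int sym, struct worklist *tops)
{
    struct element *e;
    int matched = 0;

    while ((e = take(heads)) != NULL) {
        if (e->sym == sym) {
            add(tops, e->succ);
            matched = 1;
        }                   /* else: a dead end, never visited again   */
    }
    return matched;         /* 0 means sym is the next point of error  */
}
.ft R
.DE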
.LP
There is one tricky situation: consider again this graph:
.PS
B1: box "$U$"
move
B2: box "$a$"
move to 0.5 <B1.se, B2.sw>
down;move
B5:box "$W sub 1 $"
arrow dashed;
box "$W sub n$"
arrow;
box "END" "$[A]$"
arrow from B1.bottom to B5.top
arrow from B2.bottom to B5.top
.PE
.LP
Here, the algorithm is processing $W sub 1$ in the predicting phase, and
using some rule it has produced $a$ on top; there is another rule with
$W sub 1$ as its left-hand side which has produced nonterminal $U$ on top.
Now, suppose $U$ is a nonterminal that can
produce empty. Now, the algorithm starts substituting $U$, and walks
down to $W sub 1$. What we definitely do not want
is the algorithm to start substituting $W sub 1$ again, because then we
would loop forever. Therefore, if the algorithm starts processing
element $W sub 1$ it should make it $[W sub 1 ]$ before it does
anything else. On entering the element
for the second time in the prediction phase, it sees that it is already substituted,
so there is nothing to do.
It then just walks to the successor of $W sub 1$ and
starts substituting it. This is correct, since the fact that the algorithm
enters an element for the second time in a prediction phase means that the element
indirectly can produce the empty string, and thus its successor must
be substituted as well in the prediction phase.
.LP
It is easy to see that the substitution process will stop: the algorithm can
only loop if it starts processing an element for the second time in a
prediction phase,
or if the processing of an element eventually yields a graph with that
same element on top.
The first case cannot occur because the algorithm marks elements it is
processing as `substituted' before it does anything else, meaning that those elements will not
be processed again; the second case can only occur if the grammar is
left-recursive, which we assumed it was not.
.LP
The algorithm simulates
left-most derivations of strings $a sub 0 b sub 0 b sub 1$..$b sub n$
starting from $a sub 0 W sub 1$..$W sub p$; as we showed before, if
the algorithm recognizes a string $a sub 0 b sub 0$..$b sub n$, that
string is a substring of some string in L. Conversely, because the
algorithm starts out by using all rules of the form
A: $alpha a sub 0 beta$, and then proceeds to simulate all
possible left-most derivations, it will recognize all input
$a sub 0 b sub 0$... $b sub n$ that can be produced starting from
$a sub 0 beta$.
.LP
Now we will discuss what has to be done if an END marker appears on
top of the prediction graph.
When this happens, it means that starting from some rule
.br
A: $alpha a sub 0 beta$
.br
the algorithm has produced a leftmost derivation of a string
$a sub 0 b sub 0$..$b sub n$ starting from $a sub 0 beta$, or that $beta$ can produce
empty and the string so far is just $a sub 0$. The next step is to assume
that we have recognized A and that some string produced by $alpha$
is part of the prefix that makes the suffix we are recognizing a
correct string in L. Remember that in the END marker we kept a record of
the LHS of the rule that started the graph, and we will now use this
LHS to continue recognizing. What the algorithm does is scan for all
rules of the form:
.br
B: $gamma$ A $delta$
.br
with $gamma$ and $delta$ possibly empty strings of terminals and nonterminals.
The algorithm now starts a new component in the prediction graph, and if $delta$ is
$W sub 1 W sub 2$...$W sub n$ it looks like this:
.PS
down;box "$W sub 1$"; arrow
box "$W sub 2$"; line dashed; box "$W sub n$"
arrow; box "END" "$[B]$"
.PE
.LP
Note that the END marker now contains B, because we have started to match
a rule for B. If the $delta$ in the rule for B was empty, this just produces
an END marker with B in it; in this case, the process is just repeated
with all rules of the form:
.br
C: $zeta$ B $eta$
.br
.LP
etc., until we have a prediction graph with a nonterminal or terminal on top.
Now, the substitution algorithm is again applied over all nonterminals on
top, until every top contains a terminal. It is possible that during
substitution again an END marker will turn up; if this happens
we again scan for rules to continue with, etc.
This `continuation algorithm' can only loop if, when
trying to build a new prediction graph for matched symbol A, it produces an empty
graph with again matched symbol A. If this happens, the grammar was
(directly or indirectly) right-recursive, and we assumed that it was not.
Therefore, the algorithm will terminate. The terminals on top of the
new graph after applying this `continuation' algorithm are exactly those
that could follow the string $a sub 0 b sub 0$..$b sub n$ in a substring
of a string in L.
To see this, suppose we have `recognized' the rule
.br
A: $alpha a sub 0 beta$
.br
and $a sub 0 b sub 0 b sub 1$...$b sub n$ is the string produced from
$a sub 0 beta$ by the algorithm. Now, using rule:
.br
B: $gamma$ A $delta$
.br
and supposing that S $-> sup *$ $zeta$ B $eta$ we get
.br
S $-> sup *$ $zeta$ B $eta$ $->$ $zeta gamma$ A $delta$ $eta$ $-> sup *$ $zeta gamma a sub 0 b sub 0 b sub 1$ ... $b sub n$ $delta$ $eta$
.br
.LP
and thus any string produced by a derivation starting from
$delta$ can come right after $a sub 0 b sub 0$...$b sub n$ in a substring
of some string in L. The algorithm will proceed to generate all these
strings starting from $delta$. If $delta$ produces empty, the above
is just repeated. Because in the `continuation' part
all possible rules are considered, the whole algorithm will recognize
all substrings of any string in L. In order to determine if we
have actually recognized a suffix of some string in L, we need to
remember if within a predicting phase the `continuation' part of the algorithm has been run
on an END marker containing the start symbol S;
if this is the case, then the input seen until now is a suffix of some string in L.
Formally, it means that there is a derivation starting from start symbol
$S$ such that if the
input seen until now is $a sub 0 a sub 1$..$a sub n$, then:
.br
S $-> sup * alpha beta$ $-> sup * alpha a sub 0 a sub 1$..$a sub n$
.br
.LP
where $alpha$ can be empty and $beta$ is not empty.
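.LP
In the same sketch setting as before, the continuation step could
look like this; occurrences_of() is assumed to yield every rule
B: $gamma$ A $delta$ in which A occurs, together with its $delta$
part, and the `already continued' test anticipates the list that
section 2.4 introduces to keep right-recursive grammars from looping:
.DS
.ft CW
struct occurrence {                /* one occurrence: B: gamma A delta  */
    struct occurrence *next;
    int lhs;                       /* B                                 */
    struct rhs *delta;             /* what follows A; NULL if empty     */
};

extern struct occurrence *occurrences_of(int sym);
extern struct element *end_marker(int lhs);      /* fresh END element  */
extern int already_continued(int lhs);
extern void remember_continued(int lhs);
extern int start_symbol;
int suffix_recognized;             /* set when [S] turns up             */

/* Continuation: an END marker [A] on top means that some rule for A
 * has been matched completely; every rule B: gamma A delta may
 * continue the parse with delta on top of a fresh END marker [B].     */
void continue_after(int lhs, struct worklist *tops)
{
    if (already_continued(lhs))    /* at most once per phase            */
        return;
    remember_continued(lhs);
    if (lhs == start_symbol)
        suffix_recognized = 1;     /* input so far is a suffix of L     */

    for (struct occurrence *o = occurrences_of(lhs); o; o = o->next) {
        if (o->delta == NULL)              /* delta empty: repeat the   */
            continue_after(o->lhs, tops);  /* process with B itself     */
        else
            add(tops, chain_of(o->delta, end_marker(o->lhs)));
    }
}
.ft R
.DE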
.NH 2
The prediction graph data structure
.LP
The graphs that are produced by the suffix recognizer may grow extremely
large; to facilitate an efficient
implementation we have devised a way of keeping the size of the
data structure under control, in a way that is very similar to
the way described in [TOMITA].
.LP
The basic idea is that, in a prediction phase of the algorithm, it is not
necessary to explicitly substitute each nonterminal every time it
turns up as a `top'; it is sufficient to do it once, because a
second substitution would produce exactly the same subgraph starting at
the substituted nonterminal. Here is an example:
.PS
down;box "$a$";arrow;box "A";arrow dashed;box "[B]";arrow
box "C";arrow dashed;box "END" "[X]"
move right from last box.e;
box "END" "[Y]";
arrow <- dashed up from last box.top;
box "D";arrow <- up from last box.top
box "B"
.PE
.LP
Here, in the left component of the graph, nonterminal B has been
substituted. Now, in the same prediction phase, the algorithm again runs into
B, now in the right component. There is no need to compute again
what the substitution will produce; it is exactly the part on top
of B in the left component. Therefore, all that is needed is:
.PS
down;box "$a$";arrow;box "A";arrow dashed;
B1: box "[B]";arrow
box "C";arrow dashed;box "END" "[X]"
move right from last box.e;
box "END" "[Y]";
arrow <- dashed up from last box.top;
box "D"
arrow from B1.bottom to last box.top
.PE
So, when, in a prediction phase of the algorithm, a nonterminal is substituted,
the nonterminal is placed on a list, together with a pointer to
the substituted nonterminal. If in the same prediction phase a nonterminal that
is on the list becomes a top, all we need to do is place an edge
between the already substituted one and the successor of the top we are currently
processing. When a prediction phase is finished, the list is cleared.
There is one catch: if we consider again the last picture,
note that if nonterminal B can (directly or indirectly) produce empty,
it is also necessary to substitute D. However, it is not difficult to
determine if a nonterminal can produce empty; LLgen already computes
this information for each nonterminal.
.LP
Without this `joining together' of graph components, each
element in the graph has exactly one successor, except the END marker,
which has none.
Now that components get joined as described, an element can have any
number of successors. The recognizer algorithm now has to consider all
successors of a graph element instead of just one.
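.LP
In C, a possible shape for the extended element type is the
following; the names are ours, not those of the actual
implementation, and the refcount field anticipates the optimizations
of section 2.6:
.DS
.ft CW
#define SUBSTITUTED 01     /* the [W] mark                        */
#define VISITED     02     /* traversal flag, see section 2.5     */

struct edge {
    struct element *to;
    struct edge    *next;  /* next successor of the same element  */
    int             flags; /* circuit marks, see section 2.7      */
};

struct element {
    int          sym;      /* terminal, nonterminal or END LHS    */
    int          flags;    /* SUBSTITUTED, VISITED, ...           */
    int          refcount; /* number of entering edges            */
    struct edge *succ;     /* successor edges; NULL on END only   */
};
.ft R
.DE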
.NH 2
Handling right recursion
.LP
The only problem right-recursive grammars cause in the algorithm is in the
`continuation' part; they can cause this part of the algorithm to loop
forever. As an example, consider:
.br
A: $alpha$ B
.br
B: $beta$ C
.br
C: $gamma$ A
.LP
Now suppose the `substitution' part of the algorithm has turned up
an END marker with nonterminal A in it. The continuation algorithm will
now produce:
.PS
box "END" "[A]";move;box "END" "[C]";move;box "END" "[B]";move
box "END" "[A]";move;box "END" "[C]"
.PE
.LP
and so on. However, a slight modification to the algorithm suffices
to eliminate this problem: within each prediction phase of the algorithm, we
simply maintain a list of nonterminals that have turned up in an
END marker. As soon as an END marker turns up whose nonterminal is
already in the list, we stop the `continuation' algorithm; the part
of the graph that would be produced by it has already been generated
by an earlier invocation of the algorithm in the same prediction phase.
At the end
of a prediction phase, when all heads are terminals, we clear the list.
This way, no looping can occur; even if the right recursion is
indirect, for instance if in the above example the rule for A had been
.br
A: $alpha$ B $delta$
.br
.LP
where $delta$ can produce empty, the algorithm still works; the substitution
of $delta$ will yield an END marker on top, and when trying to find
a continuation for LHS A the algorithm notices A is already on the list.
.NH 2
Handling left recursion
.LP
Left recursion is, unfortunately, a much tougher problem than
right recursion. The result of left-recursive grammar rules is that
the substitution algorithm never stops, because it can keep on building
the graph with the same set of rules without ever turning up a terminal.
One course of action would be to pre-process the grammar rules to
eliminate left recursion; there are algorithms that eliminate direct
and indirect left recursion. However, we have taken another course; by
allowing the produced graphs to contain loops, we can handle left
recursion without any modifications to the grammar. As soon as
we come to the point that we want to substitute a nonterminal
which was already substituted earlier on the same path and in
the same prediction phase, we can
make a link from the `older' nonterminal to the successor of
the `new' nonterminal. In this way we have constructed a loop
in the graph. As an example, suppose we have the following rules:
.br
D: A
.br
A: B a
.br
B: A | x
.br
Suppose also that we have nonterminal `D' on top of the stack. We
now start substituting `D':
.PS
A: box "A"
move
X: box "x"
move to 0.5 <A.se, X.sw>
down
move
B: box "[B]"
arrow
box "a"
arrow
box "[A]"
arrow
box "[D]"
arrow dashed
box "END" "[S]"
arrow from A.s to B.n
arrow from X.s to B.n
.PE
.LP
We now have an `A' on top of the stack which was already
substituted on the same path and also in the same prediction phase. To avoid
never-ending substitution we make a loop as follows:
.PS
A: box "A" dashed
move
X: box "x"
move to 0.5 <A.se, X.sw>
down
move
B: box "[B]"
arrow
box "a"
arrow
A2: box "[A]"
arrow
box "[D]"
arrow dashed
box "END" "[S]"
arrow dashed from A.s to B.n
arrow from X.s to B.n
arc <- from B.w to A2.w
.PE
.LP
The dashed box with `A' in it means that it can be deleted, because
there is already an occurrence of it in the loop.
.LP
The most beautiful result of loops in graphs is
that the original parsing algorithm needs only one minor change.
When the algorithm visits an element which has more than one
outgoing edge, the algorithm starts tracking down all these paths,
just like before; now there may be one or more back edges among
these edges, but the algorithm need not be aware of this fact.
The only difficulty with loops is that the algorithm might go into
a loop; it continues searching for terminals, but it might happen
that there are no valid terminals in the loop. The solution to this
problem is not very difficult: just set a flag at all elements we
visit. When we reach an element which has this flag turned on, we
don't have to search any further. At the end of the prediction phase, when we
have found all possible new heads, all flags are cleared.
Even if there are no loops in the
prediction graph, setting flags may be used as an optimization:
it is possible that two paths come together at one point. In that situation
it is useless to scan for the second time the part of the graph which
both paths have in common.
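.LP
The flag-protected search can be sketched like this, in terms of the
element type of section 2.3; which elements the search must descend
past is kept abstract here:
.DS
.ft CW
extern int must_descend(struct element *);   /* e.g. substituted, or a
                                                nonterminal deriving
                                                empty                  */

/* Cycle-safe walk: the VISITED flag stops both loops and the repeated
 * scanning of a path shared by two joined components. All flags are
 * cleared again when the prediction phase is finished.                */
void walk(struct element *e, void (*process)(struct element *))
{
    if (e == NULL || (e->flags & VISITED))
        return;                    /* a loop, or a part already seen   */
    e->flags |= VISITED;
    process(e);                    /* e.g. expand it, collect a head   */
    if (!must_descend(e))
        return;
    for (struct edge *s = e->succ; s != NULL; s = s->next)
        walk(s->to, process);      /* back edges are harmless here     */
}
.ft R
.DE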
.NH 2
Some optimizations using reference counts
.LP
As explained in section 2.2, it is sometimes necessary to copy a
prediction graph element before substituting it. In order to determine
if a certain element has to be copied, it is convenient to maintain
a reference count in each graph element. This reference count keeps
track of the number of edges that enter an element. Now, when we want
to substitute an element with reference count not 0, we need to
copy it, because there is another path in the prediction graph that
contains the element we want to substitute, and on this other path
the element cannot be substituted yet.
.LP
Maintaining reference counts also enables us to perform another
optimization: remember that if, in a prediction phase, a terminal
is predicted that does not match the current input symbol, from
then on we just ignore the path in the graph starting at the terminal.
However, we can safely delete the terminal from the graph; furthermore,
all its successors in the prediction graph that have reference count
0 can be deleted as well, as can their successors with reference
count 0, etc. This way, we delete from the prediction graph
most elements that are no longer accessible, but not all of them; as will
be explained in the next section, loops in the prediction graph
can cause problems.
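.LP
In C, the deletion step might be sketched as follows; free_edge()
and free_element() stand for whatever deallocation the
implementation uses:
.DS
.ft CW
extern void free_edge(struct edge *);
extern void free_element(struct element *);

/* Remove a dead-end element: every successor loses an entering edge,
 * and successors whose reference count drops to 0 go the same way.
 * An unreachable loop keeps all its counts above 0 and so survives
 * this test; the next section deals with that case.                   */
void delete_element(struct element *e)
{
    struct edge *s, *next;

    for (s = e->succ; s != NULL; s = next) {
        next = s->next;
        if (--s->to->refcount == 0)
            delete_element(s->to);
        free_edge(s);
    }
    free_element(e);
}
.ft R
.DE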
  791. .NH 2
  792. The algorithm to delete inaccessible loops
  793. .LP
  794. Deleting graph elements which are no longer reachable is not as easy
  795. as it looks when there are loops in the graph, introduced by
  796. the extension to the algorithm that handles left recursive grammars.
  797. Suppose for example that we have a very simple loop as in the left
  798. picture below:
  799. .PS
  800. down
  801. X: box "x" "(0)"
  802. arrow
  803. box "[B]" "(2)"
  804. arrow
  805. box "a" "(1)"
  806. arrow
  807. box "[A]" "(1)"
  808. arrow
  809. box "[D]" "(1)"
  810. arc <- from 2nd box.w to 2nd last box.w
  811. move right from X.ne
  812. move
  813. move
  814. move
  815. move
  816. move
  817. move
  818. down
  819. box "x" "(0)" dashed
  820. arrow dashed
  821. B: box "[B]" "(1)"
  822. arrow
  823. box "a" "(1)"
  824. arrow
  825. box "[A]" "(1)"
  826. arrow
  827. box "[D]" "(1)"
  828. arc <- from B.w to 2nd last box.w
  829. .PE
  830. .LP
  831. The number below each symbol indicates the reference count of that element.
  832. Suppose now that we delete `x', then we have the situation depicted in the
  833. picture on the right. The loop consisting of `[B]', `a' and `[A]' is now
  834. unreachable, so all these elements can be deallocated.
  835. The reference count of `[B]' is 1, so it will not be deleted. To be precise
  836. all elements in the loop have their reference counts on 1, and
  837. consequently none of these will be deleted. But we stated earlier
  838. that all elements of the loop cannot be reached anymore and that the
  839. loop had to be deleted! In this example the reference counts of the
  840. loop elements are all 1, but in more complex situations it is also
  841. possible that some of the elements have a reference count of more
  842. than 1.
  843. .LP
  844. To solve this problem we present an algorithm, devised by E. Wattel, that
  845. determines whether a loop can be deleted or not.
  846. The algorithm consists of two parts. The first part of the algorithm goes as
  847. follows: it presumes that all elements of the loop will indeed be
  848. deleted. Every time it deletes an element it decreases the reference
  849. count of all the successors of the element that are also member of the same
  850. loop. How the algorithm knows which elements belong to the loop and which
  851. do not will be explained later. The situation of the example above will now
  852. look like this:
  853. .PS
  854. down
  855. box "[B]" "(0)"
  856. arrow
  857. box "a" "(0)"
  858. arrow
  859. box "[A]" "(0)"
  860. arrow
  861. box "[D]" "(1)"
  862. arc <- from 1st box.w to 2nd last box.w
  863. .PE
  864. .LP
  865. The number below each symbol indicates again the reference count
  866. after we have applied the first part of the algorithm.
  867. .LP
  868. The second part of the algorithm checks and restores the
  869. reference counts of all members of the loop . When it finds
  870. out that one or more reference counts are not 0, it concludes
  871. that it is still possible to enter the loop in some way, and
  872. that it cannot be
  873. deleted yet. In the other case it reports that the loop can be
  874. deleted, which is also true in our example.
  875. .LP
  876. We will now formally describe the first part of the algorithm
  877. that finds all directed circuits from a given vertex, and determines if
  878. the vertices on those circuits can be deleted.
  879. The algorithm works on prediction-graphs in which every edge that
  880. is in a circuit is marked. Note that a marked edge may be in more than one circuit.
  881. We will call this mark `C'.
  882. The input to the algorithm is such a prediction graph, and a start vertex,
  883. say A. The first part of the algorithm is:
  884. .IP 1
  885. Put the start vertex A on a list L; mark all edges `unused'
  886. .IP 2
  887. If L is empty, stop
  888. .IP 3
  889. For each vertex in list L, check if there are edges marked both C' and
  890. `unused'. For each edge found, mark it `used', and traverse it to its
  891. other endpoint; put this endpoint on a new list M, initially empty
  892. .IP 4
  893. Decrease the reference count of all vertices on M by 1
  894. .IP 5
  895. L := M; go to 2
  896. .LP
  897. It is clear that the algorithm will terminate: each edge is only traversed once,
  898. and the number of edges is finite. We will now prove some properties of this
  899. part of the algorithm.
  900. .LP
  901. .I
  902. An edge is traversed by the algorithm if and only if it is on some
  903. directed circuit $A ->$...$->A$.
  904. .R
  905. .br
  906. The if-part is easy; if an edge $e$ connecting vertices $W$ and $V$ is on some directed circuit starting in
  907. $A$, then there is a path $A ->$...$-> W -> V$; let $A ->$...$-> W -> V$ be a path
  908. of minimum length from $A$ to $V$. If the length of the path from $A$ to
  909. $W$ is $k$, then after turn $k$ of the algorithm $W$ will be on list L. To see
  910. that this is the case, suppose that $W$ is not on list L after turn $k$;
  911. this means that the edge entering $W$ was already marked used in a
  912. previous turn, but then there would be a shorter path from $A$
  913. to $W$, contradicting the assumption that the path is of
  914. minimum length. The edge
  915. $e$ is marked `C', because it is in a circuit; it is marked `unused', for if
  916. it were marked used, there would be a shorter path from $A$ to $V$. So,
  917. in turn $k + 1$, the edge $e$ will be traversed.
  918. .LP
  919. On the other hand, suppose that an edge $e$ is traversed by the algorithm;
  920. we will show by induction on the number of turns the algorithm has made
  921. that $e$ is on a directed circuit $A->$..$->A$. In the first turn, all
  922. edges from $A$ that are marked `C' are traversed, and clearly, if an edge
  923. from $A$ is part of a circuit then that edge is part of a circuit from $A$ to $A$.
  924. Now suppose that in turn $n+1$ an edge $e$ connecting vertices $W$ and
  925. $V$ is traversed. This means the edge is
  926. marked `C', so it is part of some circuit. If there is a path from $V$ to $A$,
  927. we can simply trace a circuit
  928. $A->$...$-> W -> V -> $...$-> A$, and clearly $e$ is on a circuit from
  929. $A$ to $A$. Now, suppose there is no path from $V$ to
  930. $A$. We can always trace a circuit $W -> V ->$...$-> W$ because the
  931. edge from $W$ to $V$ is part of a circuit; and by the
  932. induction hypothesis there is a circuit $A ->$...$-> W ->$...$-> A$. We can
  933. now make a `detour' at $W$, yielding a circuit $A->$...$-> W -> V$...
  934. $-> W ->$...$-> A$. This case is shown in the picture below.
  935. So in either case $e$ is on a circuit from $A$ to $A$.
  936. .PS
  937. down;
  938. B1: box "A";
  939. arrow dashed;
  940. B3: box dashed;
  941. arrow dashed;
  942. B2: box "W";
  943. arrow dashed; box dashed;
  944. arc <- from B1.w to last box.w
  945. arrow right "$e$" "C" from B2.e
  946. box "V"; arrow dashed; box dashed;
  947. arrow dashed -> from last box.n to B3.e
  948. .PE
  949. .LP
  950. .I
  951. A vertex appears on list L if and only if it is on some directed
  952. circuit from $A$ to $A$.
  953. .R
  954. .br
  955. .LP
  956. If a vertex is in such a circuit, there is an edge that enters it, which
  957. is part of a circuit form $A$ to $A$; we already showed that this edge
  958. is traversed by the algorithm, and thus the vertex will appear on list
  959. L. Conversely, if a vertex appears on list L, then an edge entering
  960. that vertex has been traversed by the algorithm; we showed that this
  961. edge is part of a circuit from $A$ to $A$, and thus the vertex is
  962. part of a circuit from $A$ to $A$.
  963. .LP
  964. .I
  965. When the algorithm is finished, each vertex that is part of some
  966. directed circuit from $A$ to $A$ has its reference count decreased by exactly
  967. the number of edges entering it that are part of a directed circuit from $A$ to $A$.
  968. .R
  969. .br
  970. .LP
Each edge that is part of some circuit from $A$ to $A$ is traversed
exactly once; the reference count of its endpoint is decreased
by one after the edge has been traversed. Thus, if a vertex is the endpoint
of $k$ such edges, its reference count is decreased by $k$.
  975. .LP
  976. .I
  977. If the reference count of each of the vertices visited by the algorithm
is 0 after the algorithm has finished, all these vertices can be deleted;
  979. if the reference count is not zero for one or more of the visited
  980. vertices, then none of them can be deleted.
  981. .R
  982. .br
  983. .LP
  984. Suppose all visited vertices have reference count 0; this means that
  985. each of the vertices is only entered by edges that are on a circuit
  986. from $A$ to $A$. Therefore, it holds that any path leading to any
  987. of the visited vertices has to start in one of the visited vertices; there
  988. is no path starting in an unvisited vertex to a visited one. Thus,
  989. all the visited vertices are unreachable.
Conversely, if one of the visited vertices has a nonzero reference count,
then there is a path from an unvisited vertex to this vertex. Because from
the vertex with the nonzero reference count we can get to $A$, and from $A$
we can get to any of the other vertices, all visited vertices are
reachable, and none of them can be deleted.
  995. .LP
  996. The second part of the algorithm now checks if all reference counts are
  997. zero, and if they are, it deletes all visited vertices.
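.LP
As an illustration, the two parts of the algorithm could be sketched
in C as follows. The data structures, the representation of list L as
an array (assumed large enough), and the routine free_element are
hypothetical simplifications, not the actual implementation.
.nf
struct element;

struct edge {
    struct element *to;     /* endpoint of the edge */
    struct edge *next;      /* next outgoing edge of the same element */
    int is_loop;            /* marked `C': the edge is part of a loop */
    int used;               /* already traversed in an earlier turn */
};

struct element {
    int refcount;           /* number of entering edges */
    struct edge *edges;     /* list of outgoing edges */
    int on_list;            /* set when the element is put on list L */
};

extern void free_element(struct element *);   /* hypothetical */

/* Try to delete the loops through element A.  List L doubles as the
 * work list: elements are processed in the order in which they are
 * appended, which corresponds to the `turns' described above. */
void try_delete(struct element *A, struct element **L)
{
    int i, n = 0;
    struct edge *e;

    A->on_list = 1;
    L[n++] = A;
    for (i = 0; i < n; i++)
        for (e = L[i]->edges; e != 0; e = e->next)
            if (e->is_loop && !e->used) {
                e->used = 1;
                e->to->refcount--;   /* one decrement per traversed edge */
                if (!e->to->on_list) {
                    e->to->on_list = 1;
                    L[n++] = e->to;
                }
            }
    /* Second part: deletion is all-or-nothing. */
    for (i = 0; i < n; i++)
        if (L[i]->refcount != 0)
            return;                  /* still entered from outside */
    for (i = 0; i < n; i++)
        free_element(L[i]);
}
.fi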
  998. .NH 2
  999. Marking loop elements
  1000. .LP
  1001. One point we have omitted so far is how the edges in the prediction
  1002. graph that are part of a loop get marked.
  1003. Basically, a loop can be detected:
  1004. a. when it is made;
  1005. .br
  1006. b. when we want to know about it.
  1007. .LP
  1008. The first approach checks if a loop is constructed
  1009. as soon as we join two paths in the graph, and if so, marks all
  1010. edges of the loop. The other approach does not do any checking when two
paths are joined together; it starts looking for loops when we want
to delete an element with a nonzero reference count, marking all edges
belonging to the loops it discovers. In practice it turns out that
we very often encounter elements that we would like to delete but that have
a nonzero reference count, whereas the joining of paths occurs relatively
infrequently. We have therefore chosen to check whether a loop is created
  1017. when two paths in a prediction graph are joined.
  1018. .LP
Now the question arises how to find and mark all edges of
the loop. For this problem we also devised an algorithm.
Because we already know that there is an edge from the element to which
the new path is connected to the successor of the joined element, the
algorithm only has to find a path from this last element back to the first one.
This can be done by a backtracking depth-first search; to find a path from
one element to another we have to find a possibly empty path
from one of the successors of the first element to the last element. As
soon as we have found a path, we can mark all the edges on the path, and also
the back edge, as loop edges. In case there is more than one path
back to the first element, the algorithm must continue
searching after it has found one path.
  1031. .LP
To prevent this algorithm from looping, we set a flag at the elements
that are already on the path. When the algorithm backtracks, it
clears the flags of the elements it leaves.
  1035. .LP
To speed up the searching process we can set flags at the edges that we have
already visited but that did not lead back to the first element. When the
algorithm encounters such an edge, it knows that this edge is not worth
searching again and can skip it. At the end of the algorithm these
flags have to be cleared again; a sketch of the complete search is given below.
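.LP
In C, the search might look as follows; the data structures are the
same kind of hypothetical simplifications as before, and the caller
is assumed to mark the known back edge itself and to clear the `dead'
flags when the search is finished.
.nf
struct element;

struct edge {
    struct element *to;     /* endpoint of the edge */
    struct edge *next;      /* next outgoing edge of the same element */
    int is_loop;            /* edge is marked as a loop edge */
    int dead;               /* searched before; did not lead back */
};

struct element {
    struct edge *edges;     /* list of outgoing edges */
    int on_path;            /* element is on the current search path */
};

/* Mark as loop edges all edges on all paths from `from' back to
 * `target'; returns 1 if at least one (possibly empty) path exists. */
int mark_loop(struct element *from, struct element *target)
{
    struct edge *e;
    int found = 0;

    if (from == target)
        return 1;               /* the possibly empty path */
    if (from->on_path)
        return 0;               /* do not run around a cycle */
    from->on_path = 1;          /* flag set while on the path */
    for (e = from->edges; e != 0; e = e->next) {
        if (e->dead)
            continue;           /* not worth searching again */
        if (mark_loop(e->to, target)) {
            e->is_loop = 1;     /* on a path back: a loop edge */
            found = 1;          /* but keep searching; there may
                                 * be more than one path */
        } else
            e->dead = 1;        /* cleared again afterwards */
    }
    from->on_path = 0;          /* clear flag while backtracking */
    return found;
}
.fi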
  1041. .LP
  1042. One might propose another optimization: as soon as
  1043. we reach an edge that is already marked as a loop edge, we
  1044. can stop searching for other loop edges. There is, however,
  1045. a case in which this can go wrong. Imagine the following situation:
  1046. .PS
  1047. down
  1048. E: box "[E]"
  1049. arrow " C" ljust
  1050. D: box "[D]"
  1051. arrow " C" ljust
  1052. C: box "c"
  1053. arrow " C" ljust
  1054. box "b"
  1055. arrow " C" ljust
  1056. A: box "[A]"
  1057. arrow
  1058. box "a"
  1059. move right from D
  1060. move right
  1061. J: box "[J]"
  1062. down
  1063. arrow from J.s " C" ljust
  1064. I: box "i"
  1065. arrow " C" ljust
  1066. H: box "[H]"
  1067. arrow from H.s to A.e
  1068. arc <- from E.w to A.w
  1069. move left from C
  1070. move left
  1071. "C"
  1072. arc -> from H.e to J.e
  1073. move right from I
  1074. move right
  1075. "C"
  1076. arrow dashed from E.s to J.n
  1077. .PE
What we have here is a prediction graph with two loops; all edges that belong
to a loop are again marked with a `C'. Note that the edge between `[H]'
  1080. and `[A]' is not a loop edge. Suppose that `[J]' is not yet
  1081. completely substituted, i.e. there is another production rule for
  1082. J:
  1083. .br
  1084. J: E
  1085. .br
  1086. The `E' on top of the right path is now joined with the `[E]'
  1087. on the left path, which is depicted by the dashed arrow
  1088. between `[E]' and `[J]'. When we take a good look at the graph
  1089. we see that the two loops are merged into one. But that is not
  1090. the most important observation we have to make: not only the
  1091. edge between `[E]' and `[J]' must be marked as a loop edge, but
  1092. also the edge between `[H]' and `[A]'! So it is not possible
  1093. to stop searching for loop edges as soon as we have found an
  1094. edge which was already marked as a loop edge. We have to continue
  1095. until we reach the element at which we started: `[E]'. So the
  1096. optimization proposed above is incorrect.
  1097. .NH 2
  1098. Optimizations using FIRST and FOLLOW sets
  1099. .LP
  1100. In the algorithm as we have described it, every nonterminal on top of the graph
  1101. is substituted until only terminals remain on top; these terminals are
  1102. then matched against the current input symbol. However, by using
  1103. FIRST sets, we can save considerably on the number of computations
necessary. Suppose one of the top elements of the graph is nonterminal A,
and the current input symbol is $a$. Then it is of no use to substitute
A if terminal $a$ is not in FIRST(A), because then substituting A will
never produce $a$ on top of the graph. So, before substituting a
nonterminal, we check whether the current input symbol is in its FIRST set; if
it is not, we can declare the path the nonterminal is on a dead end and
delete it, without having to perform the actual substitution. Of course, if
A can produce empty, we still have to consider its successor in the graph.
  1112. .LP
Similarly, when we have an END marker on top, with nonterminal B in
it, and we consider using rule
.br
D: $alpha$ B C $gamma$
.br
we first check whether the current input symbol is in FIRST(C); if this is
not the case, there is no need to start a graph component with this
rule, because it will never produce the next input symbol on top.
Again, if C produces empty, we still have to evaluate the part of the
rule following C.
  1123. .LP
To circumvent the problems caused in the FIRST-set optimization by
nonterminals that produce empty, we can also make use of FOLLOW sets.
When substituting, if we encounter a nonterminal whose FIRST set does
not contain the current input symbol but which can produce empty,
we check whether the current input symbol is in its FOLLOW set. If it is not,
there is no need to process its successor. Similarly, in case we
are processing an END marker as explained above, there is no need
to process the part of the rule following C if FIRST(C) does not
contain the input symbol, or if C produces empty but the input symbol
is not in FOLLOW(C).
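.LP
The tests described above can be combined into a small C routine; the
bit-set representation and the macro in_set are assumptions, and the
FIRST set, FOLLOW set, and empty flag correspond to the fields of the
`lhs' structure described in appendix A.
.nf
/* Hypothetical bit-set membership test. */
#define in_set(set, sym) ((set)[(sym) >> 3] & (1 << ((sym) & 7)))

struct lhs {
    unsigned char *first;   /* FIRST set of the nonterminal */
    unsigned char *follow;  /* FOLLOW set of the nonterminal */
    int empty;              /* the nonterminal produces empty */
    /* ... remaining fields omitted; see appendix A ... */
};

/* Decide what to do with nonterminal A on top of the graph when the
 * current input symbol is sym.  Returns 1 if A must be substituted;
 * *successor is set if the successor of A in the graph must still be
 * considered because A may produce empty. */
int worth_expanding(struct lhs *A, int sym, int *successor)
{
    *successor = 0;
    if (in_set(A->first, sym))
        return 1;           /* substituting may put sym on top */
    if (A->empty && in_set(A->follow, sym))
        *successor = 1;     /* A can vanish, and sym may follow it */
    return 0;               /* otherwise this path is a dead end */
}
.fi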
  1134. .bp
  1135. .nr PS 12
  1136. .nr VS 14
  1137. .NH
  1138. Test results
  1139. .nr PS 10
  1140. .nr VS 12
  1141. .RS
  1142. .LP
  1143. In this chapter, we discuss some test results that were obtained
  1144. by recompiling existing ACK compilers with the modified LLgen.
  1145. We tried several combinations of possible optimizations, including
  1146. `dumb' ones, like no optimization at all, not even deleting unreachable
  1147. prediction graph elements.
  1148. The incorporation of LLgen with non-correcting error recovery went
  1149. smoothly; only minor modifications to the Make-files were necessary.
  1150. Specifically, these modifications consisted of passing an extra
  1151. flag to LLgen, and including the new generated C-file Lncor.c in
  1152. the list of generated C-files. Also, the LLmessage error reporting
  1153. routine had to be adapted. We successfully recompiled the C, Modula-2
  1154. and Occam compilers; in the next sections, we discuss some test results
  1155. that were obtained with the Modula-2 and C compilers.
  1156. .RE
  1157. .LP
  1158. .NH 2
  1159. Performance
  1160. .LP
  1161. We will now present and discuss, with the aid of some
  1162. diagrams, time and space measurements on the non-correcting error
  1163. recovery. We have measured the effect of various optimizations.
  1164. These optimizations include the first-set optimization and the follow-set
  1165. optimization. We also measured the effect of leaving out the loop-deletion
algorithm, regarding both time and space. We performed our measurements using
C and Modula-2 programs of three different sizes: one of approximately
750 tokens, one of approximately 5000 tokens, and one of approximately
15000 tokens. We have
chosen to express the sizes of programs in number of tokens instead of
number of lines, because the number of tokens more realistically
reflects the load the programs put on the error recovery mechanism. Also, we give
our time measurements in user time instead of real time, because real time
depends heavily on the load of the system, whereas user time does not.
  1174. Our space measurements are based on the size of the prediction graphs.
  1175. Note that all files are entirely recognized by the non-correcting error
  1176. recovery technique. We achieved this by putting a `1' at the beginning
of each file; because each file then starts with a syntax error, LLgen
  1178. is forced to continue with the non-correcting error recovery.
  1179. .NH 3
  1180. Time and space measurements on the effect of the first-set optimization
  1181. .LP
In the diagram below we show the time measurements we obtained when recognizing
the C programs, both with and without the first-set optimization.
  1184. .G1
  1185. coord x 0, 17000 y 0, 65
  1186. ticks bot out at 750, 5000, 15000
  1187. label bot "Number of tokens"
  1188. label left "User Time" "(sec)" left .3
  1189. draw no_opt dashed
  1190. draw first_opt dashed
  1191. copy thru X
  1192. times size +2 at $1, $2
  1193. times size +2 at $1, $3
  1194. next no_opt at $1, $2
  1195. next first_opt at $1, $3
  1196. X until "XXX"
  1197. 742 2.5 .9
  1198. 5010 16.3 5.8
  1199. 14308 54.2 16.8
  1200. XXX
  1201. copy thru X "$1 $2" size -2 at 11000, $3 X until "XXX"
  1202. No optimization 55
  1203. First-set optimization 20
  1204. XXX
  1205. .G2
  1206. .I
  1207. .ce
  1208. Time measurements of three C-programs with and without first-set optimization
  1209. .R
  1210. .LP
  1211. Notice the considerable time savings we
  1212. get when the first-set optimization is turned on; a factor of slightly more than
  1213. 3. Obviously this is an extremely useful optimization. On the other hand
  1214. we found there were no measurable time savings when using the follow-set
  1215. optimization; for that reason we did not chart the result of this optimization.
It seems that the time savings gained by the optimization are
wasted again by the extra processing time it needs. We conclude that
this optimization is of little or no use when we want to save time.
  1219. .LP
  1220. In the following picture the time measurements of three Modula-2 programs
  1221. are given, again with and without first-set optimization.
  1222. .G1
  1223. coord x 0, 17000 y 0, 65
  1224. ticks bot out at 750, 5000, 15000
  1225. label bot "Number of tokens"
  1226. label left "User Time" "(sec)" left .3
  1227. draw no_opt dashed
  1228. draw first_opt dashed
  1229. copy thru X
  1230. times size +2 at $1, $2
  1231. times size +2 at $1, $3
  1232. next no_opt at $1, $2
  1233. next first_opt at $1, $3
  1234. X until "XXX"
  1235. 823 1.3 .6
  1236. 4290 7.6 3.5
  1237. 16530 30.5 14.3
  1238. XXX
  1239. copy thru X "$1 $2" size -2 at 13000, $3 X until "XXX"
  1240. No optimization 30
  1241. First-set optimization 15
  1242. XXX
  1243. .G2
  1244. .I
  1245. .ce
  1246. Time measurements of three Modula-2-programs with and without first-set optimization
  1247. .R
  1248. .LP
  1249. From this picture we can conclude mainly the same as above; considerable
  1250. time savings when we use the first-set optimization;
  1251. the factor is somewhat less, but still more than 2. Again we have omitted
  1252. the results of the follow-set optimization, for the same reason as before.
  1253. .LP
There is, however, one remarkable difference between the two languages: parsing
C programs needs almost twice as much time as parsing programs of comparable
size written in Modula-2. This can be explained by the fact that the
C grammar is far more complicated than that of Modula-2, and also the
production rules are longer in C, so building, deleting, and especially
traversing the graph will consume more time.
  1260. .LP
  1261. Now we come to the space measurements of both C- and Modula-2 programs.
  1262. In the picture below we present the maximum sizes of the prediction graphs,
  1263. during the recognition of the three C-programs.
  1264. .G1
  1265. coord x 0, 17000 y 0, 18000
  1266. ticks bot out at 750, 5000, 15000
  1267. label bot "Number of tokens"
  1268. label left "Maximum size of" "the prediction graph" "(bytes)"left .3
  1269. draw no_opt dashed
  1270. draw first_opt dashed
  1271. copy thru X
  1272. times size +2 at $1, $2
  1273. times size +2 at $1, $3
  1274. next no_opt at $1, $2
  1275. next first_opt at $1, $3
  1276. X until "XXX"
742 10444 5568
5010 12664 7668
14308 17308 13636
  1280. XXX
  1281. copy thru X "$1 $2" size -2 at 8000, $3 X until "XXX"
  1282. No optimization 16000
  1283. First-set optimization 7000
  1284. XXX
  1285. .G2
  1286. .I
  1287. .ce
  1288. Maximum sizes of the prediction graphs when recognizing three C-programs
  1289. .R
  1290. .LP
  1291. From this diagram we see that, although the prediction graphs
  1292. are smaller when the first-set optimization is used, the space savings are
  1293. not as spectacular as the time savings achieved by this optimization.
  1294. .LP
  1295. In Modula-2 the first-set optimization also causes a decrease in memory
usage. The savings are less than in C, but still about 1.5 Kb. Again,
this can be explained by the fact that the rules of the Modula-2 grammar
are shorter than those of C.
  1299. .G1
  1300. coord x 0, 17000 y 0, 12000
  1301. ticks bot out at 750, 5000, 15000
  1302. label bot "Number of tokens"
  1303. label left "Maximum size of" "the prediction graph" "(bytes)" left .3
  1304. draw no_opt dashed
  1305. draw first_opt dashed
  1306. copy thru X
  1307. times size +2 at $1, $2
  1308. times size +2 at $1, $3
  1309. next no_opt at $1, $2
  1310. next first_opt at $1, $3
  1311. X until "XXX"
  1312. 823 5056 3292
  1313. 4290 6420 4664
  1314. 16530 11388 9632
  1315. XXX
  1316. copy thru X "$1 $2" size -2 at 8000, $3 X until "XXX"
  1317. No optimization 10000
  1318. First-set optimization 4000
  1319. XXX
  1320. .G2
  1321. .I
  1322. .ce
  1323. Maximum sizes of the prediction graphs when recognizing three Modula-2-programs
  1324. .R
  1325. .NH 3
  1326. Input that is recognized in quadratic time
  1327. .LP
  1328. The measurements presented may suggest that the time required to
  1329. recognize input depends linearly on the length of the input; however,
this is not always the case. When there are recursive rules in the
grammar, the time needed to recognize input that is produced by these
rules can become proportional to the square of the input length.
  1333. Consider this set of grammar rules:
  1334. .br
  1335. .nf
  1336. S: '{' A '}'
  1337. A: 'a' A | $epsilon$
  1338. .fi
  1339. .LP
  1340. When the input is `{aaa....', the algorithm will produce the following
  1341. prediction graphs:
  1342. .PS
  1343. up; B1: box "END" "S"; arrow <- ;box "}";arrow <- ;box "A";arrow <- ;box "{";
  1344. move right from B1.se; move
  1345. up; B2: box "END" "S"; arrow <-; box "}"; arrow <-; box "[A]";
  1346. arrow <-; box "A"; arrow <-; box "a";
  1347. move right from B2.se; move
  1348. up; B3: box "END" "S"; arrow <-; box "}"; arrow <-; box "[A]";
  1349. arrow <-; box "[A]"; arrow <-; box "A"; arrow <-; box "a";
  1350. move right from B3.se;move
  1351. up; B4: box "END" "S"; arrow <-; box "}"; arrow <-; box "[A]";
  1352. arrow <-; box "[A]"; arrow <-; box "[A]"; arrow <- ; box "A"; arrow <-;box "a";
  1353. .PE
  1354. .LP
  1355. In each prediction phase, a new [A] appears on the prediction graph. However,
  1356. since A also produces empty, the prediction algorithm has to traverse all the
elements [A] until it finds the element `}'. In the first prediction phase
there is one element [A], in the second there are two, etc., so in all
$1 + 2 + 3 + ... + k = {k(k+1)} over 2$ elements have to be traversed if
there are $k$ prediction phases, making the time proportional to the square
  1361. of the input length. We constructed a parser with this simple input grammar
  1362. and measured the processing time the error recovery mechanism used.
  1363. In the following diagram the dashed line shows the processing time needed;
the dotted line is the curve $t = 13 times 10 sup {-6} n sup 2$. Clearly the processing time
is proportional to the square of the number of tokens.
  1366. .G1
  1367. coord x 0, 2100 y 0, 60
  1368. ticks bot out at 500, 1000, 1500, 2000
  1369. label bot "Number of tokens"
  1370. label left "User Time" "(sec)" left .3
  1371. draw quad dashed
  1372. copy thru X
  1373. times size +2 at $1, $2
  1374. next quad at $1, $2
  1375. X until "XXX"
  1376. 500 3.0
  1377. 1000 12.4
  1378. 1500 28.6
  1379. 2000 51.4
  1380. XXX
  1381. draw dotted
  1382. for i from 0 to 2100 by 25 do { next at i, 0.000013 * i * i }
  1383. .G2
  1384. .LP
In the grammar used for the C compiler, array initializations are handled by a recursive
rule, so we would expect the error recovery mechanism to need quadratic
processing time to recognize such an initialization; we measured
the processing time and indeed, it
grows proportionally to the square of the size of the input, as the
next figure shows. Here, the processing times are about a quarter of those in
the previous example; this is so because the recursion appears only once
every two tokens, so the quadratic term grows a factor of four more slowly.
Note that the algorithm only takes quadratic time
when it is recognizing input that is generated by a recursive grammar rule.
Other input is still recognized in linear time, even when the grammar
contains recursive rules.
  1396. .G1
  1397. coord x 0, 5000 y 0, 85
  1398. ticks bot out at 1150, 2400, 3600, 4800
  1399. label bot "Number of tokens"
  1400. label left "User Time" "(sec)" left .3
  1401. draw quad dashed
  1402. copy thru X
  1403. times size +2 at $1, $2
  1404. next quad at $1, $2
  1405. X until "XXX"
  1406. 1150 5.1
  1407. 2400 20.3
  1408. 3600 43.7
  1409. 4800 78.6
  1410. XXX
  1411. .G2
  1412. .LP
Unfortunately, there is no easy way to speed up the recognition of these
recursively defined language elements; the quadratic behavior is caused by the
substituted elements that are left in the prediction graph, and we cannot just delete those
`dummies' from the graph during a prediction phase, because the `join' part of the
  1417. prediction algorithm depends on them. One could traverse the graph after
  1418. a prediction phase to delete the dummies, but then the processing
  1419. time needed to recognize non-recursively defined language elements would
increase dramatically. However, we feel that in practice things
like large array initializations will not occur in hand-written programs; when
they occur, it is probably in computer-generated programs, which normally
will be correct anyway, meaning that the error recovery never sees them.
  1424. When testing such generated programs, one is likely
  1425. to use small test-cases, which are handled well by the error recovery.
  1426. .NH 3
  1427. Time measurements on the effect of leaving out the loop-deletion algorithm
  1428. .LP
  1429. We now show what effect the loop-deletion algorithm has on processing time.
  1430. To put it another way: how much time can be saved when we turn off the
  1431. loop-deletion algorithm. In the diagram below we give the measurements of
  1432. the three C-programs; note that we do use the first-set optimization.
  1433. .G1
  1434. coord x 0, 17000 y 0, 22
  1435. ticks bot out at 750, 5000, 15000
  1436. label bot "Number of tokens"
  1437. label left "User Time" "(sec)" left .3
  1438. draw no_loop dashed
  1439. draw loop dashed
  1440. copy thru X
  1441. times size +2 at $1, $2
  1442. times size +2 at $1, $3
  1443. next no_loop at $1, $2
  1444. next loop at $1, $3
  1445. X until "XXX"
  1446. 742 .9 .4
  1447. 5010 5.8 6.8
  1448. 14308 16.8 20.5
  1449. XXX
  1450. copy thru X "$1 $2" size -2 at 11300, $3 X until "XXX"
  1451. With loop-deletion 20
  1452. Without loop-deletion 9
  1453. XXX
  1454. .G2
  1455. .I
  1456. .ce
  1457. Time measurements on processing three C-programs with and without the loop-deletion algorithm
  1458. .R
The diagram shows that the loop-deletion algorithm
does not dramatically slow down the recognizing process. There is, however,
a measurable time loss of \(+-25%. As we will see later, the loop-deletion
algorithm turns out to be extremely useful for efficient use of memory
when there are many loops in the graph.
The effect of the loop-deletion algorithm on parsing Modula-2 programs
is even smaller than with C programs; in fact there is no measurable
time loss:
  1467. .G1
  1468. coord x 0, 17000 y 0, 15
  1469. ticks bot out at 750, 5000, 15000
  1470. label bot "Number of tokens"
  1471. label left "User Time" "(sec)" left .3
  1472. draw no_loop dashed
  1473. draw loop dashed
  1474. copy thru X
  1475. times size +2 at $1, $2
  1476. times size +2 at $1, $3
  1477. next no_loop at $1, $2
  1478. next loop at $1, $3
  1479. X until "XXX"
  1480. 823 .6 .6
  1481. 4290 3.5 3.8
  1482. 16530 14.3 14.3
  1483. XXX
  1484. copy thru X "$1 $2" size -2 at 11800, $3 X until "XXX"
  1485. With loop-deletion 13
  1486. Without loop-deletion 7
  1487. XXX
  1488. .G2
  1489. .I
  1490. .ce
  1491. Time measurements on processing three Modula-2-programs with and without a loop-deletion algorithm
  1492. .R
There are at least two reasons for this; both result from the relative
simplicity of the Modula-2 grammar. First, the distance from a head to an
end-of-stack marker is shorter than in C; second, Modula-2
causes fewer joins to occur than C, meaning that the loop-marking algorithm
is run less often, and when it is run it has fewer paths to search.
  1498. .NH 3
  1499. Space measurements on the effect of leaving out the loop-deletion algorithm
  1500. .LP
  1501. Clearly, to make any measurements on the space-usage effects of leaving out
  1502. the loop-deletion algorithm we need a program that causes the prediction
  1503. graph to contain loops; however, we have not been able to devise a C
  1504. or Modula-2 program that does this. In order to be able to make measurements,
  1505. we added an extra alternative to a rule of the C compiler grammar, making
  1506. it directly left-recursive. To make LLgen accept this new grammar, we
  1507. put a `%if' directive in the rule.
  1508. .LP
We have input our standard C test program consisting of 800 tokens to
the error recovery routine of this `doctored' C compiler,
and compared the storage needed for the prediction graphs when the
loop-deletion algorithm is enabled with the storage needed when the
algorithm is disabled. With the loop-deletion algorithm enabled, the
  1514. maximum size of the prediction graph was 5576 bytes. When the loop
  1515. algorithm was disabled, the maximum size of the prediction graph
  1516. grew to 12676 bytes; furthermore, 12676 bytes of heap were allocated
  1517. for the prediction graph, but not deallocated again, because they were
in use by graph elements that were in inaccessible loops. The user time
the program needed increased only slightly, from 0.9 to 1.0 seconds. Given the
  1520. relatively small input program, this data suggests that when loops
  1521. are actually being made, the loop deletion algorithm is definitely
  1522. worth the extra overhead it costs, considering the space
  1523. that would otherwise be occupied by inaccessible loops. To verify this,
  1524. we input the C program consisting of 15000 tokens to the compiler;
  1525. execution time increased from 17.3 to 21.1 seconds after enabling
  1526. the loop deletion algorithm, while the maximum size of the prediction graph
shrank from 328664 to 13664 bytes. With the loop-deletion algorithm
  1528. disabled, 326720 bytes allocated for the graph were not deallocated again.
  1529. Again, given the relatively small increase in execution time and the
  1530. large reduction of memory usage, we feel that the loop-deletion
  1531. algorithm is useful enough to justify the overhead it creates.
  1532. .NH 2
  1533. Problems encountered
  1534. .LP
  1535. In this section we describe some of the problems we encountered
  1536. while testing the non-correcting error recovery.
  1537. .NH 3
The LLgen error reporting mechanism
  1539. .LP
  1540. The parsers generated by LLgen call a user-supplied error reporting
  1541. routine, usually called LLmessage. This routine is called with an
  1542. integer parameter that is positive, zero or negative. When the parameter
  1543. is positive the parser has just inserted a token, whose
  1544. number is equal to the parameter; if it is zero, the parser
  1545. has deleted a token whose number is in a global variable called LLsymb; if
  1546. it is negative, it means that LLgen expected end-of-file, but did not
  1547. find it. The routine LLmessage is supposed to print an error message,
  1548. and when a token is inserted, it should set all necessary attributes.
  1549. .LP
  1550. However, when non-correcting error recovery is used, the situation becomes slightly
different; when the parser inserts a token, it does so only to keep the
semantic actions consistent, and the insertion no longer signifies an error.
  1553. However, the LLmessage routine still has to be called because the
  1554. attributes of the inserted token need to be set. Therefore, when
  1555. non-correcting error recovery is used, the LLmessage routine should not
  1556. print an error message when the parameter is positive, or else it will
  1557. print highly confusing error messages indeed. Furthermore, the
  1558. LLmessage routine will usually print a message like `token ... deleted' when
  1559. it is called with parameter equal to zero; however, when the non-correcting
  1560. error recovery is used, it is more appropriate to report something
  1561. like `token ... illegal', as the non-correcting error recovery does
  1562. not delete tokens. Finally, when an unexpected end-of-file is encountered,
  1563. LLgen normally just inserts the missing tokens and calls
  1564. LLmessage with the parameter equal to the token number;
when non-correcting error recovery is used we need a way to
actually report that we have encountered an unexpected end-of-file. We
achieved this by calling LLmessage with parameter 0 and the
global variable LLsymb set to EOFILE when this situation occurs; the
routine LLmessage should print something like `unexpected end of file'
  1570. when it is called with parameter 0 and LLsymb is EOFILE. To facilitate
  1571. switching between correcting and non-correcting error recovery, the
  1572. file Lpars.h contains a statement `#define LLNONCORR' if non-correcting
  1573. error recovery is used.
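.LP
A version of LLmessage adapted along these lines might look as
follows; the routines error, token_name and insert_attributes are
hypothetical, and EOFILE is taken to be the token number reported for
end-of-file.
.nf
#include "Lpars.h"

extern int LLsymb;                   /* set by the generated parser */
extern void error();                 /* hypothetical reporting routine */
extern void insert_attributes();     /* hypothetical */
extern char *token_name();           /* hypothetical */

void LLmessage(int tk)
{
    if (tk > 0) {
        /* A token was inserted; its attributes must be set in any
         * case, but under non-correcting error recovery the
         * insertion no longer signifies an error. */
        insert_attributes(tk);
#ifndef LLNONCORR
        error("%s missing", token_name(tk));
#endif
    } else if (tk == 0) {
#ifdef LLNONCORR
        if (LLsymb == EOFILE)
            error("unexpected end of file");
        else
            error("%s illegal", token_name(LLsymb));
#else
        error("%s deleted", token_name(LLsymb));
#endif
    } else {
        /* Negative: end-of-file was expected but not found. */
        error("end of file expected");
    }
}
.fi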
  1574. .NH 3
  1575. Parsers being started in semantic actions
  1576. .LP
  1577. LLgen allows the programmer to define more than one nonterminal as the
  1578. start symbol of the input grammar; it will generate a parsing routine
  1579. for each of the start symbols. However, the error recovery code
  1580. is generated only once; it is shared by all parsers.
  1581. The programmer is free to call any
of the generated parsers whenever he wants; for instance, in the C compiler
a separate parser is used for expressions in #if and #elif directives. Whenever
the lexical analyzer encounters such a directive, it calls the expression
parser. It is also possible to call a parser in a semantic action of
another parser; in the Modula-2 compiler a separate parser for
definition modules is used. When the main parser encounters a
FROM defmod IMPORT statement, a semantic
action opens the definition module defmod and starts the parser for
definition modules.
  1591. .LP
  1592. The fact that subparsers can be started just about anywhere causes
  1593. problems when non-correcting error recovery is used.
  1594. Suppose a parser calls another parser in a semantic action
  1595. to parse a separate input file. In the Modula-2 compiler, after
  1596. seeing the FROM defmod IMPORT statement a semantic action opens
  1597. defmod and parses it; now, if a syntax error occurred before the
  1598. FROM IMPORT statement, the non-correcting error recovery will not
  1599. execute the action that opens and parses the definition module, but
  1600. it will not report an error either, because the statement
  1601. FROM defmod IMPORT is part of the input language of the main parser.
  1602. However, suppose that during the parsing of a definition module
  1603. an error occurs; then, some semantic actions that would normally
  1604. be executed during parsing of the definition module will not have
  1605. taken place. When normal parsing is now resumed by the main parser,
  1606. after the non-correcting error recovery has finished with the
  1607. definition module, a lot of spurious semantic errors are likely to be
  1608. reported, because the semantic actions that would normally have been
  1609. executed during the definition module parsing have not been executed
  1610. by the error recovery. Therefore, it is desirable that the main parser
  1611. does not resume normal parsing, but instead continues with the non-correcting
  1612. error recovery as well. Any syntactic errors in the main program will
  1613. still be reported, but no spurious semantic errors will be reported
  1614. that way.
  1615. .LP
  1616. When the lexical analyzer calls other parsers, as is the case in
  1617. the ACK C compiler, recursive invocations of the non-correcting error
  1618. recovery routine can occur. This will happen if a parser starts the
  1619. error recovery, the error recovery calls the lexical analyzer, which
  1620. starts another parser that finds a syntax error and calls the
error recovery again. This is not really a problem, but it has
consequences for the implementation of the error recovery routine.
  1623. .LP
  1624. The worst case
  1625. occurs when two parsers are involved in parsing one input file, and
  1626. the secondary parser (e.g. an inline assembly parser) is called in a semantic
  1627. action of the main parser. Suppose now that the input text contains
  1628. a syntax error; after detecting this error, the parser starts the
  1629. non-correcting error recovery. This recovery does not execute any
  1630. semantic actions; therefore it will not start the subparser at those points
  1631. where the original LLgen generated parser would. As a result, parts
  1632. of the program that would be accepted by the subparser will now probably
  1633. be rejected as illegal, because the error recovery does not know it
  1634. should use another grammar to check these parts. This is a serious
  1635. problem, and we have devised and implemented two ways to solve it.
  1636. .LP
  1637. The first solution is based on the assumption that whenever a semantic
  1638. action occurs in the grammar, another parser can be started at that
  1639. point. Obviously, we have no way of knowing which semantic actions start
  1640. a parser and which don't, so we assume the worst.
  1641. Now, assume that in the grammar there are k symbols defined as
  1642. start symbols, say $W sub 1 , W sub 2 , ..., W sub k$. Each of these symbols
  1643. will cause LLgen to generate a parser that can be called in any
  1644. of the semantic actions of the grammar. We now introduce a new
  1645. symbol $X$, and a new grammar rule $X -> W sub 1 X | W sub 2 X | ... |
  1646. W sub k X |
  1647. epsilon$.
  1648. In the grammar the error recovery algorithm uses, we insert this symbol
  1649. X at all positions where there are semantic actions in the original grammar,
  1650. so a rule $A -> alpha$ { action } $beta$ becomes $A -> alpha X beta$. As a
  1651. result, at each position in a grammar rule where a semantic action
  1652. occurs, we now accept any input that would be accepted by any of the
  1653. parsers. Clearly, this solution is somewhat of a kludge, as it will
  1654. accept a lot of input that is not accepted by the original parser.
  1655. However, it is guaranteed to never give spurious error messages, because
  1656. whenever a parser would be started by the original parser, there now
  1657. is an $X$ in the grammar that produces all the strings that would be
  1658. accepted by that parser. We have implemented this solution, and found
  1659. it to be extremely slow, which of course was to be expected given the
  1660. number of semantic actions in the average grammar. Furthermore,
  1661. because each time a semantic action occurs in the grammar
  1662. a string accepted by any of the generated parsers is accepted, including
  1663. strings recognized by the currently running parser, error messages
  1664. become hard to interpret. As an example, consider the following
  1665. C program:
  1666. .br
  1667. .nf
  1668. main()
  1669. {
  1670. int i, j;
  1671. while (i < j
  1672. j++;
  1673. i = 1;
  1674. j = 2;
  1675. }
  1676. .fi
  1677. .LP
  1678. Clearly, there is a `)' missing in the while-statement;
  1679. however, if this program is input to the error recovery it will complain
  1680. "} illegal", since after recognizing the
  1681. expression controlling the while the original parser starts a
  1682. semantic action, so the non-correcting recovery will accept a valid
  1683. C program at that point; after recognizing the three statements
  1684. following the while-statement as a separate program the
  1685. recognizer expects the missing `)', but gets `}' instead.
  1686. .LP
  1687. Our second solution is based on the observation that if we knew
  1688. which semantic actions can start other parsers, we would only
  1689. have to introduce the new symbol $X$ at those places where parsers
  1690. can get started. We have therefore extended LLgen with a new directive
  1691. %substart, which is used to indicate to the parser generator that
another parser may be started. The %substart directive is followed by the
start symbols that produce the parsers that can be called;
thus, %substart A, B, C; indicates that in the semantic action
following the directive the parsers produced by start symbols
A, B, and C can be started. In the grammar used by the error
recovery, a new symbol $X$ will be introduced at this point,
along with a new rule $X -> AX | BX | CX | epsilon$. Of course, this
solution can still accept input that would not have been accepted
by the original parser, for instance if a parser is started
conditionally, based on other semantic information. However, it
is a big improvement over the first solution, both in performance
and in the input it accepts.
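.LP
As an illustration, a rule in which a semantic action may start the
parser generated for start symbol B could be written as follows; this
is a hypothetical fragment of LLgen input, with made-up token and
routine names.
.nf
statement  : ASM_SYM
             %substart B;
             { parse_assembly(); /* may start the B parser */ }
             ';'
           ;
.fi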
  1704. .NH 3
  1705. Syntactic errors being handled in semantic actions
  1706. .LP
  1707. A programmer may decide to handle certain syntactic errors
  1708. in semantic actions, for instance because he is not satisfied with
  1709. the standard error recovery. However, since the non-correcting error
  1710. recovery does not execute semantic actions, this may cause errors
  1711. to remain undetected. We encountered the following example in the ACK
  1712. Modula-2 compiler, in the grammar rule for assignment statement:
  1713. .br
  1714. .nf
  1715. Assignment_statement: lvalue
  1716. [
  1717. '='
  1718. {
  1719. error(":= expected");
  1720. }
  1721. |
  1722. ':='
  1723. ]
  1724. expression
  1725. ;
  1726. .fi
  1727. .LP
  1728. This works well in the original LLgen; however, statements like
  1729. `j=9' are not treated as syntactic, but as semantic errors.
The original LLgen-generated parser
will print the (semantic) error message, but the non-correcting recovery
will not execute the semantic action, and therefore the erroneous
input will be accepted.
  1734. .LP
  1735. To facilitate the incorporation of non-correcting error recovery in parsers
  1736. that use this kind of `trick', we extended LLgen with the %erroneous
  1737. directive. The directive indicates to the non-correcting recovery
  1738. mechanism that the token following it is not really part of the grammar.
  1739. When recognizing input, the error recovery will ignore tokens in the
grammar that have %erroneous in front of them. If, in the example above,
the '=' is replaced with %erroneous '=' (as shown below), the non-correcting mechanism will
report an error when it sees a statement like `j = 9'. See appendix B
for details about the implementation of the %erroneous directive.
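.LP
With the directive added, the rule above reads:
.br
.nf
Assignment_statement: lvalue
[
        %erroneous '='
        {
                error(":= expected");
        }
|
        ':='
]
expression
;
.fi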
  1744. .LP
  1745. Another example is in the ACK C compiler. For some reason, the
  1746. grammar accepts function definitions without `()', so according
  1747. to the syntax a function definition can look like:
  1748. .br
  1749. .nf
  1750. int func
  1751. {
  1752. ....
  1753. }
  1754. .fi
  1755. .LP
  1756. The absence of the `()', however, causes `func' to be entered in the
symbol table as a non-function, and when the parser encounters the body
  1758. a semantic action will complain with the error message "Making function body
  1759. for non-function". This again will cause the non-correcting error
  1760. recovery to miss errors. Consider this piece of code:
  1761. .br
  1762. .nf
  1763. int i int j = 1;
  1764. {}
  1765. .fi
  1766. .LP
  1767. where apparently there's a `;' missing between the declarations
  1768. of i and j. The original LLgen-generated parser only gives semantic errors:
  1769. .br
  1770. .nf
  1771. "Making function body for non-function"
  1772. "j is not in parameter list"
  1773. "Illegal initialization of formal parameter, ignored"
  1774. .fi
  1775. .LP
  1776. As a result, the non-correcting error recovery will not report
  1777. any errors in this piece of code, because it does not execute the
  1778. semantic actions that recognize and report the error. Unfortunately,
  1779. due to the way the C-grammar is written, it is not possible to solve
  1780. this problem using a %erroneous directive; the part of the grammar
that deals with declarations would have to be rewritten so as to
  1782. syntactically reject functions without `()'.
  1783. .NH 3
  1784. Semantic actions that read input
  1785. .LP
  1786. There are no restrictions on what a semantic action can do;
  1787. there is nothing to stop the programmer from writing a parser in such
  1788. a way that some of the input to the parser is processed by semantic
  1789. actions. Obviously, because the non-correcting error recovery does not
  1790. execute semantic actions, this kind of parser will not work at all
  1791. with the new error recovery. Ironically, LLgen itself is written in
  1792. such a fashion; {}-enclosed C-code in its input is processed by
  1793. a semantic action in the LLgen grammar. We feel that it is bad
  1794. practice to write parsers this way; the `eating' of parts of
  1795. the input should be done in the lexical analyzer, not in the parser.
  1796. After all, in the case of LLgen, one can regard a semantic action
  1797. in the input as one token, and thus it should be handled by
  1798. the lexical analyzer as such.
  1799. .NH 2
  1800. Examples of error recovery
  1801. .LP
  1802. We will now give some examples that compare non-correcting error
  1803. recovery with the correcting error recovery used by parsers generated
  1804. by `standard' LLgen.
  1805. Consider the next C program, where there is a `)' missing in the
  1806. header of function `test'.
  1807. .br
  1808. .nf
  1809. 1 int test(a,b
  1810. 2
  1811. 3 int a,b;
  1812. 4
  1813. 5 {
  1814. 6 if (a < b)
  1815. 7 return(1);
  1816. 8 else
  1817. 9 return(0);
  1818. 10 }
  1819. .fi
  1820. .LP
  1821. This small error derails the `standard' parser; it produces the
  1822. following error messages, where we have left out 7 messages reporting
  1823. semantic errors:
  1824. .br
  1825. .nf
  1826. line 3: , missing before type_identifier
  1827. line 3: , missing before identifier
  1828. line 3: ) missing before ;
  1829. line 5: { deleted
  1830. line 6: if deleted
  1831. line 6: < deleted
  1832. line 6: ) missing before identifier
  1833. line 6: ) deleted
  1834. line 7: identifier missing before return
  1835. line 7: ; missing before return
  1836. line 7: { missing before return
  1837. line 8: else deleted
  1838. .fi
  1839. .LP
  1840. In contrast, the parser using non-correcting error recovery produces
  1841. only one error message:
  1842. .br
line 3: type_identifier illegal
.br
This error message correctly pinpoints the error: there should
have been a `)' at the position where the type identifier `int' is.
  1846. .LP
  1847. Now, an example with Modula-2; consider this program:
  1848. .br
  1849. .nf
  1850. 1 MODULE test;
  1851. 2
  1852. 3 TYPES
  1853. 4 ElementRecordType = RECORD
  1854. 5 Element: ElementType;
  1855. 6 Next,
  1856. 7 Prior: ElementPointerType;
  1857. 8 END;
  1858. 9
  1859. 10 VARS a,b,c: ElementRecordType;
  1860. 11
  1861. 12
  1862. 13 BEGIN
  1863. 14
  1864. 15 a := b;
  1865. 16
  1866. 17 END test.
  1867. .fi
  1868. .LP
  1869. There are two syntactic errors in this program; on line 3, TYPES should be TYPE, and
  1870. on line 10, VARS should be VAR. We have left out the type declarations of
  1871. ElementType and ElementPointerType; clearly this will generate semantic
  1872. errors, but we are only interested in syntactic errors anyway.
  1873. The correcting error recovery parser
  1874. again derails on this program; it produces the following syntactic error messages:
  1875. .br
  1876. .nf
  1877. line 3: CONST missing before identifier
  1878. line 4: '=' missing before identifier
  1879. line 4: RECORD deleted
  1880. line 5: ':' deleted
  1881. line 5: ';' missing before identifier
  1882. line 5: '=' missing before ';'
  1883. line 5: number missing before ';'
  1884. line 6: ',' deleted
  1885. line 7: '=' missing before identifier
  1886. line 7: ':' deleted
  1887. line 7: ';' missing before identifier
  1888. line 7: '=' missing before ';'
  1889. line 7: number missing before ';'
  1890. line 8: ';' deleted
  1891. line 10: identifier deleted
  1892. line 10: ',' deleted
  1893. line 10: identifier deleted
  1894. line 10: ',' deleted
  1895. line 10: identifier deleted
  1896. line 10: ':' deleted
  1897. line 10: identifier deleted
  1898. line 10: ';' deleted
  1899. line 13: BEGIN deleted
  1900. line 15: identifier deleted
  1901. line 15: := deleted
  1902. line 15: identifier deleted
  1903. line 15: ';' deleted
  1904. line 17: END deleted
  1905. line 17: identifier deleted
  1906. .fi
  1907. .LP
  1908. The error correction mechanism clearly makes the wrong guess by inserting
  1909. CONST on line 3; as a result, all that follows is rejected as incorrect.
  1910. In contrast, the non-correcting error recovery mechanism only produces
  1911. two error messages:
  1912. .br
  1913. .nf
  1914. line 3: identifier illegal
  1915. line 10: identifier illegal
  1916. .fi
  1917. .LP
This again exactly pinpoints the errors: the identifiers TYPES and
  1919. VARS constitute the only errors in the program. Note that the
  1920. presence of more than one error does not cause any problems to the
  1921. non-correcting recovery mechanism.
  1922. .bp
  1923. .nr PS 12
  1924. .nr VS 14
  1925. .NH
  1926. Conclusion
  1927. .nr PS 10
  1928. .nr VS 12
  1929. .LP
  1930. After implementing and testing a non-correcting error recovery mechanism
we have come to the conclusion that it is indeed superior to correcting
mechanisms with regard to the error messages it produces;
  1933. the examples we have given clearly show this. However, there is a
  1934. clear loss of performance when errors are present in a program,
  1935. although we have found this performance
  1936. degradation to be acceptable. We feel that the benefits of
  1937. better error messages outweigh the loss of performance. In any case,
  1938. correct programs do not suffer at all from the incorporation
  1939. of a non-correcting recovery mechanism.
  1940. The error recovery mechanism we implemented does not make
  1941. unreasonable demands on resources; the size of the prediction
  1942. graphs stays within reasonable limits.
  1943. .LP
  1944. The main problems we encountered had to do with recognizing
  1945. `languages within languages', and semantic actions that did
  1946. unreasonable things like eating input. The more `well-behaved' a
  1947. parser is, the better the results the non-correcting error recovery
  1948. mechanism gives. This is also true for the input grammars: with a
  1949. language like Modula-2, whose syntax has been designed with parser
  1950. generators in mind, the performance of the non-correcting mechanism
is better than with C, whose syntax is extremely hard, if not
impossible, to describe with an LL(1) grammar.
  1953. .bp
  1954. .nr PS 12
  1955. .nr VS 14
  1956. .NH
  1957. Bibliography
  1958. .nr PS 10
  1959. .nr VS 12
  1960. .IP [CORMACK] 12
  1961. Gordon V. Cormack, `An LR substring parser for noncorrecting syntax error
  1962. recovery', ACM SIGPLAN Notices, vol. 24, no. 7, p. 161-169, July 1989
  1963. .IP [GRUNE] 12
  1964. Dick Grune, Ceriel J.H. Jacobs, `A programmer friendly LL(1) parser
  1965. generator', Softw. Pract. Exper., vol. 18, no. 1, p. 29-38, Jan 1988
  1966. .IP [RICHTER] 12
  1967. Helmut Richter, `Noncorrecting syntax error recovery', ACM Trans. Prog. Lang.
Sys., vol. 7, no. 3, p. 478-489, July 1985
  1969. .IP [ROEHRICH] 12
  1970. Johannes R\*:ohrich, `Methods for the automatic construction of error
  1971. correcting parsers', Acta Inform., vol. 13, no. 2, p. 115-139, Feb 1980
  1972. .IP [TOMITA] 12
  1973. Masaru Tomita, Efficient parsing for natural language, Kluwer Academic
Publishers, Boston, p. 210, 1986
  1975. .bp
  1976. .SH
  1977. Appendix A: Implementation Issues
  1978. .nr PS 10
  1979. .nr VS 12
  1980. .RS
  1981. .LP
In this appendix we will describe some implementation issues:
the data structure used to store the grammar during non-correcting
error recovery, the postponing of deletions of graph elements until after
the prediction phase, and the implementation of the %substart directive.
  1986. .RE
  1987. .SH
  1988. A.1 The grammar data structure
  1989. .LP
The grammar data structure used by the non-correcting error recovery technique has
to meet two conditions: easy access to a rule as a whole, to make
substituting nonterminals efficient, and easy access to each symbol in the RHS
of a rule, to make starting error recovery and finding continuations
efficient. To fulfill these conditions we decided to construct the
storage of the grammar as follows.
  1996. .LP
A rule in the grammar is divided into two
parts: a LHS and a RHS. The LHS is represented by a struct `lhs', and
for each symbol in the RHS a struct `symbol' is constructed.
A struct `lhs' contains the number of the
nonterminal forming the LHS of the rule, a pointer to the RHS, the
FIRST and FOLLOW sets of the nonterminal, and a flag `empty' which
indicates whether or not the nonterminal produces empty. A struct
`symbol' contains a field indicating the type of the symbol, i.e.
a terminal or a nonterminal, the number of the symbol, a `link' pointer
to a struct `symbol' that represents the same symbol, a `next' pointer
to the rest of the RHS, and a pointer back to the LHS.
  2008. .LP
A special struct `symbol' is added to the end of the RHS to indicate
the end of a rule. The type of this struct is LLEORULE, the number
is set to -1, and the pointers `link' and `next' are nil.
.LP
In case there is more than one RHS for a LHS, all the RHS's
are put after each other, separated by another special struct
`symbol'. The type of this struct is LLALT, the number is set to
-1, and the `link' pointer is nil. After the last RHS an
`LLEORULE'-struct marker is added.
  2018. .LP
Finally, to make searching efficient there are two arrays: `terminals'
and `nonterminals'. `terminals' is indexed by the number of a terminal
and contains for each terminal a struct containing a `link' pointer
to a symbol, representing this terminal, in the RHS of a rule. Because
this symbol in turn has a `link' pointer to another symbol representing
the terminal, it is possible, by following this chain of pointers,
to find all rules containing such a terminal. In a similar way `nonterminals'
is indexed by the number of a nonterminal and contains for each
nonterminal a struct. This struct not only contains a `link' pointer
linking all rules with this nonterminal, but also a `rule'
pointer, which points to the RHS or RHS's of the rules of which
the nonterminal forms the LHS.
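.LP
In C, the data structure just described might be declared as follows;
these declarations are a sketch, and details such as the field names,
the array sizes, and the representation of the sets are assumptions.
.nf
#define NTERMINALS    256   /* hypothetical sizes */
#define NNONTERMINALS 256

struct symbol {
    int type;               /* LLTERM, LLNONTERM, LLALT or LLEORULE */
    int number;             /* symbol number; -1 for LLALT/LLEORULE */
    struct symbol *link;    /* next occurrence of the same symbol */
    struct symbol *next;    /* rest of the RHS */
    struct lhs *lhs;        /* back pointer to the LHS of the rule */
};

struct lhs {
    int number;             /* number of the nonterminal */
    struct symbol *rhs;     /* the RHS (or RHS's) of the rule */
    unsigned char *first;   /* FIRST set of the nonterminal */
    unsigned char *follow;  /* FOLLOW set of the nonterminal */
    int empty;              /* the nonterminal produces empty */
};

struct {
    struct symbol *link;    /* chain of occurrences in RHS's */
} terminals[NTERMINALS];

struct {
    struct symbol *link;    /* chain of occurrences in RHS's */
    struct lhs *rule;       /* rule(s) of which it is the LHS */
} nonterminals[NNONTERMINALS];
.fi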
  2031. .LP
  2032. As an example, consider the following grammar:
  2033. .br
  2034. A: a B
  2035. .br
  2036. B: a | $epsilon$
  2037. .br
  2038. This will result in the picture below. Note that `pointer' fields
  2039. without an arrow indicate nil pointers.
  2040. .PS
  2041. dx = 0.05
  2042. down
  2043. A_a: box ht boxht/2 "link"
  2044. box invis "a" ljust with .e at A_a.w
  2045. move to A_a.s
  2046. move
  2047. move
  2048. A: box "link" "rule"
  2049. B: box "link" "rule"
  2050. line dashed from A.w to A.e
  2051. line dashed from B.w to B.e
  2052. box invis "A" ljust with .e at A.w
  2053. box invis "B" ljust with .e at B.w
  2054. move to A.ne
  2055. right
  2056. move
  2057. move
  2058. down
  2059. LHS_A: box wid 1.2 * boxwid ht 2.5 * boxht "`A'" "rhs" "first" "follow" "empty 0"
  2060. line dashed from 0.2 <LHS_A.nw, LHS_A.sw> to 0.2 <LHS_A.ne, LHS_A.se>
  2061. line dashed from 0.4 <LHS_A.nw, LHS_A.sw> to 0.4 <LHS_A.ne, LHS_A.se>
  2062. line dashed from 0.6 <LHS_A.nw, LHS_A.sw> to 0.6 <LHS_A.ne, LHS_A.se>
  2063. line dashed from 0.8 <LHS_A.nw, LHS_A.sw> to 0.8 <LHS_A.ne, LHS_A.se>
  2064. move to LHS_A.ne + (1,0)
  2065. RHS_a1: box wid 2.0 * boxwid ht 2.5 * boxht "LLTERM" "`a'" "link" "next" "lhs"
line dashed from 0.2 <RHS_a1.nw, RHS_a1.sw> to 0.2 <RHS_a1.ne, RHS_a1.se>
line dashed from 0.4 <RHS_a1.nw, RHS_a1.sw> to 0.4 <RHS_a1.ne, RHS_a1.se>
line dashed from 0.6 <RHS_a1.nw, RHS_a1.sw> to 0.6 <RHS_a1.ne, RHS_a1.se>
line dashed from 0.8 <RHS_a1.nw, RHS_a1.sw> to 0.8 <RHS_a1.ne, RHS_a1.se>
move to RHS_a1.ne + (1,0)
RHS_B: box wid 2.0 * boxwid ht 2.5 * boxht "LLNONTERM" "`B'" "link" "next" "lhs"
line dashed from 0.2 <RHS_B.nw, RHS_B.sw> to 0.2 <RHS_B.ne, RHS_B.se>
line dashed from 0.4 <RHS_B.nw, RHS_B.sw> to 0.4 <RHS_B.ne, RHS_B.se>
line dashed from 0.6 <RHS_B.nw, RHS_B.sw> to 0.6 <RHS_B.ne, RHS_B.se>
line dashed from 0.8 <RHS_B.nw, RHS_B.sw> to 0.8 <RHS_B.ne, RHS_B.se>
move to RHS_B.ne + (1,0)
RHS_END1: box wid 2.0 * boxwid ht 2.5 * boxht "LLEORULE" "-1" "link" "next" "lhs"
line dashed from 0.2 <RHS_END1.nw, RHS_END1.sw> to 0.2 <RHS_END1.ne, RHS_END1.se>
line dashed from 0.4 <RHS_END1.nw, RHS_END1.sw> to 0.4 <RHS_END1.ne, RHS_END1.se>
line dashed from 0.6 <RHS_END1.nw, RHS_END1.sw> to 0.6 <RHS_END1.ne, RHS_END1.se>
line dashed from 0.8 <RHS_END1.nw, RHS_END1.sw> to 0.8 <RHS_END1.ne, RHS_END1.se>
move to LHS_A.s - (0,1)
LHS_B: box wid 1.2 * boxwid ht 2.5 * boxht "`B'" "rhs" "first" "follow" "empty 1"
line dashed from 0.2 <LHS_B.nw, LHS_B.sw> to 0.2 <LHS_B.ne, LHS_B.se>
line dashed from 0.4 <LHS_B.nw, LHS_B.sw> to 0.4 <LHS_B.ne, LHS_B.se>
line dashed from 0.6 <LHS_B.nw, LHS_B.sw> to 0.6 <LHS_B.ne, LHS_B.se>
line dashed from 0.8 <LHS_B.nw, LHS_B.sw> to 0.8 <LHS_B.ne, LHS_B.se>
move to LHS_B.ne + (1,0)
RHS_a2: box wid 2.0 * boxwid ht 2.5 * boxht "LLTERM" "`a'" "link" "next" "lhs"
line dashed from 0.2 <RHS_a2.nw, RHS_a2.sw> to 0.2 <RHS_a2.ne, RHS_a2.se>
line dashed from 0.4 <RHS_a2.nw, RHS_a2.sw> to 0.4 <RHS_a2.ne, RHS_a2.se>
line dashed from 0.6 <RHS_a2.nw, RHS_a2.sw> to 0.6 <RHS_a2.ne, RHS_a2.se>
line dashed from 0.8 <RHS_a2.nw, RHS_a2.sw> to 0.8 <RHS_a2.ne, RHS_a2.se>
move to RHS_a2.ne + (1,0)
RHS_ALT: box wid 2.0 * boxwid ht 2.5 * boxht "LLALT" "-1" "link" "next" "lhs"
line dashed from 0.2 <RHS_ALT.nw, RHS_ALT.sw> to 0.2 <RHS_ALT.ne, RHS_ALT.se>
line dashed from 0.4 <RHS_ALT.nw, RHS_ALT.sw> to 0.4 <RHS_ALT.ne, RHS_ALT.se>
line dashed from 0.6 <RHS_ALT.nw, RHS_ALT.sw> to 0.6 <RHS_ALT.ne, RHS_ALT.se>
line dashed from 0.8 <RHS_ALT.nw, RHS_ALT.sw> to 0.8 <RHS_ALT.ne, RHS_ALT.se>
move to RHS_ALT.ne + (1,0)
RHS_END2: box wid 2.0 * boxwid ht 2.5 * boxht "LLEORULE" "-1" "link" "next" "lhs"
line dashed from 0.2 <RHS_END2.nw, RHS_END2.sw> to 0.2 <RHS_END2.ne, RHS_END2.se>
line dashed from 0.4 <RHS_END2.nw, RHS_END2.sw> to 0.4 <RHS_END2.ne, RHS_END2.se>
line dashed from 0.6 <RHS_END2.nw, RHS_END2.sw> to 0.6 <RHS_END2.ne, RHS_END2.se>
line dashed from 0.8 <RHS_END2.nw, RHS_END2.sw> to 0.8 <RHS_END2.ne, RHS_END2.se>
# Next pointers, upper row
.ps 30
circle radius .01 at 0.75 <A.ne, A.se> - (dx, 0)
circle radius .01 at 0.3 <LHS_A.ne, LHS_A.se> - (dx, 0)
circle radius .01 at 0.7 <RHS_a1.ne, RHS_a1.se> - (dx, 0)
circle radius .01 at 0.7 <RHS_B.ne, RHS_B.se> - (dx, 0)
.ps 10
arrow from 0.75 <A.ne, A.se> - (dx, 0) to 0.3 <LHS_A.nw, LHS_A.sw>
arrow from 0.3 <LHS_A.ne, LHS_A.se> - (dx, 0) to 0.3 <RHS_a1.nw, RHS_a1.sw>
arrow from 0.7 <RHS_a1.ne, RHS_a1.se> - (dx, 0) to 0.7 <RHS_B.nw, RHS_B.sw>
arrow from 0.7 <RHS_B.ne, RHS_B.se> - (dx, 0) to 0.7 <RHS_END1.nw, RHS_END1.sw>
# Next pointers, lower row
.ps 30
circle radius .01 at 0.75 <B.ne, B.se> - (dx, 0)
circle radius .01 at 0.3 <LHS_B.ne, LHS_B.se> - (dx, 0)
circle radius .01 at 0.7 <RHS_a2.ne, RHS_a2.se> - (dx, 0)
circle radius .01 at 0.7 <RHS_ALT.ne, RHS_ALT.se> - (dx, 0)
.ps 10
arrow from 0.75 <B.ne, B.se> - (dx, 0) to 0.3 <LHS_B.nw, LHS_B.sw>
arrow from 0.3 <LHS_B.ne, LHS_B.se> - (dx, 0) to 0.3 <RHS_a2.nw, RHS_a2.sw>
arrow from 0.7 <RHS_a2.ne, RHS_a2.se> - (dx, 0) to 0.7 <RHS_ALT.nw, RHS_ALT.sw>
arrow from 0.7 <RHS_ALT.ne, RHS_ALT.se> - (dx, 0) to 0.7 <RHS_END2.nw, RHS_END2.sw>
# Link pointers
.ps 30
circle radius .01 at 0.5 <RHS_a1.ne, RHS_a1.se> - (2*dx, 0)
circle radius .01 at 0.5 <A_a.ne, A_a.se> - (dx, 0)
circle radius .01 at 0.25 <B.ne, B.se> - (dx, 0)
.ps 10
arrow dashed from 0.5 <RHS_a1.ne, RHS_a1.se> - (2*dx, 0) to RHS_a2.ne - (2*dx, 0)
line dashed from 0.5 <A_a.ne, A_a.se> - (dx, 0) right 4.0 * boxwid then to RHS_a1.ne - (2*dx, 0) ->
line dashed from 0.25 <B.ne, B.se> - (dx, 0) right then up .75 then right 7.0 * boxwid then to RHS_B.ne - (2*dx, 0) ->
# LHS pointers, upper row
.ps 30
circle radius .01 at 0.9 <RHS_a1.ne, RHS_a1.se> - (3*dx, 0)
circle radius .01 at 0.9 <RHS_B.ne, RHS_B.se> - (3*dx, 0)
circle radius .01 at 0.9 <RHS_END1.ne, RHS_END1.se> - (3*dx, 0)
.ps 10
line from 0.9 <RHS_a1.ne, RHS_a1.se> - (3*dx, 0) down ->
line from 0.9 <RHS_B.ne, RHS_B.se> - (3*dx, 0) down ->
line from 0.9 <RHS_END1.ne, RHS_END1.se> - (3*dx, 0) down then left 8.0 * boxwid then to LHS_A.se ->
# LHS pointers, lower row
.ps 30
circle radius .01 at 0.9 <RHS_a2.ne, RHS_a2.se> - (3*dx, 0)
circle radius .01 at 0.9 <RHS_ALT.ne, RHS_ALT.se> - (3*dx, 0)
circle radius .01 at 0.9 <RHS_END2.ne, RHS_END2.se> - (3*dx, 0)
.ps 10
line from 0.9 <RHS_a2.ne, RHS_a2.se> - (3*dx, 0) down ->
line from 0.9 <RHS_ALT.ne, RHS_ALT.se> - (3*dx, 0) down ->
line from 0.9 <RHS_END2.ne, RHS_END2.se> - (3*dx, 0) down then left 8.0 * boxwid then to LHS_B.se ->
# Text above structs
box invis ht boxht/2 "terminals" with .s at A_a.n
box invis ht boxht/2 "nonterminals" with .s at A.n
box invis ht boxht/2 "lhs" with .s at LHS_A.n
box invis ht boxht/2 "lhs" with .s at LHS_B.n
box invis ht boxht/2 "symbol" with .s at RHS_a1.n
box invis ht boxht/2 "symbol" with .s at RHS_B.n
box invis ht boxht/2 "symbol" with .s at RHS_END1.n
box invis ht boxht/2 "symbol" with .s at RHS_a2.n
box invis ht boxht/2 "symbol" with .s at RHS_ALT.n
box invis ht boxht/2 "symbol" with .s at RHS_END2.n
.PE
.LP
Note that the empty alternative for `B' is represented in the
data structure by the `LLEORULE'-struct immediately following
the `LLALT'-struct. When there are other alternatives,
the `LLEORULE'-struct is replaced by an `LLALT'-struct followed
by the other alternatives and an `LLEORULE'-struct.
Finally, when the empty rule is the only rule for a
nonterminal, the RHS consists of just an `LLEORULE'-struct.
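.LP
As an illustration, the structs in the picture could be declared
as follows. This is only a sketch: the field names are inferred
from the diagram above, and the actual declarations in LLgen may
differ.
.nr PS 8
.nr VS 10
.LP
.br
.nf
/* Sketch only: names and types are inferred from the diagram */
struct lhs {               /* one per nonterminal                 */
    struct symbol *rhs;    /* chain of right hand side elements   */
    char *first;           /* FIRST set of the nonterminal        */
    char *follow;          /* FOLLOW set of the nonterminal       */
    int empty;             /* 1 if the nonterminal produces empty */
};

struct symbol {            /* one right hand side element         */
    int kind;              /* LLTERM, LLNONTERM, LLALT, LLEORULE  */
    int value;             /* token number, or -1                 */
    struct symbol *link;   /* other occurrence of the same symbol */
    struct symbol *next;   /* next element in this alternative    */
    struct lhs *lhs;       /* back pointer to the rule (lhs)      */
};
.fi
.nr PS 10
.nr VS 12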
.SH
A.2 Delayed deletes
.LP
We encountered a problem with deleting elements during the
prediction phase. Imagine that we have a nonterminal `B' on top of
the graph, and `B' has two alternatives. Now suppose that we apply
the first alternative and find out that it leads to a `dead end',
i.e. a head that does not match the input symbol, so we want to
get rid of it. If we delete it immediately, the deletion algorithm
will also deallocate `[B]' and possibly some elements below `[B]'.
However, there was another alternative for `[B]' that has not yet
been developed, and this alternative may lead to a head that is
legal. But `[B]' has already been deleted and thus cannot be used
anymore. A similar situation can occur when we want to delete a
joined element; the substitution of a nonterminal that only
produces empty, and thus has no element above it in the graph, can
also lead to such a situation. We therefore decided to put `dead
ends' on a list, `cleanup_arr[]'; after the prediction phase has
finished we delete all elements on this list, together with all
their descendants that thereby become unreachable.
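.LP
The scheme amounts to little more than an array and a counter; the
following sketch shows the idea. Apart from `cleanup_arr[]' the
names are hypothetical, and the real code differs in detail.
.nr PS 8
.nr VS 10
.LP
.br
.nf
/* Sketch only: schedule dead ends during the prediction phase,
 * delete them afterwards. `graph_elem', `delete_elem' and
 * MAXCLEANUP are hypothetical names. */
#define MAXCLEANUP 512

struct graph_elem;              /* prediction graph element  */
extern delete_elem();           /* recursive deletion routine */

static struct graph_elem *cleanup_arr[MAXCLEANUP];
static int n_cleanup = 0;

schedule_delete(elem)
struct graph_elem *elem;
{
    /* remember the dead end instead of deleting it right away */
    cleanup_arr[n_cleanup++] = elem;
}

flush_deletes()
{
    int i;

    /* safe now: the prediction phase has finished, so no element
     * on the list can still be needed by another alternative */
    for (i = 0; i < n_cleanup; i++)
        delete_elem(cleanup_arr[i]);
    n_cleanup = 0;
}
.fi
.nr PS 10
.nr VS 12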
.SH
A.3 Clearing flags
.LP
We implemented two different ways to clear the flags set by the
prediction phase of the algorithm: the first recursively walks the
whole graph, following the flags; the second puts all elements
visited by the prediction phase on a list, and after the
prediction phase has finished the algorithm walks through this
list, clearing the flags of all elements on it. We took
measurements of both algorithms and found that for small programs
the times did not differ much, but large programs were processed
faster by the second algorithm. We therefore decided to use the
second algorithm.
.LP
To speed up the algorithm even more, we do not deallocate the list
after a prediction phase has finished; we just set the number of
elements on the list to 0. This saves considerably on the number
of `Malloc'-calls.
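.LP
A sketch of the second scheme, including the reuse of the list, is
given below; the names are again hypothetical.
.nr PS 8
.nr VS 10
.LP
.br
.nf
/* Sketch only: list-based flag clearing */
#define MAXVISITED 1024

struct graph_elem {
    int flag;                   /* set by the prediction phase */
    /* ... other fields ... */
};

static struct graph_elem *visited[MAXVISITED];
static int n_visited = 0;

set_flag(elem)
struct graph_elem *elem;
{
    /* called for every element the prediction phase visits */
    elem->flag = 1;
    visited[n_visited++] = elem;
}

clear_flags()
{
    int i;

    for (i = 0; i < n_visited; i++)
        visited[i]->flag = 0;
    /* the list itself is kept for the next prediction phase;
     * resetting only the count saves Malloc calls */
    n_visited = 0;
}
.fi
.nr PS 10
.nr VS 12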
.SH
A.4 Implementation of the %erroneous directive
.LP
As explained in chapter 3, the user can put a %erroneous directive
in front of a terminal, making the non-correcting error recovery
mechanism ignore that terminal. However, implementing this
directive was not entirely straightforward; consider, for example,
the rule
.br
.nf
A: 'a' | %erroneous 'b' | 'c';
.fi
.LP
Just leaving out terminal 'b' will not do, because then
nonterminal A would suddenly produce empty, which it did not do
before. The rule should become
.br
.nf
A: 'a' | 'c';
.fi
but this is hard to implement in LLgen. We took a different
approach: we introduce a new terminal 'ERRONEOUS' and substitute
it for all terminals with a %erroneous directive in front of them.
Thus, the example rule becomes
.br
.nf
A: 'a' | ERRONEOUS | 'c';
.fi
.LP
Since the terminal ERRONEOUS will never be in the input to the
parser, this has exactly the desired effect: when a prediction
phase produces ERRONEOUS as the head of a prediction graph, this
head will never match the input. In particular, it will not match
the terminal that was originally there (in this case 'b'), so that
terminal is no longer regarded as part of the input language at
that point.
.bp
.SH
Appendix B: Using the non-correcting error recovery
.LP
To use the new non-correcting error recovery mechanism, LLgen has
to be called with the new flag -n. LLgen will then create an extra
file called `Lncor.c', which contains the code for the
non-correcting recovery mechanism. This file has to be compiled
and linked with the rest of the program, just like the file
`Lpars.c'.
.LP
The user-supplied error reporting routine `LLmessage' will have to
be modified slightly: when it is called with a positive parameter,
it should only set the attributes of the inserted token, but not
report an error. Note that the lexical analyzer must still return
the same token as it did the last time it was called. When
LLmessage is called with parameter 0, it should report that the
token in the global variable LLsymb is illegal; if the value of
LLsymb is `EOFILE', the routine should report an unexpected
end-of-file. When LLmessage is called with parameter -1, it should
report that end-of-file was expected. To facilitate switching
between correcting and non-correcting error recovery, the file
Lpars.h contains a statement `#define LLNONCORR', which indicates
that the non-correcting mechanism is enabled.
Here is a skeleton for the modified LLmessage routine:
.nr PS 8
.nr VS 10
.LP
.br
.nf
#include "Lpars.h"

extern int LLsymb;

LLmessage(flag)
int flag;
{
    if (flag < 0)
    {
        /* Error message "end-of-file expected" */;
    }
    else if (flag)
    {
        /* flag equals the number of the inserted token */
#ifndef LLNONCORR
        /* Error message "token inserted" */;
#endif
        /* Code to set attributes for inserted token */
        /* Code to make lexical analyzer return same token as before */
    }
    else
    {
        /* The number of the illegal or deleted token is in LLsymb */
#ifndef LLNONCORR
        /* Error message "token deleted" */;
#else
        if (LLsymb == EOFILE)
        {
            /* Error message "unexpected end of file" */;
        }
        else
        {
            /* Error message "token illegal" */;
        }
#endif
    }
}
.fi
.nr PS 10
.nr VS 12
.LP
For best results, one should check whether the parser calls other
parsers in semantic actions; if this is the case, and the called
parser processes the same input file as the calling parser, then a
%substart directive should be put in front of the semantic action
that starts the parser. If a semantic action calls parsers defined
by startsymbols, say, A and B, then `%substart A, B;' should be
put in front of the action, as in the sketch below. As an
alternative, one can use the -s flag of LLgen; this has the same
effect as putting `%substart X, Y, ....;' in front of all semantic
actions, where X, Y, .... are the startsymbols of the grammar.
Clearly, it is preferable to analyze the grammar and put %substart
directives only where appropriate.
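.LP
The sketch below shows the placement of the directive; the rule
name, the startsymbols A and B, and the action body are
hypothetical.
.nr PS 8
.nr VS 10
.LP
.br
.nf
/* Sketch only: hypothetical rule and action */
rule: something
%substart A, B;
{
/* semantic action that runs the parsers for
 * startsymbols A and B on the same input file */
A();
B();
}
;
.fi
.nr PS 10
.nr VS 12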
.LP
Finally, beware of syntactic errors being handled in semantic
actions; e.g., one could have a rule like
.nr PS 8
.nr VS 10
.LP
.br
.nf
Assignment_statement: lvalue
[
'='
{
error(":= expected");
}
|
':='
]
expression
;
.fi
.nr PS 10
.nr VS 12
.LP
To ensure that the non-correcting mechanism will recognize the
`=' as a syntactic error, a `%erroneous' directive should be
put in front of it.
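With the directive added, the rule would then read:
.br
.nf
Assignment_statement: lvalue
[
%erroneous '='
{
error(":= expected");
}
|
':='
]
expression
;
.fi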