
Added the appropriate #! magic at the beginning of shell scripts. (Some modern shells don't like scripts to be without it.)

dtrg, 18 years ago
commit be7ec5cdd0
100 changed files with 17679 additions and 0 deletions
  1. .distr  +21 -0
  2. Action  +276 -0
  3. Copyright  +32 -0
  4. DistrAction  +3 -0
  5. Makefile  +35 -0
  6. NEW  +45 -0
  7. README  +174 -0
  8. TODO  +20 -0
  9. TakeAction  +144 -0
  10. bin/.distr  +9 -0
  11. bin/cc-and-mkdep.ack  +8 -0
  12. bin/cc-and-mkdep.all  +21 -0
  13. bin/cc-and-mkdep.sun  +8 -0
  14. bin/do_deps  +19 -0
  15. bin/do_resolve  +48 -0
  16. bin/em.pascal  +1 -0
  17. bin/lint-lib.ack  +13 -0
  18. bin/lint-lib.unix  +13 -0
  19. bin/mk_manpage  +20 -0
  20. bin/rm_deps  +9 -0
  21. distr/Action  +3 -0
  22. distr/Action1  +16 -0
  23. distr/Exceptions  +15 -0
  24. distr/How_To  +90 -0
  25. distr/dwalk  +26 -0
  26. distr/echod  +1 -0
  27. distr/f.attf  +26 -0
  28. distr/listall  +10 -0
  29. distr/listall.d  +2 -0
  30. distr/listdirs  +10 -0
  31. distr/mk_distr_syms  +40 -0
  32. distr/mka  +11 -0
  33. distr/mkd  +0 -0
  34. distr/mkdist  +165 -0
  35. distr/mkf  +19 -0
  36. distr/mks  +1 -0
  37. distr/mktree  +42 -0
  38. distr/todistr  +26 -0
  39. distr/ts  +2 -0
  40. doc/.distr  +33 -0
  41. doc/6500.doc  +1893 -0
  42. doc/LLgen/.distr  +4 -0
  43. doc/LLgen/LLgen.n  +1077 -0
  44. doc/LLgen/LLgen.refs  +54 -0
  45. doc/LLgen/LLgen_NCER.n  +2712 -0
  46. doc/LLgen/Makefile  +15 -0
  47. doc/LLgen/proto.make  +20 -0
  48. doc/Makefile  +82 -0
  49. doc/READ_ME  +8 -0
  50. doc/ack.doc  +444 -0
  51. doc/ansi_C.doc  +365 -0
  52. doc/basic.doc  +949 -0
  53. doc/ceg/.distr  +3 -0
  54. doc/ceg/Makefile  +6 -0
  55. doc/ceg/ceg.ref  +42 -0
  56. doc/ceg/ceg.tr  +1587 -0
  57. doc/ceg/proposal.tr  +284 -0
  58. doc/ceg/proto.make  +12 -0
  59. doc/ceg/prototype.tr  +276 -0
  60. doc/cg.doc  +1864 -0
  61. doc/cref.doc  +323 -0
  62. doc/crefman.doc  +629 -0
  63. doc/ego/.distr  +18 -0
  64. doc/ego/Makefile  +55 -0
  65. doc/ego/bo/.distr  +1 -0
  66. doc/ego/bo/bo1  +162 -0
  67. doc/ego/ca/.distr  +1 -0
  68. doc/ego/ca/ca1  +65 -0
  69. doc/ego/cf/.distr  +6 -0
  70. doc/ego/cf/cf1  +94 -0
  71. doc/ego/cf/cf2  +50 -0
  72. doc/ego/cf/cf3  +53 -0
  73. doc/ego/cf/cf4  +93 -0
  74. doc/ego/cf/cf5  +82 -0
  75. doc/ego/cf/cf6  +21 -0
  76. doc/ego/cj/.distr  +1 -0
  77. doc/ego/cj/cj1  +144 -0
  78. doc/ego/cs/.distr  +5 -0
  79. doc/ego/cs/cs1  +45 -0
  80. doc/ego/cs/cs2  +86 -0
  81. doc/ego/cs/cs3  +250 -0
  82. doc/ego/cs/cs4  +311 -0
  83. doc/ego/cs/cs5  +46 -0
  84. doc/ego/ic/.distr  +5 -0
  85. doc/ego/ic/ic1  +57 -0
  86. doc/ego/ic/ic2  +150 -0
  87. doc/ego/ic/ic3  +431 -0
  88. doc/ego/ic/ic4  +83 -0
  89. doc/ego/ic/ic5  +166 -0
  90. doc/ego/il/.distr  +6 -0
  91. doc/ego/il/il1  +112 -0
  92. doc/ego/il/il2  +93 -0
  93. doc/ego/il/il3  +164 -0
  94. doc/ego/il/il4  +135 -0
  95. doc/ego/il/il5  +446 -0
  96. doc/ego/il/il6  +27 -0
  97. doc/ego/intro/.distr  +3 -0
  98. doc/ego/intro/head  +10 -0
  99. doc/ego/intro/intro1  +79 -0
  100. doc/ego/intro/tail  +17 -0

+ 21 - 0
.distr

@@ -0,0 +1,21 @@
+Action
+Copyright
+NEW
+README
+TODO
+TakeAction
+bin
+doc
+emtest
+etc
+fast
+fcc
+first
+h
+include
+modules
+lang
+lib
+mach
+man
+util

+ 276 - 0
Action

@@ -0,0 +1,276 @@
+name	"System definition"
+dir first
+action ack_sys
+failure "You have to run the shell script first/first"
+fatal
+end
+name "Manual pages"
+dir man
+end
+! name	"EM definition"
+! dir etc
+! end
+name "EM definition library"
+dir util/data
+end
+name "C utilities"
+dir util/cmisc
+end
+name "Yacc parser generator"
+dir util/byacc
+end
+name "Flex lexical analyzer generator"
+dir util/flex
+action "make firstinstall && make clean"
+end
+name "Include files for modules"
+dir modules/h
+end
+name "Modules"
+dir modules/src
+indir
+end
+name "LL(1) Parser generator"
+dir util/LLgen
+action "make firstinstall && make clean"
+end
+name "C preprocessor"
+dir util/cpp
+end
+name "Peephole optimizer libraries"
+dir modules/src/em_opt
+end
+name "ACK object utilities"
+dir util/amisc
+end
+name "Encode/Decode"
+dir util/misc
+end
+name "Shell files in bin"
+dir util/shf
+end
+name "EM assembler"
+dir util/ass
+end
+name "EM Peephole optimizer"
+dir util/opt
+end
+name "EM Global optimizer"
+dir util/ego
+indir
+end
+name "ACK archiver"
+dir util/arch
+end
+name "Program 'ack'"
+dir util/ack
+end
+name "Bootstrap for backend tables"
+dir util/cgg
+end
+name "Bootstrap for newest form of backend tables"
+dir util/ncgg
+end
+name "Bootstrap for code expanders"
+dir util/ceg
+indir
+end
+name "LED link editor"
+dir util/led
+end
+name "TOPGEN target optimizer generator"
+dir util/topgen
+end
+name "C frontend"
+dir lang/cem/cemcom
+end
+name "ANSI-C frontend"
+dir lang/cem/cemcom.ansi
+end
+name "ANSI-C preprocessor"
+dir lang/cem/cpp.ansi
+end
+name "ANSI-C header files"
+dir lang/cem/libcc.ansi
+end
+name "LINT C program checker"
+dir lang/cem/lint
+end
+name "EM definition lint-library"
+action "make lintlib"
+dir util/data
+end
+name "Modules lint libraries"
+dir modules/src
+indir "Action.lint"
+end
+name "Global optimizer lint libraries"
+dir util/ego/share
+action "make lintlib"
+end
+name "Pascal frontend"
+dir lang/pc/comp
+end
+name "Basic frontend"
+dir lang/basic/src
+end
+name "Occam frontend"
+dir lang/occam/comp
+end
+name "Modula-2 frontend"
+dir lang/m2/comp
+end
+name "Modula-2 definition modules"
+dir lang/m2/libm2
+end
+name "Modula-2 makefile generator"
+dir lang/m2/m2mm
+end
+name "Fortran to C compiler"
+dir lang/fortran/comp
+end
+name "EM interpreter in C"
+dir util/int
+end
+name "Symbolic debugger"
+dir util/grind
+end
+name "Intel 8086 support"
+dir mach/i86
+indir
+end
+name "Intel 80286 support for Xenix"
+dir mach/xenix3
+indir
+end
+name "Intel 80386 support for Xenix 386 System V"
+dir mach/i386
+indir
+end
+name "MSC6500 support"
+dir mach/6500
+indir
+end
+name "Motorola 6800 support"
+dir mach/6800
+indir
+end
+name "Motorola 6805 support"
+dir mach/6805
+indir
+end
+name "Motorola 6809 support"
+dir mach/6809
+indir
+end
+name "Intel 8080 support"
+dir mach/i80
+indir
+end
+name "2-2 Interpreter support"
+dir mach/em22
+indir
+end
+name "2-4 Interpreter support"
+dir mach/em24
+indir
+end
+name "4-4 Interpreter support"
+dir mach/em44
+indir
+end
+name "Motorola 68000 2-4 support"
+dir mach/m68k2
+indir
+end
+name "Motorola 68000 4-4 support"
+dir mach/m68k4
+indir
+end
+name "NS16032 support"
+dir mach/ns
+indir
+end
+name "PDP 11 support"
+dir mach/pdp
+indir
+end
+name "PMDS support"
+dir mach/pmds
+indir
+end
+name "PMDS 4/4 support"
+dir mach/pmds4
+indir
+end
+name "Signetics 2650 support"
+dir mach/s2650
+indir
+end
+name "Vax 4-4 support"
+dir mach/vax4
+indir
+end
+name "M68020 System V/68 support"
+dir mach/m68020
+indir
+end
+name "Sun 3 M68020 support"
+dir mach/sun3
+indir
+end
+name "Sun 4 SPARC SunOs 4 support"
+dir mach/sparc
+system "sparc|sparc_solaris"
+indir
+end
+name "Sun 4 SPARC Solaris support"
+dir mach/sparc_solaris
+system "sparc_solaris"
+indir
+end
+name "Sun 2 M68000 support"
+dir mach/sun2
+indir
+end
+name "Mantra M68000 System V.0 support"
+dir mach/mantra
+indir
+end
+name "PC Minix support"
+dir mach/minix
+indir
+end
+name "Atari ST Minix support"
+dir mach/minixST
+indir
+end
+name "Z80 support"
+dir mach/z80
+indir
+end
+name "Zilog Z8000 support"
+dir mach/z8000
+indir
+end
+name "Acorn Archimedes support"
+dir mach/arm
+indir
+end
+name "Documentation"
+dir doc
+end
+name "Motorola 68000 interpreters"
+system "m68*|sun*"
+dir mach/mantra/int
+end
+name "Fast compilers"
+system "m68020|sun3|i386|vax*"
+dir fast
+indir
+end
+name "Fast cc-compatible C compiler"
+system "sun3|vax*"
+dir fcc
+indir
+end

+ 32 - 0
Copyright

@@ -0,0 +1,32 @@
+Copyright (c) 1987, 1990, 1993, 2005 Vrije Universiteit, Amsterdam, The Netherlands.
+All rights reserved.
+
+Redistribution and use of the Amsterdam Compiler Kit in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+   * Redistributions in binary form must reproduce the above
+     copyright notice, this list of conditions and the following
+     disclaimer in the documentation and/or other materials provided
+     with the distribution.
+
+   * Neither the name of Vrije Universiteit nor the names of the
+     software authors or contributors may be used to endorse or
+     promote products derived from this software without specific
+     prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS, AUTHORS, AND
+CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL VRIJE UNIVERSITEIT OR ANY AUTHORS OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 3 - 0
DistrAction

@@ -0,0 +1,3 @@
+p=/proj/em/Work
+sh TakeAction 'make distr' $p/distr/Action
+sh TakeAction 'make distr' $p/distr/Action1

+ 35 - 0
Makefile

@@ -0,0 +1,35 @@
+cmp:           # compile everything and compare
+	(cd etc  ; make cmp )
+	(cd util ; make cmp )
+	(cd lang ; make cmp )
+	(cd mach ; make cmp )
+
+install:         # compile everything to machine code
+	(cd etc  ; make install )
+	(cd util ; make install )
+	(cd lang/cem ; make install )
+	(cd mach ; make install )
+	(cd lang/pc ; make install )
+
+clean:        # remove all non-sources, except boot-files
+	(cd doc ; make clean )
+	(cd man ; make clean )
+	(cd h   ; make clean )
+	(cd etc  ; make clean )
+	(cd util ; make clean )
+	(cd lang ; make clean )
+	(cd mach ; make clean )
+
+opr:            # print all sources
+	make pr | opr
+
+pr:             # print all sources
+	@( pr Makefile ; \
+	  (cd doc ; make pr ) ; \
+	  (cd man ; make pr ) ; \
+	  (cd h ; make pr ) ; \
+	  (cd etc  ; make pr ) ; \
+	  (cd lang ; make pr ) ; \
+	  (cd util ; make pr ) ; \
+	  (cd mach ; make pr ) \
+	)

+ 45 - 0
NEW

@@ -0,0 +1,45 @@
+This is ACK distribution 5.6.
+
+This is a minor update of 5.5, the last public release from the Vrije Universiteit.
+Only minor changes have been made to make the system build on modern
+platforms.
+
+The NEW document from the previous release follows.
+
+David Given
+dg@cowlark.com 2005-06-24
+
+-----------------------------------------------------------------------------
+
+The only addition with respect to the 5th ACK distribution is the support
+for Solaris 2 on SPARCs. It also contains many bug fixes.
+
+Notes for the 5th ACK distribution:
+
+It is not wise to mix files created by the previous version of the Kit
+with files belonging to this version, although that might sometimes work.
+Many problems with the previous distribution have been fixed.
+The major additions are:
+
+	- an ANSI C compiler
+	- a LINT C program checker, both non-ansi and ansi
+	- an Intel 80386 back-end
+	- a SPARC code expander
+	- a source level debugger for Pascal, Modula-2, C, and ANSI C
+	- an Acorn Archimedes back-end
+	- code-expanders for VAX, Intel 80386 and Motorola M68020 processors,
+	  and very fast Pascal, Modula-2, ANSI C, and C compilers constructed
+	  using these code expanders
+	- a cc-compatible very fast C compiler for SUN-3 and VAX.
+
+Also added, but not part of the Kit proper are
+	- flex: a lexical analyzer generator
+	- byacc: yacc-clone by UCB
+	- f2c: a Fortran to C compiler by AT&T.
+
+See the ACK installation manual for their copyright notices.
+
+--
+Ceriel Jacobs, Dept. of Mathematics and Computer Science, Vrije Universiteit,
+De Boelelaan 1081a, 1081 HV Amsterdam, The Netherlands
+Email:	ceriel@cs.vu.nl		Fax: +31 20 6427705

+ 174 - 0
README

@@ -0,0 +1,174 @@
+# $Source$
+# $State$
+
+Installing the ACK on a modern platform
+=======================================
+
+This document provides some very quick and dirty instructions for installing
+the ACK on a modern platform. It is not intended as a substitute for the
+real instructions, which can be found in doc/install.pr.
+
+Let me repeat myself:
+
+THE FULL INSTALLATION INSTRUCTIONS ARE IN doc/install.pr.
+
+The ACK is a very large and complex package and has received minimal
+maintenance for the best part of a decade. During that time, the Unix
+world has moved on, and many APIs have changed. It compiles cleanly on
+my, dtrg's, test machine, which is a Debian Ubuntu Linux system. Your
+mileage may vary.
+
+All disclaimers now done, on to the good stuff:
+
+Building the ACK
+----------------
+
+I'm assuming you're using Linux here, because that's what I use. If you
+don't use Linux, please let me know if you have any trouble and I'll update
+the instructions.
+
+1. Configure the build.
+
+   To do this, run the first/first script. You will be asked several
+   questions.
+   
+   * What is the root of the ACK source tree?
+   
+     This is the directory that you have unpacked the distribution into.
+     For example, /home/dg/src/Ack-5.6.
+     
+   * What is the root of the configuration tree?
+   
+     This is the directory that the build process will use for temporary
+     files. You'll only need this during the compilation process; it can
+     be removed afterwards.
+     For example, /tmp/ack-conf
+     
+   * What is the root of the ACK binaries?
+   
+     This is the ACK's installation path; where the binaries will live.
+     This needs to be writable during the build process --- if you want
+     to install in /usr/local, you either have to make /usr/local
+     writable or compile as root. Sorry!
+     
+   * What is your system type?
+   
+     Linux isn't on the list. Choose ANY.
+     
+   * Is this the system you are running on?
+   
+     Yes.
+     
+   * Are you satisfied?
+   
+     Yes.
+     
+   * What default machine do you wish to compile for?
+   
+     The ACK wants to know what architecture to target if you don't manually
+     specify an architecture. Unfortunately, it can't generate runnable
+     binaries for Linux or any other modern system (except possible Solaris
+     on Sparc). I'd recommend you choose em44. This will produce portable
+     binaries using the ACK's intermediate format, which you can run using
+     the int interpreter.
+     
+   * What kind of Unix are you running?
+   
+     Linux is a mixture, but I pick SYS_5 and it works.
+     
+   * Do you wish to limit the installation?
+   
+     No. If you pick Yes, the script will ask detailed questions about
+     exactly what you want to build. Modern systems are fast enough that
+     we may as well build everything.
+     
+   * Which system call library do you wish to use on the VAX?
+   
+     I don't have a VAX; the only person I know who has one uses it to vacuum
+     his carpets. I pick libsysV_2 with no ill effects.
+     
+   If the configuration script is happy, it will generate a script called
+   INSTALL.
+   
+2. Do the compilation.
+
+   The configuration script will recommend a command line. Execute this. On
+   modern systems, the compilation doesn't take long.
+   
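+   (The exact command to run is whatever first/first prints; purely as an
+   illustration, it will be something along the lines of
+
+     sh INSTALL > INSTALL.out 2>&1
+
+   which is also where the INSTALL.out file mentioned below comes from.)
+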
+   Check the output of the configuration script for "Failed" lines. On my
+   system there are two:
+   
+     $ grep Failed INSTALL.out
+     Failed for Intel 8080 download programs, see dl/Out
+     Failed for Intel 8080 support
+   
+   You can ignore these. They aren't important.
+   
+3. Use the ACK.
+
+   Ensure that the ACK's binary directory is on your path; this is /bin in
+   the directory you specified during the configuration process. In my
+   example, this is /usr/local/bin. The /man subdirectory should go on your
+   manpath.
+
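+   How you do that depends on your shell. For a Bourne-style shell, and
+   assuming the /usr/local example paths above, something like this in your
+   profile does the job (adjust the paths to your own installation):
+
+     PATH=/usr/local/bin:$PATH
+     MANPATH=/usr/local/man:$MANPATH
+     export PATH MANPATH
+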
+   To test your path, do: ack
+   
+   This should return silently.
+   
+   To test your manpath, do: man ack
+      
+   This will produce the documentation for the main compiler driver.
+   
+   If this works, you can remove the conf tree (/tmp/ack-conf in my example).
+   
+Gotchas
+-------
+
+There are some things you should be aware of.
+
+* The ACK's archiver tool is called 'arch'. This conflicts on Linux platforms
+  with a utility that displays the current architecture. If your compilation
+  occasionally fails obscurely and displays something like 'i686', you are
+  running afoul of this. As a workaround, rearrange your path so the ACK's
+  bin directory comes first --- but do be aware that some Linux system
+  tools may stop working.
+  
+* By default, the ack tool will compile K&R C. Practically all C source these
+  days is ANSI C --- use the -ansi switch to enable ANSI mode (see the example
+  after this list). No, the ACK is not C99 compatible.
+  
+* Not all combinations of optimisation and architectures work. This is
+  perfectly normal, but the combinations are not well documented. Everything
+  supports -O.
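+
+As an example of the second point above (the file name is made up), compiling
+an ANSI C source file looks like this:
+
+    ack -ansi hello.c
+
+Without -ansi, the same file would be treated as K&R C.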
+  
+Disclaimer
+----------
+
+The ACK is mature, well-tested software, but the environment it was developed
+for and tested under is rather different from that available on
+today's machines. There will probably be little in the way of logical bugs,
+but there may be many compilation and API bugs.
+
+If you wish to use the ACK, *please* join the mailing list. We are interested
+in any reports of success and particularly, failure. If it does fail for you,
+we would love to know why, in as much detail as possible. Bug fixes are even
+more welcome.
+
+The ACK is licensed under a BSD-like license. Please see the 'Copyright' file
+for the full text.
+
+You can find the mailing list on the project's web site:
+
+	http://tack.sourceforge.net/
+	
+Please enjoy.
+
+David Given (dtrg on Sourceforge)
+dg@cowlark.com
+2005-06-24, 23:53
+
+# Revision history
+# $Log$
+# Revision 2.2  2005-06-24 23:20:41  dtrg
+# Added some new readmes at the top level.
+#

+ 20 - 0
TODO

@@ -0,0 +1,20 @@
+# $Source$
+# $State$
+
+This file contains things that I have noticed need fixing, but have not
+yet been fixed. Everything here should be reasonably low priority. Some
+bugs have been bodged around to make things work; these are all marked in
+the source with FIXME tags.
+
+
+* util/int needs to be rewritten to emulate sgtty with termios; look for
+  FIXMEs.
+
+* mach/i80/dl/nascom.c needs to be rewritten to use termios, not sgtty.
+
+
+# Revision history
+# $Log$
+# Revision 2.1  2005-06-24 23:20:41  dtrg
+# Added some new readmes at the top level.
+#

+ 144 - 0
TakeAction

@@ -0,0 +1,144 @@
+case $# in
+0)	PAR='make install && make clean' ; CMD=Action ;;
+1)	PAR="$1" ; CMD=Action ;;
+2)	PAR="$1" ; CMD="$2" ;;
+*)	echo Syntax: "$0" [command [file]] ; exit 1 ;;
+esac
+if test -r "$CMD"
+then :
+else
+	case "$CMD" in
+	Action)		echo No Action file present ;;
+	*)		echo No Action file "($CMD)" present ;;
+	esac
+fi
+case $0 in
+/*)	THISFILE=$0
+	;;
+*)	if [ -f $0 ]
+	then
+		THISFILE=`pwd`/$0
+	else
+		THISFILE=$0
+	fi
+	;;
+esac
+SYS=
+RETC=0
+{ while read LINE
+do
+	eval set $LINE
+	case x"$1" in
+	x!*)	;;
+	xname)		SYS="$2"
+			ACTION='$PAR'
+			DIR=.
+			FM=no
+			FAIL='Failed for $SYS, see $DIR/Out'
+			SUCC='$SYS -- done'
+			ATYPE=
+			FATAL=no
+			DOIT=yes
+			;;
+	xfatal)		FATAL=yes ;;
+	xaction|xindir)	case x$ATYPE in
+			x)	ACTION=$2 ; ATYPE=$1
+				case $ATYPE$FM in
+				indirno) FAIL='Failed for $SYS' ;;
+				esac
+				;;
+			*)	echo Already specified an $ATYPE for this name
+				RETC=65 ;;
+			esac ;;
+	xfailure)	FM=yes 
+			FAIL="$2" ;;
+	xsuccess)	SUCC="$2" ;;
+	xdir)		DIR="$2" ;;
+	xsystem)	PAT="$2"
+			oIFS=$IFS
+			IFS="|"
+			eval set $2
+			case x`ack_sys` in
+			x$1|x$2|x$3|x$4|x$5|x$6|x$7)	;;
+			*)	echo "Sorry, $SYS can only be made on $PAT systems"
+				DOIT=no
+				;;
+			esac
+			IFS=$oIFS
+			;;
+	xend)		case $DOIT in
+			no)	continue ;;
+			esac
+			case x$SYS in
+			x)	echo Missing name line; RETC=65 ;;
+			*)	if test -d $DIR
+				then (
+				    cd $DIR
+				    X=
+				    case $ATYPE in
+				    indir)	
+					if $THISFILE "$PAR" $ACTION
+					then eval echo $SUCC
+					else RETC=2 ; eval echo $FAIL
+					fi ;;
+				    *)
+					case "$ACTION" in
+					'$PAR')
+					    	ACTION="$PAR"
+					    ;;
+					*)  ;;
+					esac
+					if [ -f No$CMD ]
+					then
+					    x=`cat No$CMD`
+					    if [ "$ACTION" = "$x" ]
+					    then
+						ACTION='echo "No actions performed, No$CMD file present"'
+						SUCC='$SYS -- skipped'
+					    fi
+					fi
+					if eval "{ $ACTION ; } >Out 2>&1 </dev/null"
+					then	eval echo $SUCC
+						if [ "$SUCC" = '$SYS -- skipped' ]
+						then :
+						else echo "$ACTION" > No$CMD 2>/dev/null
+						fi
+					else RETC=1 ; X=: ; eval echo $FAIL
+					fi
+					;;
+				    esac
+				    (echo ------- `pwd`
+				     cat Out
+				     $X rm -f Out
+				    ) 2>/dev/null 1>&- 1>&3
+				    exit $RETC
+				)
+				case $? in
+				0) ;;
+				*) case $RETC in
+				   0) RETC=$? ;;
+				   esac ;;
+				esac
+				else
+				      echo Directory $DIR for $SYS is inaccessible
+				      RETC=66
+				fi ;;
+			esac
+			case $FATAL$RETC in
+			yes0)	;;
+			yes*)	echo Fatal error, installation stopped.
+				exit $RETC ;;
+			esac
+			SYS=
+			;;
+	*)		echo Unknown keyword "$1"
+			RETC=67 ;;
+	esac
+done
+exit $RETC
+} <$CMD
+RETX=$?
+case $RETX in
+0)	exit $RETC ;;
+*)	exit $RETX ;;
+esac

+ 9 - 0
bin/.distr

@@ -0,0 +1,9 @@
+cc-and-mkdep.ack
+cc-and-mkdep.all
+cc-and-mkdep.sun
+do_deps
+do_resolve
+lint-lib.ack
+lint-lib.unix
+mk_manpage
+rm_deps

+ 8 - 0
bin/cc-and-mkdep.ack

@@ -0,0 +1,8 @@
+#!/bin/sh
+: '$Id$'
+
+: Compile and make dependencies. First argument is the file on which the
+: dependencies must be produced. This version is for ACK.
+n=$1
+shift
+exec $CC -Rcem-A$n -Rcem-m $*

+ 21 - 0
bin/cc-and-mkdep.all

@@ -0,0 +1,21 @@
+#!/bin/sh
+: '$Id$'
+
+: Compile and make dependencies. First argument is the file on which the
+: dependencies must be produced. This version is a generic one that should
+: work for all Unix systems.
+n=$1
+shift
+cpp_args=
+for i in $*
+do
+	case $i in
+	-I*|-D*|-U*)	cpp_args="$cpp_args $i"
+			;;
+	-*)		;;
+	*)		cpp_args="$cpp_args $i"
+			;;
+	esac
+done
+$UTIL_HOME/lib.bin/cpp -d -m $cpp_args > $n 2>/dev/null
+exec $CC $*

+ 8 - 0
bin/cc-and-mkdep.sun

@@ -0,0 +1,8 @@
+#!/bin/sh
+: '$Id$'
+
+: Compile and make dependencies. First argument is the file on which the
+: dependencies must be produced. This version is for the SUN cc.
+n=$1
+shift
+exec $CC -Qpath $UTIL_HOME/lib.bin -Qoption cpp -d$n -Qoption cpp -m $*

+ 19 - 0
bin/do_deps

@@ -0,0 +1,19 @@
+#!/bin/sh
+: '$Id$'
+
+: Produce dependencies for all argument files
+
+for i in $*
+do
+	n=`basename $i .c`
+	if [ -f $n.dep ]
+	then
+		:
+	else
+		echo $n.'$(SUF):	'$i > $n.dep
+		echo "	head -5 $n.dep > $n.dp1" >> $n.dep
+		echo '	CC="$(CC)" UTIL_HOME="$(UTIL_HOME)" $(CC_AND_MKDEP) '$n.dp2 '$(CFLAGS)' -c $i >> $n.dep
+		echo "	cat $n.dp1 $n.dp2 > $n.dep" >> $n.dep
+		echo "	rm -f $n.dp1 $n.dp2" >> $n.dep
+	fi
+done

+ 48 - 0
bin/do_resolve

@@ -0,0 +1,48 @@
+#!/bin/sh
+: '$Id$'
+
+: Resolve name clashes in the files on the argument list. If these
+: files reside in another directory, a copy is made in the current
+: directory. If not, it is overwritten. Never do this in a source
+: directory! A list of the new files is produced on standard output.
+
+UTIL_BIN=$UTIL_HOME/bin
+
+trap "rm -f tmp$$ a.out nmclash.* longnames clashes" 0 1 2 3 15
+
+: first find out if we have to resolve problems with identifier significance.
+
+cat > nmclash.c <<'EOF'
+/* Accepted if many characters of long names are significant */
+abcdefghijklmnopr() { }
+abcdefghijklmnopq() { }
+main() { }
+EOF
+if $CC nmclash.c
+then	: no identifier significance problem
+	for i in $*
+	do
+		echo $i
+	done
+else
+	$UTIL_BIN/prid -l7 $* > longnames
+
+	: remove code generating routines from the clashes list.
+	: code generating routine names start with C_.
+	: also remove names starting with flt_.
+
+	sed '/^C_/d' < longnames | sed '/^flt_/d' > tmp$$
+	$UTIL_BIN/cclash -c -l7 tmp$$ > clashes
+	for i in $*
+	do
+		$UTIL_BIN/cid -Fclashes < $i > tmp$$
+		n=`basename $i .xxx`
+		if cmp -s $n tmp$$
+		then
+			rm -f tmp$$
+		else
+			mv tmp$$ $n
+		fi
+		echo $n
+	done
+fi

+ 1 - 0
bin/em.pascal

@@ -0,0 +1 @@
+exec /usr/em/doc/em/int/em /usr/em/doc/em/int/tables ${1-e.out} core

+ 13 - 0
bin/lint-lib.ack

@@ -0,0 +1,13 @@
+#!/bin/sh
+: '$Id$'
+
+: Create a lint library file. The name of the library file is constructed
+: from the first argument. The second argument indicates the directory where
+: the result is to be placed. This version is for ACK lint.
+
+n=$1
+shift
+d=$1
+shift
+lint -L$n $*
+mv $n.llb $d

+ 13 - 0
bin/lint-lib.unix

@@ -0,0 +1,13 @@
+#!/bin/sh
+: '$Id$'
+
+: Create a lint library file. The name of the library file is constructed
+: from the first argument. The second argument indicates the directory where
+: the result is to be placed. This version is for Unix lint.
+
+n=$1
+shift
+d=$1
+shift
+/usr/bin/lint -C$n $*
+mv llib-l$n.ln $d

+ 20 - 0
bin/mk_manpage

@@ -0,0 +1,20 @@
+#!/bin/sh
+
+num=`expr $1 : '.*\.\([1-8]\)'`
+
+if [ -d $2/man ] ; then : ; else mkdir $2/man ; fi
+if [ -f $2/man/head ] ; then : ; else cat > $2/man/head <<'EOF'
+.rn TH yy
+.de TH
+.di zz
+.yy "\\$1" "\\$2" "\\$3" "\\$4"
+.ds ]W 5th ACK distribution
+.ds ]D Amsterdam Compiler Kit
+.ds ]L "\\$3
+.di
+.rm zz
+..
+EOF
+fi
+if [ -d $2/man/man$num ] ; then : ; else mkdir $2/man/man$num ; fi
+cat $2/man/head $1  | sed "s!TARGETHOME!$2!" > $2/man/man$num/`expr //$1 : '.*/\([^/]*\)'`

+ 9 - 0
bin/rm_deps

@@ -0,0 +1,9 @@
+#!/bin/sh
+: $Id$
+
+: remove dependencies from a makefile, write result on standard output.
+: we cannot do this directly in a makefile because some make versions
+: have # start a comment, always.
+
+sed -e '/^#DEPENDENCIES/,$d' $1
+echo '#DEPENDENCIES'

+ 3 - 0
distr/Action

@@ -0,0 +1,3 @@
+name "EM tables"
+dir etc
+end

+ 16 - 0
distr/Action1

@@ -0,0 +1,16 @@
+name "m68k2/cg bootstrap files"
+dir mach/m68k2/cg
+action "make EMHOME=/proj/em/Work distr"
+end
+name "vax4/cg bootstrap files"
+dir mach/vax4/cg
+action "make EMHOME=/proj/em/Work distr"
+end
+name "m68020/ncg bootstrap files"
+dir mach/m68020/ncg
+action "make EMHOME=/proj/em/Work distr"
+end
+name "m68k4/cg bootstrap files"
+dir mach/m68k4/cg
+action "make EMHOME=/proj/em/Work distr"
+end

+ 15 - 0
distr/Exceptions

@@ -0,0 +1,15 @@
+++ ./doc/install.pr made
+++ ./doc/int/.distr made
+++ ./etc/new_table_done made
+++ ./lang/cem/cemcom.ansi/Version.c made
+++ ./lang/cem/libcc.ansi/stdlib/malloc.c made
+++ ./lang/cem/cemcom/Version.c made
+++ ./lang/pc/comp/Version.c made
+++ ./lang/m2/comp/Version.c made
+++ ./lang/m2/m2mm/Version.c made
+++ ./mach/sparc/ce/EM_table made
+++ ./mach/sparc_solaris/libem/LIST made
+++ ./util/LLgen/src/LLgen.c.dist made
+++ ./util/cpp/Version.c made
+++ ./util/ego/share/pop_push.h made
+++ ./util/grind/ops.c made

+ 90 - 0
distr/How_To

@@ -0,0 +1,90 @@
+How to make a distribution
+--------------------------
+
+I have written a new tool to generate the distributions that does not rely on
+having a local CVS server --- distr/mkdist.
+
+To use it, you need to specify your CVS work tree, the destination directory
+that the distribution will be written to, plus flags. It should be self-
+documenting; use:
+
+	mkdist --help
+	
+...to get documentation.
+
+It uses .distr files in exactly the same way as the previous mechanism.
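+
+As an illustration (the paths here are made up), a run that makes physical
+copies of the files instead of hard links would look something like:
+
+	mkdist --srcdir ~/src/Ack-5.6 --destdir /tmp/ack-dist --copy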
+
+The documentation for the old distribution tools follows.
+
+David Given
+dg@cowlark.com
+2005-06-25
+
+-----------------------------------------------------------------------------
+
+How to make a fresh distribution:
+For a distribution you need ".distr" files and RCS files.
+The EM home directory contains a file called ".distr". It contains
+the names of all the files and directories you want to have in the distribution.
+The directories should contain .distr files, the other files should
+be placed under CVS.
+There are files that derive from other files and yet should be placed
+in the distribution.
+These files should not be placed under RCS or CVS.
+The file "Exceptions" in this directory contains the current list of
+these files.
+
+When all this is correct, use the shell script mktree to extract
+the distribution from the EM tree.
+	sh mktree destination_tree repository_tree <distrname> 2>f.attf
+Use the "cvs rtag" command to give the distribution a name first!
+Make sure that the destination tree exists and is empty!
+Failing to do that will almost certainly result in a welter of
+error messages.
+The file f.attf contains mktree error messages and should be compared
+to Exceptions.
+The actions of mktree are quite complicated. It starts in the current
+directory creating a version in the destination directory.
+Then it reads the .distr file.
+For each file mentioned there it performs certain actions:
+1- Directory	Change to that directory and call yourself recursively.
+2- File
+   a-               Does a file LIST exist in this directory AND
+                    is the first line of LIST equal to the name of the
+                    destination file? If so, try to extract all the files
+                    named in the rest of the LIST file and call the program
+                    arch to create a library "arch cDr `cat LIST`".
+                    In this manner libraries can be distributed whose members
+                    have their own RCS file.
+              else
+   b-		    Try to run 'make distr'
+	      else
+   c-		    Try to run 'make <filename>'
+	      else
+   d-               give a message that says "not present" (or some such).
+
+Now, the tree contains all the files in the distribution, but it also contains
+files that should not be in the distribution, especially the files created
+by CVS.
+That is why we now give the command:
+	dtar cdf distr .
+The file distr is the one you should put on tape!
+But,.... before doing that: Try it out!
+Repeat the process described in the installation manual.
+Only if that succeeds can you be sure that you included all the files needed.
+					Good Luck,
+						Ed Keizer, 85/4/15.
+
+Updated for 3rd distribution by Ceriel Jacobs, 87/3/11.
+And again,
+					Good Luck!
+
+Updated for 4th distribution by Ceriel Jacobs, 88/4/08.
+And again,
+					Good Luck!
+Updated for 5th distribution by Ceriel Jacobs, 91/19/12.
+And again,
+					Good Luck!
+Updated for 1st upgrade to 5th distribution by Ceriel Jacobs, 91/12/11.
+And again,
+					Good Luck!

+ 26 - 0
distr/dwalk

@@ -0,0 +1,26 @@
+#!/bin/sh
+
+: ${CDIR=.}
+${DF-:} $CDIR .distr
+if test ! -r $DESTDIR/$CDIR/.distr
+then
+	echo ++ no .distr in $CDIR 1>&2
+        exit 0
+fi
+for i in `cat $DESTDIR/$CDIR/.distr`
+do
+        if test -d $i
+        then
+                ( if cd $i
+		  then 
+			CDIR=$CDIR/$i
+			export CDIR
+			exec $DDIR/dwalk $*
+		  else
+			echo ++ Could not access $CDIR/$i 1>&2
+		  fi
+		)
+	else
+		${DF-:} $CDIR $i
+        fi
+done

+ 1 - 0
distr/echod

@@ -0,0 +1 @@
+echo $1

+ 26 - 0
distr/f.attf

@@ -0,0 +1,26 @@
+-- ./doc/install.pr no RCS file
+-- ./h/em_mnem.h no RCS file
+-- ./h/em_pseu.h no RCS file
+-- ./h/em_spec.h no RCS file
+-- ./lang/basic/src/y.tab.c no RCS file
+-- ./lang/basic/src/y.tab.h no RCS file
+-- ./lang/pc/pem/pem22.m no RCS file
+-- ./lang/pc/pem/pem24.m no RCS file
+-- ./lang/pc/pem/pem44.m no RCS file
+-- ./lib/LLgen/incl no RCS file
+-- ./lib/LLgen/rec no RCS file
+-- ./mach/m68k2/cg/tables1.c no RCS file
+-- ./mach/m68k2/cg/tables1.h no RCS file
+-- ./mach/m68020/ncg/tables1.c no RCS file
+-- ./mach/m68020/ncg/tables1.h no RCS file
+-- ./mach/vax4/cg/tables1.c no RCS file
+-- ./mach/vax4/cg/tables1.h no RCS file
+-- ./util/LLgen/src/parser no RCS file
+-- ./util/LLgen/src/LLgen.c no RCS file
+-- ./util/LLgen/src/Lpars.c no RCS file
+-- ./util/LLgen/src/Lpars.h no RCS file
+-- ./util/LLgen/src/tokens.c no RCS file
+-- ./util/data/em_flag.c no RCS file
+-- ./util/data/em_mnem.c no RCS file
+-- ./util/data/em_pseu.c no RCS file
+-- ./util/ego/share/pop_push.h no RCS file

+ 10 - 0
distr/listall

@@ -0,0 +1,10 @@
+case $# in
+0)	DESTDIR=. ;;
+1)	DESTDIR=$1 ;;
+*)	echo $0 [directory] ; exit 1 ;;
+esac
+DD=`pwd`/listall.d
+DW=`pwd`/dwalk
+export DD DESTDIR
+cd $DESTDIR
+$DW

+ 2 - 0
distr/listall.d

@@ -0,0 +1,2 @@
+echo "<$1>"
+ls -bCdx `cat .distr`

+ 10 - 0
distr/listdirs

@@ -0,0 +1,10 @@
+case $# in
+0)	DIR=. ;;
+1)	DIR=$1 ;;
+*)	echo $0 [directory] ; exit 1 ;;
+esac
+DD=`pwd`/echod
+DW=`pwd`/dwalk
+export DD
+cd $DIR
+$DW

+ 40 - 0
distr/mk_distr_syms

@@ -0,0 +1,40 @@
+#!/bin/sh
+: Utility to make a tree of symbolic links to source tree.
+: Mount the source tree read-only, use this script, and then try installation.
+case $# in
+2)	;;
+*)	echo "Usage: $0 <source-tree> <symlink-tree>" 1>&2
+	exit 1
+	;;
+esac
+if [ -f $1/.distr ]
+then
+	for i in `cat $1/.distr`
+	do
+		if [ -d $1/$i ]
+		then
+			if mkdir $2/$i && $0 $1/$i $2/$i
+			then
+				:
+			else
+				exit 2
+			fi
+		else
+			if [ -f $1/$i ] 
+			then
+				if ln -s $1/$i $2/$i
+				then
+					:
+				else
+					exit 3
+				fi
+			else
+				echo "Missing file $1/$i" 1>&2
+				exit 4
+			fi
+		fi
+	done
+else
+	echo "No .distr file in $1" 1>&2
+	exit 5
+fi

+ 11 - 0
distr/mka

@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+for i in `tail +2 $DESTDIR/$1/LIST`
+do
+	${DF-false} $1 $i
+done
+cd $DESTDIR/$1
+arch cDr `cat LIST`
+: I do not remove the files constituating the library, because
+: they might be present in .distr

+ 0 - 0
distr/mkd


+ 165 - 0
distr/mkdist

@@ -0,0 +1,165 @@
+#!/bin/sh
+# $Source$
+# $State$
+
+# Set up default variables.
+
+destdir=
+srcdir=`pwd`
+arch=/usr/local/bin/arch
+delete=no
+copy=ln
+
+# --- Options parsing -------------------------------------------------------
+
+while [ "$1" != "" ]; do
+	case "$1" in
+		-s|--srcdir)
+			srcdir="$2"
+			shift
+			;;
+			
+		-d|--destdir)
+			destdir="$2"
+			shift
+			;;
+			
+		-x|--delete)
+			delete=yes
+			;;
+			
+		-c|--copy)
+			copy="cp -dp"
+			;;
+			
+		-a|--arch)
+			arch="$2"
+			shift
+			;;
+			
+		-h|--help)
+			echo "mkdist [options]"
+			echo "Options are:"
+			echo "  -s --srcdir <path>   The CVS tree to read from. (default: CWD)"
+			echo "  -d --destdir <path>  The directory to create the distribution in."
+			echo "  -x --delete          Erase the destination directory first."
+			echo "  -c --copy            Make physical copies of the files. (default: hardlink)"
+			echo "  -a --arch <path>     Where the ACK 'arch' tool is."
+			echo "  -h --help            Display this message."
+			exit 0
+			;;
+			
+		*)
+			echo "Unrecognised option. Try --help for help."
+			exit 1
+	esac
+	shift
+done
+
+if [ "$destdir" == "" ]; then
+	echo "You must specify a destination directory. (Try --help for help.)"
+	exit 1
+fi
+
+# --- Main routines ---------------------------------------------------------
+
+# These two routines do the work of traversing the source tree and building
+# the distribution tree.
+
+addfile() {
+	local f
+	f="${1##$srcdir/}"
+	mkdir -p $destdir/`dirname $f`
+	$copy "$1" "$destdir/$f"
+}
+
+process_dir() {
+	local path
+	local archivename
+	
+	path=$1
+	cd $path
+	
+	# Look for a LIST file and cache the first line.
+	
+	archivename=
+	if [ -f LIST ]; then
+		archivename=`head -1 LIST`
+	fi
+
+	for i in `cat $path/.distr`; do
+		if [ -d $i ]; then
+			# This is a directory. Recurse into it.
+			
+			( process_dir $path/$i )
+		elif [ -f $i ]; then
+			# This is a file.
+			
+			addfile $path/$i
+		elif [ "$i" = "$archivename" ]; then
+			# Build the named archive.
+
+			$arch cDr `cat LIST`
+			addfile $path/$archivename
+		else
+		(
+			PATH=$PATH:.
+			export PATH
+			make distr || make $i || (
+				echo "Don't know what to do with $i, listed in $1/.distr."
+				exit 1
+			)
+			
+			if [ ! -f "$path/$i" ]; then
+				echo "Make failed for $i, listed in $path/.distr"
+				exit 1
+			fi
+			addfile $path/$i
+		)
+		fi
+	done
+}
+
+# --- Main program ----------------------------------------------------------
+
+# Test to make sure that $arch points to the right thing.
+
+if !(strings $arch | grep archiver > /dev/null); then
+	echo "$arch does not seem to point at the ACK archiver tool."
+	echo "(Don't confuse this with the Linux tool for displaying your"
+	echo "architecture.)"
+	echo ""
+	echo "Press RETURN to go ahead anyway, or CTRL+C to abort."
+	read
+fi
+
+# Actually do the work.
+
+echo "Creating distribution from CVS tree: $srcdir"
+echo "              into destination tree: $destdir"
+echo ""
+
+if [ -e $destdir ]; then
+	if [ "$delete" == "yes" ]; then
+		echo "Press RETURN to erase $destdir and its contents, or CTRL+C to abort."
+		read
+		echo "Erasing..."
+		rm -rf "$destdir"
+	else
+		echo "$destdir exists. Aborting."
+		exit 1
+	fi
+fi
+
+echo "Working..."
+mkdir -p $destdir
+process_dir $srcdir
+echo "Done."
+
+# Revision history
+# $Log$
+# Revision 1.2  2005-06-24 23:19:23  dtrg
+# Added new mkdist tool.
+#
+# Revision 1.1  2005/06/24 22:13:57  dtrg
+# Created new tool to generate distributions.

+ 19 - 0
distr/mkf

@@ -0,0 +1,19 @@
+#!/bin/sh
+
+if [ -f $DESTDIR/$1/$2 ]
+then
+	:
+elif grep LIST $DESTDIR/$1/.distr >/dev/null 2>&1 &&
+     (test "$2" = "`head -1 $DESTDIR/$1/LIST`") >/dev/null 2>&1 &&
+     ${DA-false} "$1" "$2"
+then
+:	Fetched library contents one by one and put them together
+elif ( cd $DESTDIR/$1 ; make distr ) > /dev/null 2>&1
+then
+	echo ++ $1/$2 made 1>&2
+elif ( cd $DESTDIR/$1 ; make $2 ) > /dev/null 2>&1
+then
+	echo ++ $1/$2 made 1>&2
+else
+	echo ++ $1/$2 not present 1>&2
+fi

+ 1 - 0
distr/mks

@@ -0,0 +1 @@
+cp .distr $DESTDIR/$1

+ 42 - 0
distr/mktree

@@ -0,0 +1,42 @@
+case $# in
+2|3)	;;
+*)	echo Usage: $0 directory repdir [ SVrecord ] 1>&2 ; exit 1 ;;
+esac
+case $0 in
+/*)	DDIR=`dirname $0`
+	;;
+*)	DDIR=`pwd`/`dirname $0`
+	;;
+esac
+case $1 in
+/*)	DESTDIR=$1 ;;
+*)	DESTDIR=`pwd`/$1 ;;
+esac
+case $2 in
+/*)	REPDIR=$2 ;;
+*)	REPDIR=`pwd`/$2 ;;
+esac
+# DD=$DDIR/mkd
+# export DD
+mkdir -p $DESTDIR
+CVSROOT=/usr/proj/em/Repositories
+export CVSROOT
+cd $DESTDIR
+case $# in
+3)
+	cvs checkout world -r $3
+	;;
+2)
+	cvs checkout world
+	;;
+esac
+cd $REPDIR
+DF=$DDIR/mkf
+DA=$DDIR/mka
+export DDIR DESTDIR DF DA REPDIR
+
+$DDIR/dwalk
+
+cd $DESTDIR
+find . -type d -print | xargs chmod "uog+rx"
+chmod -R "og-w,u+w,uog+r" .

+ 26 - 0
distr/todistr

@@ -0,0 +1,26 @@
+REV=
+FILE=
+while :
+do
+	case $# in
+	0)	break ;;
+	esac
+	ARG="$1"
+	shift
+	case "$ARG" in
+	-r*)	REV=`echo "$ARG"| sed s/-r//` ;;
+	-*)	FLAGS="$FLAGS $ARG" ;;
+	*)	case x$FILE in
+		x)	FILE="$ARG" ;;
+		*)	echo todistr can only be done on one file at the time
+			exit 1 ;;
+		esac
+	esac
+done
+case x$REV in
+x)	REV=`rlog -h "$FILE"|sed -n -e '/head/s/^head:[ 	]*//p'` ;;
+esac
+case x$REV in
+x)	exit 2 ;;
+esac
+rcs -ndistr4:$REV $FLAGS $FILE

+ 2 - 0
distr/ts

@@ -0,0 +1,2 @@
+DD=`pwd`/ts
+echo OK

+ 33 - 0
doc/.distr

@@ -0,0 +1,33 @@
+READ_ME
+Makefile
+proto.make
+ack.doc
+basic.doc
+cg.doc
+crefman.doc
+ansi_C.doc
+em
+install.doc
+install.pr
+ncg.doc
+pcref.doc
+peep.doc
+regadd.doc
+toolkit.doc
+v7bugs.doc
+val.doc
+LLgen
+6500.doc
+i80.doc
+z80.doc
+m68020.doc
+m2ref.doc
+nopt.doc
+top
+ego
+occam
+int
+ceg
+sparc
+lint
+pascal

+ 1893 - 0
doc/6500.doc

@@ -0,0 +1,1893 @@
+. \" $Id$"
+.RP
+.ND Dec 1984
+.TL
+.B
+A backend table for the 6500 microprocessor
+.R
+.AU
+Jan van Dalen
+.AB
+The backend table is part of the Amsterdam Compiler Kit (ACK).
+It translates the intermediate language family EM to a machine
+code for the MCS6500 microprocessor family.
+.AE
+.bp
+.DS C
+.B
+THE MCS6500 MICROPROCESSOR.
+.R
+.DE
+.NH 0
+Introduction
+.PP
+Why a back end table for the MCS6500 microprocessor family?
+Although the MCS6500 microprocessor family has a simple
+instruction set and internal structure, it is used in a
+variety of microcomputers and homecomputers.
+This is because of its low cost.
+As an example the Apple II, a well known and widespread
+microcomputer, uses the MCS6502 CPU.
+Also the BBC homecomputer, whose popularity is growing day
+by day, uses the MCS6502 CPU.
+The BBC homecomputer is based on the MCS6502 CPU although
+better and stronger microprocessors are available.
+The designers at Acorn Computer Industries probably
+chose the MCS6502 because of the amount of software
+available for this CPU.
+Given its widespread use, a variety of software
+will be needed for it.
+One can think of games, administration programs,
+teaching programs, basic interpreters and other application
+programs.
+Even though it will not be possible to run the total compiler kit
+on an MCS6500 based computer, it is possible to write application
+programs in a high level language, such as Pascal or C, on a
+minicomputer.
+These application programs can be tested and compiled on that
+minicomputer and put in a ROM (Read Only Memory), for example,
+so that they can be executed by an MCS6500 CPU.
+The strategy of writing test programs on a minicomputer,
+compiling them and then executing them on an MCS6500 based
+microcomputer was used during the development of the back end.
+The minicomputer used is an M68000 based one, manufactured by
+Bleasdale Computer Systems Ltd..
+The micro- or homecomputer used is a BBC microcomputer,
+manufactured by Acorn Computer Ltd..
+.NH
+The MOS Technology MCS6500
+.PP
+The MCS6500 is a family of CPU devices developed by MOS
+Technology [1].
+The members of the MCS6500 family are the same chip in
+different housings.
+The MCS6502, the big brother in the family, can handle 64k
+bytes of memory, while for example the MCS6504 can only handle
+8k bytes of memory.
+This difference is due to the fact that the MCS6502 is in a
+40-pin package and the MCS6504 in a 28-pin package, so fewer
+address lines are available.
+.bp
+.NH
+The MCS6500 CPU programmable registers
+.PP
+The MCS6500 series is based on the same chip so all have the
+same programmable registers.
+.sp 9
+.NH 2
+The accumulator A.
+.PP
+The accumulator A is the only register on which the arithmetic
+and logical instructions can be used.
+For example, the instruction ADC (add with carry) adds the
+contents of the accumulator A and a byte from memory or data.
+.NH 2
+The index register X.
+.PP
+As the name suggests this register can be used for some
+indirect addressing modes.
+The modes are explaned below.
+.NH 2
+The index register Y.
+.PP
+This register is, just as the index register X, used for
+certain indirect addressing modes.
+These addressing modes are different from the modes which
+use index register X.
+.NH 2
+The program counter PC
+.PP 
+This is the only 16-bit register available.
+It is used to point to the next instruction to be
+carried out.
+.NH 2
+The stack pointer SP
+.PP
+The stack pointer is an 8-bit register, so the stack can contain
+at most 256 bytes.
+The CPU always supplies 00000001 as the highbyte of any stack address,
+which means that memory locations
+.B
+0100
+.R
+through
+.B
+01FF
+.R
+are permanently assigned to the stack.
+.sp 12
+.NH 2
+The status register
+.PP
+The status register maintains six status flags and a master
+interrupt control bit.
+.br
+These are the six status flags:
+    Carry        (c)
+    Zero         (z)
+    Overflow     (o)
+    Sign         (n)
+    Decimal mode (d)
+    Break        (b)
+
+
+
+
+
+The bit (i) is the master interrupt control bit.
+.NH
+The MCS6500 memory layout.
+.PP
+In the MCS6500 memory space three areas have special meaning.
+These areas are:
+.IP 1)
+Top page.
+.IP 2)
+Zero page.
+.IP 3)
+The stack.
+.PP
+MCS6500 memory is divided up into pages.
+These pages consist 256 bytes.
+So in a memory address the highbyte denotes the page number
+and the lowbyte the offset within the page.
+.NH 2
+Top page.
+.PP
+When an MCS6500 is restarted it jumps indirectly via memory address
+.B
+FFFC.
+.R
+At
+.B
+FFFC
+.R
+(lowbyte) and 
+.B
+FFFD
+.R
+(highbyte) there must be the address of the bootstrap subroutine.
+When a break instruction (BRK) occurs or an interrupt takes place,
+the MCS6500 jumps indirect through memory address
+.B
+FFFE.
+.R
+.B
+FFFE
+.R
+and 
+.B
+FFFF
+.R
+thus, must contain the address of the interrupt routine.
+The former only goes for maskable interrupts.
+There also exists a non-maskable interrupt.
+This causes the MCS6500 to jump indirectly through memory address
+.B
+FFFA.
+.R
+So the top six bytes of memory are used by the operating system
+and therefore not available for the back end.
+.NH 2
+Zero page.
+.PP
+This page has a special meaning in the sense that addressing
+this page uses special opcodes.
+Since a page consists of 256 bytes, only one byte is needed
+for addressing zero page.
+So an instruction which uses zero page occupies two bytes.
+It also uses fewer clock cycles while carrying out the instruction.
+Zero page is also needed when indirect addressing is used.
+This means that when indirect addressing is used, the address must
+reside in zero page (two consecutive bytes).
+In this case (the back end), zero page is used, for example
+to hold the local base, the second local base, the stack pointer
+etc.
+.NH 2
+The stack.
+.PP
+The stack is described in paragraph 3.5 about the MCS6500
+programmable registers.
+.NH 
+The memory adressing modes
+.PP
+MCS6500 memory reference instructions use direct addressing,
+indexed addressing, and indirect addressing.
+.NH 2
+direct addressing.
+.PP
+Three-byte instructions use the second and third bytes of the
+object code to provide a direct 16-bit address:
+therefore, 65.536 bytes of memory can be addressed directly.
+The commonly used memory reference instructions also have a two-byte
+object code variation, where the second byte directly addresses
+one of the first 256 bytes.
+.NH 2
+Base page, indexed addressing.
+.PP
+In this case, the instruction has two bytes of object code.
+The contents of either the X or Y index registers are added to the 
+second  object code byte in order to compute a memory address.
+This may be illustrated as follows:
+.sp 15
+Base page, indexed addressing, as illustrated above, is 
+wraparound - which means that there is no carry.
+If the sum of the index register and second object code byte contents
+is more than
+.B
+FF
+.R
+, the carry bit will be discarded.
+This may be illustrated as follows:
+.sp 9
+.NH 2
+Absolute indexed addressing.
+.PP
+In this case, the contents of either the X or Y register are added
+to a 16-bit direct address provided by the second and third bytes
+of an instruction's object code.
+This may be illustrated as follows:
+.sp 10
+.NH 2
+Indirect addressing.
+.PP
+Instructions that use simple indirect addressing have three bytes of
+object code.
+The second and third object code bytes provide a 16-bit address;
+therefore, the indirect address can be located anywhere in
+memory.
+This is straightforward indirect addressing.
+.NH 3
+Pre-indexed indirect addressing.
+.PP
+In this case, the object code consists of two bytes and the 
+second object code byte provides an 8-bit address.
+Instructions that use pre-indexed indirect addressing add the contents
+of the X index register and the second object code byte to access
+a memory location in the first 256 bytes of memory, where the 
+indirect address will be found:
+.sp 18
+When using pre-indexed indirect addressing, once again wraparound
+addition is used, which means that when the X index register contents
+are added to the second object code byte, any carry will be discarded.
+Note that only the X index register can be used with pre-indexed
+addressing.
+.NH 3
+Post-indexed indirect addressing.
+.PP
+In this case, the object code consists of two bytes and the
+second object code byte provides an 8-bit address.
+Now the second object code byte identifies a location
+in the first 256 bytes of memory where an indirect address
+will be found.
+The contents of the Y index register are added to this indirect
+address.
+This may be illustrated as follows:
+.sp 18
+Note that only the Y index register can be used with post-indexed
+indirect addressing.
+.bp
+.NH
+What the CPU has and doesn't have.
+.PP
+Although the designers of the MCS6500 CPU family state that
+there is nothing very significant about the short stack (only
+256 bytes), this stack caused problems for the back end.
+The designers say that a 256-byte stack usually is sufficient
+for any typical microcomputer, but this is only true if the stack
+is used only for return addresses of the JSR (jump to
+subroutine) instruction.
+But since the EM machine is supposed to be a stack machine and
+high level languages need parameters and
+locals in their procedures and functions, this short stack
+is insufficient.
+So a software stack is implemented in this back end, requiring two
+additional subroutines for stack handling.
+These two stack handling subroutines slow down the processing time
+of a program since the stack is used heavily.
+.PP
+Since parameters and locals of EM procedures are offset
+from the local base of that procedure, indirect addressing
+is heavily used.
+Offsets are positive (for parameters) and negative (for
+local variables).
+As explained before in the section on addressing modes, the MCS6500 has a
+post-indexed indirect addressing mode.
+This addressing mode can only handle positive offsets.
+This raises a problem for accessing the local variables.
+I have chosen the following solution.
+A second local base is introduced.
+This second local base is the real local base minus
+a constant BASE.
+In the present situation of the back end the value of BASE
+is 240.
+This means that there are 240 bytes reserved for local
+variables to be indirectly addressed and 14 bytes for
+the parameters.
+.DS C
+.B
+THE CODE GENERATOR.
+.R
+.DE
+.NH 0
+Description of the machine table.
+.PP
+The machine description table consists of the following sections:
+.IP 1.
+The macro definitions.
+.IP 2.
+Constant definitions.
+.IP 3.
+Register definitions.
+.IP 4.
+Token definitions.
+.IP 5.
+Token expressions.
+.IP 6.
+Code rules.
+.IP 7.
+Move definitions.
+.IP 8.
+Test definitions.
+.IP 9.
+Stack definitions.
+.NH 2
+Macro definitions.
+.PP
+The macro definitions at the top of the table are expanded
+by the preprocessor on occurrence in the rest of the table.
+.NH 2
+Constant definitions.
+.PP
+There are three constants which must be defined at first.
+The are:
+.IP EM_WSIZE: 11
+Number of bytes in a machine word.
+This is the number of bytes a simple
+.B
+loc
+.R
+instruction will put on the stack.
+.IP EM_PSIZE:
+Number of bytes in a pointer.
+This is the number of bytes a
+.B
+lal
+.R
+instruction will put on the stack.
+.IP EM_BSIZE:
+Number of bytes in the hole between AB and LB.
+The calling sequence only saves LB on the stack so this
+constant is equal to the pointer size.
+.NH 1
+Register definitions.
+.PP
+The only important register definition is the definition of
+the registerpair AX.
+Since the rest of the machine's registers Y, PC, ST serve
+special purposes, the code generator cannot use them.
+.NH 2
+Token definitions
+.PP
+There is a fake token.
+This token is put in the table, since the code generator generator
+complains if it cannot find one.
+.NH 2
+Token expression definitions.
+.PP
+The token expression is also a fake one.
+This token expression is put in the table, since the code generator
+generator complains if it cannot find one.
+.NH 2
+Code rules.
+.PP
+The code rule section is the largest section in the table.
+They specify EM patterns, stack patterns, code to be generated,
+etc.
+The syntax is:
+.IP code rule:
+EM pattern '|' stack pattern '|' code '|'
+stack replacement '|' EM replacement '|'
+.PP
+All patterns are optional, however there must be at least one
+pattern present.
+If the EM pattern is missing the rule becomes a rewriting
+rule or a
+.B
+coercion
+.R
+to be used when code generation cannot continue because of an
+invalid stack pattern.
+The code rules are preceded by the word CODE:.
+.NH 3
+The EM pattern.
+.PP
+The EM pattern consists of a list of EM mnemonics followed by
+a boolean expression. Examples:
+.sp 1
+.br
+.B
+loe
+.R
+.sp 1
+will match a single
+.B
+loe
+.R
+instruction,
+.sp 1
+.br
+.B
+loc loc cif
+.R
+$1==2 && $2==8
+.sp 1
+is a pattern that will match
+.sp 1
+.br
+.B
+loc
+.R
+2
+.br
+.B
+loc
+.R
+8
+.br
+.B
+cif
+.R
+.sp 1
+and
+.sp 1
+.br
+.B
+lol
+inc
+stl
+.R
+$1==$3
+.sp 1
+will match for example
+.sp 1
+.br
+.B
+lol
+.R
+6
+.br
+.B
+inc
+.R
+.br
+.B
+stl
+.R
+6
+.sp 1
+A missing boolean expression evaluates to TRUE.
+.PP
+The code generator will match the longest EM pattern on every occasion,
+if two patterns of the same length match the first in the table
+will be chosen, while all patterns of length greater than or equal
+to three are considered to be of the same length.
+.NH 3
+The stack pattern.
+.PP
+The only stack pattern that can occur is R16, which means that the
+registerpair AX contains the word on top of the stack.
+If this is not the case a coercion occurs.
+This coercion generates a "jsr Pop", which means that the top
+of the stack is popped and stored in the registerpair AX.
+.NH 3
+The code part.
+.PP
+The code part consists of three parts: stack cleanup, register
+allocation, and code to be generated.
+All of these may be omitted.
+.NH 4
+Stack cleanup.
+.PP
+When generating something like a branch instruction it might be
+necessary to empty the fake stack, that is, remove the AX registerpair.
+This is done by the instruction remove(ALL)
+.NH 4
+Register allocation.
+.PP
+If the machine code to be generated uses the registerpair AX,
+this is signaled to the code generator by the allocate(R16)
+instruction.
+If the registerpair AX resides on the fake stack, this will result
+in a "jsr Push", which means that the registerpair AX is pushed on
+the stack and will be free for further use.
+If registerpair AX is not on the fake stack nothing happens.
+.NH 4
+Code to be generated.
+.PP
+Code to be generated is specified as a list of items of the following
+kind:
+.IP 1)
+A string in double quotes("This is a string").
+This is copied to the codefile and a newline ('\n') is appended.
+Inside the string all normal C string conventions are allowed,
+and substitutions can be made of the following sorts.
+.RS
+.IP a)
+$1, $2 etc. These are the operands of the corresponding EM
+instructions and are printed according to their type.
+To put a real '$' inside the string it must be doubled ('$$').
+.IP b)
+%[1], %[2.reg], %[b.1] etc. These have their obvious meaning.
+If they describe a complete token (%[1]) the printformat for
+the token is used.
+If they stand for a basic term in an expression they will be
+printed according to their type.
+To put a real '%' inside the string it must be doubled ('%%').
+.IP c)
+%( arbitrary expression %). This allows inclusion of arbitrary
+expressions inside strings.
+Usually not needed very often, so that the awkward notation
+is not too bad.
+Note that %(%[1]%) is equivalent to %[1].
+.RE
+.NH 3
+stack replacement.
+.PP
+The stack replacement is a possibly empty list of items to be
+pushed on the fake stack.
+Three things can occur:
+.IP 1)
+%[1] is used if the registerpair AX was on the fake stack and is
+to be pushed back onto it.
+.IP 2)
+%[a] is used if the registerpair AX is allocated with allocate(R16)
+and is to be pushed onto the fake stack.
+.IP 3)
+It can also be empty.
+.NH 3
+EM replacement.
+.PP
+In exceptional cases it might be useful to leave part of an EM
+pattern undone.
+For example, a
+.B
+sdl
+.R
+instruction might be split into two
+.B
+stl
+.R
+instructions when there is no 4-byte quantity on the stack.
+The EM replacement part allows one to express this.
+Example:
+.sp 1
+.br
+.B
+stl
+.R
+$1
+.B
+stl
+.R
+$1+2
+.sp 1
+The instructions are inserted in the stream so they can match
+the first part of a pattern in the next step.
+Note that since the code generator traverses the EM instructions
+in a strict linear fashion, it is impossible to let the EM
+replacement match later parts of a pattern.
+So if there is a pattern
+.sp 1
+.br
+.B
+loc
+stl
+.R
+$1==0
+.sp 1
+and the input is
+.sp 1
+.br
+.B
+loc
+.R
+0
+.B
+sdl
+.R
+4
+.sp 1
+the
+.B
+loc
+.R
+0
+will be processed first, then the
+.B
+sdl
+.R
+might be split into two
+.B
+stl
+.R
+'s, but the pattern cannot match now.
+.NH 3
+Move definitions.
+.PP
+This definition is a fake; it is only put in the
+table because the code generator generator complains if it
+cannot find one.
+.NH 3
+Test definitions.
+.PP
+Test definitions aren't used by the table.
+.NH 3
+Stack definitions.
+.PP
+When the generator has to push the registerpair AX, it must
+know how to do so.
+The machine code to be generated is defined here.
+.NH 1
+Some remarks.
+.PP
+The above description of the machine table is
+a description of the table for the MCS6500.
+It uses only a part of the possibilities which the code generator
+generator offers.
+For a more precise and detailed description see [2].
+.DS C
+.B
+THE BACK END TABLE.
+.R
+.DE
+.NH 0
+Introduction.
+.PP
+The code rules are divided into 15 groups.
+These groups are:
+.IP 1.
+Load instructions.
+.IP 2.
+Store instructions.
+.IP 3.
+Integer arithmetic instructions.
+.IP 4.
+Unsigned arithmetic instructions.
+.IP 5.
+Floating point arithmetic instructions.
+.IP 6.
+Pointer arithmetic instructions.
+.IP 7.
+Increment, decrement and zero instructions.
+.IP 8.
+Convert instructions.
+.IP 9.
+Logical instructions.
+.IP 10.
+Set manipulation instructions.
+.IP 11.
+Array instructions.
+.IP 12.
+Compare instructions.
+.IP 13.
+Branch instructions.
+.IP 14.
+Procedure call instructions.
+.IP 15.
+Miscellaneous instructions.
+.PP
+From each of these groups one or two typical EM patterns will be explained
+in the next paragraphs.
+Comments are placed between /* and */ (/* This is a comment */).
+.NH
+The instructions.
+.NH 2
+The load instructions.
+.PP
+In this group a typical instruction is
+.B
+lol
+.R
+.
+A
+.B
+lol
+.R
+instruction pushes the word at local base + offset, where offset
+is the instruction's argument, onto the stack.
+Since the MCS6500 can only offset by 256 bytes, as explained in the
+section on memory addressing modes, there is a need for two code rules
+in the table:
+one that can offset directly and one that must explicitly
+calculate the address of the local.
+.NH 3
+The lol instruction with indirect offsetting.
+.PP
+In this case an indirect offsetted load from the second local base
+is possible.
+The table content is:
+.sp 1
+.br
+.B
+lol
+.R
+IN($1) | |
+.br
+allocate(R16)	/* allocate registerpair AX */
+.br
+"ldy #BASE+$1"	/* load Y with the offset from the second
+.br
+					      local base */
+.br
+"lda (LBl),y"	/* load indirect the lowbyte of the word */
+.br
+"tax"		/* move register A to register X */
+.br
+"iny"		/* increment register Y (offset) */
+.br
+"lda (LBl),y"	/* load indirect the highbyte of the word */
+.br
+| %[a] | |	/* push the word onto the fake stack */
+.NH 3
+The lol instruction whose offset is too big.
+.PP
+In this case, the library subroutine "Lol" is used.
+This subroutine expects the offset in registerpair AX, then
+calculates the address of the local or parameter, and loads
+it into registerpair AX.
+The table content is:
+.sp 1
+.br
+.B
+lol
+.R
+| |
+.br
+allocate(R16)	/* allocate registerpair AX */
+.br
+"lda #[$1].h"	/* load highbyte of offset into register A */
+.br
+"ldx #[$1].l"	/* load lowbyte of offset into register X */
+.br
+"jsr Lol"	/* perform the subroutine */
+.br
+| %[a] | |	/* push word onto the fake stack */
+.NH 2
+The store instructions.
+.PP
+In this group a typical instruction is
+.B
+stl.
+.R
+A
+.B
+stl
+.R
+instruction pops a word from the stack and stores it in the word
+at local base + offset, where offset is the instruction's argument.
+Here also there is a need for two code rules in the table as a result
+of the offset limit.
+.NH 3
+The stl instruction with indirect offsetting.
+.PP
+In this case an indirect offsetted store from the second local
+base is possible.
+The table content is:
+.sp 1
+.br
+.B
+stl
+.R
+IN($1) | R16 |	/* expect registerpair AX on top of the
+.br
+							fake stack */
+.br
+"ldy #BASE+1+$1"  /* load Y with the offset from the
+.br
+						second local base */
+.br
+"sta (LBl),y"	/* store the highbyte of the word from A */
+.br
+"txa"		/* move register X to register A */
+.br
+"dey"		/* decrement offset */
+.br
+"sta (LBl),y"	/* store the lowbyte of the word from A */
+.br
+| | |
+.NH 3
+The stl instruction whose offset is too big.
+.PP
+In this case the library subroutine 'Stl' is used.
+This subroutine expects the offset in registerpair AX, then
+calculates the address, pops the word and stores it in its place.
+The table content is:
+.sp 1
+.br
+.B
+stl
+.R
+| |
+.br
+allocate(R16)	/* allocate registerpair AX */
+.br
+"lda #[$1].h"	/* load highbyte of offset in register A */
+.br
+"ldx #[$1].l"	/* load lowbyte of offset in register X */
+.br
+"jsr Stl"	/* perform the subroutine */
+.br
+| | |
+.NH 2
+Integer arithmetic instructions.
+.PP
+In this group typical instructions are
+.B
+adi
+.R
+and
+.B
+mli.
+.R
+These instructions, in this table, are implemented for 2-byte
+and 4-byte integers.
+The only arithmetic instructions available on the MCS6500 are
+the ADC (add with carry), and SBC (subtract with not(carry)).
+Not(carry) here means that in a subtraction, the one's complement
+of the carry is taken.
+The absence of multiply and division instructions forces the
+use of subroutines to handle these cases.
+Because there are no registers left in which to perform the multiply
+and division, zero page is used here.
+The 4-byte integer arithmetic is implemented because C has
+the integer type long.
+A user is free to use the type long, but will pay in performance.
+.NH 3
+The adi instruction.
+.PP
+In case of the
+.B
+adi
+.R
+2 (and
+.B
+sbi
+.R
+2) instruction there are many EM
+patterns, so that the instruction can be performed in line in
+most cases.
+For the worst case there exists a subroutine in the library
+which deals with the EM instruction.
+In case of an
+.B
+adi
+.R
+4 (or
+.B
+sbi
+.R
+4) there is only a subroutine to deal with it.
+A table content is:
+.sp 1
+.br
+.B
+lol lol adi
+.R
+(IN($1) && IN($2) && $3==2) | | /* is it in range */
+.br
+allocate(R16)	/* allocate registerpair AX */
+.br
+"ldy #BASE+$1+1" /* load Y with offset for first operand */
+.br
+"lda (LBl),y"	/* load indirect highbyte first operand */
+.br
+"pha"		/* save highbyte first operand on hard_stack */
+.br
+"dey"		/* decrement offset first operand */
+.br
+"lda (LBl),y"	/* load indirect lowbyte first operand */
+.br
+"ldy #BASE+$2"	/* load Y with offset for second operand */
+.br
+"clc"		/* clear carry for addition */
+.br
+"adc (LBl),y"	/* add the lowbytes of the operands */
+.br
+"tax"		/* store lowbyte of result in place */
+.br
+"iny"		/* increment offset second operand */
+.br
+"pla"		/* get highbyte first operand */
+.br
+"adc (LBl),y"	/* add the highbytes of the operands */
+.br
+| %[a] | |	/* push the result onto the fake stack */
+.NH 3
+The mli instruction.
+.PP
+The
+.B
+mli
+.R
+2 instruction mostly uses the subroutine 'Mlinp'.
+This subroutine expects the multiplicand in zero page
+at locations ARTH, ARTH+1, while the multiplier is in zero
+page locations ARTH+2, ARTH+3.
+For a description of the algorithms used for multiplication and
+division, see [3].
+A table content is:
+.sp  1
+.br
+.B
+lol lol mli
+.R
+(IN($1) && IN($2) && $3==2) | |
+.br
+allocate(R16)	/* allocate registerpair AX */
+.br
+"ldy #BASE+$1"	/* load Y with offset of multiplicand */
+.br
+"lda (LBl),y"	/* load indirect lowbyte of multiplicand */
+.br
+"sta ARTH"	/* store lowbyte in zero page */
+.br
+"iny"		/* increment offset of multiplicand */
+.br
+"lda (LBl),y"	/* load indirect highbyte of multiplicand */
+.br
+"sta ARTH+1"	/* store highbyte in zero page */
+.br
+"ldy #BASE+$2"	/* load Y with offset of multiplier */
+.br
+"lda (LBl),y"	/* load indirect lowbyte of multiplier */
+.br
+"sta ARTH+2"	/* store lowbyte in zero page */
+.br
+"iny"		/* increment offset of multiplier */
+.br
+"lda (LBl),y"	/* load indirect highbyte of multiplier */
+.br
+"sta ARTH+3"	/* store highbyte in zero page */
+.br
+"jsr Mlinp"	/* perform the multiply */
+.br
+| %[a] | |	/* push result onto fake stack */
+.NH 2
+The unsigned arithmetic instructions.
+.PP
+Since unsigned addition and subtraction are performed in the same way
+as signed addition and subtraction, these cases are dealt with by
+an EM replacement.
+For multiplication and division there are special subroutines.
+.NH 3
+Unsigned addition.
+.PP
+This is an example of the EM replacement strategy.
+.sp 1
+.br
+.B
+lol lol adu
+.R
+	| | | |
+.B
+lol
+.R
+$1
+.B
+lol
+.R
+$2
+.B
+adi
+.R
+$3 |
+.NH 2
+Floating point arithmetic.
+.PP
+Floating point arithmetic isn't implemented in this table.
+.NH 2
+Pointer arithmetic instructions.
+.PP
+A typical pointer arithmetic instruction is
+.B
+adp
+.R
+2.
+This instruction adds an offset and a pointer.
+A table content is:
+.sp 1
+.br
+.B
+adp
+.R
+	| | | |
+.B
+loc
+.R
+$1
+.B
+adi
+.R
+2 |
+.NH 2
+Increment, decrement and zero instructions.
+.PP
+In this group a typical instruction is
+.B
+inl
+.R
+, which increments a local or parameter.
+The MCS6500 doesn't have an instruction to increment the
+accumulator A, so the 'ADC' instruction must be used.
+A table content is:
+.sp 1
+.br
+.B
+inl
+.R
+IN($1) | |
+.br
+allocate(R16)	/* allocate registerpair AX */
+.br
+"ldy #BASE+$1"	/* load Y with offset of the local */
+.br
+"clc"		/* clear carry for addition */
+.br
+"lda (LBl),y"	/* load indirect lowbyte of local */
+.br
+"adc #1"	/* increment lowbyte */
+.br
+"sta (LBl),y"	/* restore indirect the incremented lowbyte */
+.br
+"bcc 1f"	/* if carry is clear then ready */
+.br 
+"iny"		/* increment offset of local */
+.br
+"lda (LBl),y"	/* load indirect highbyte of local */
+.br
+"adc #0"	/* add carry to highbyte */
+.br
+"sta (LBl),y\\n1:"  /* restore indirect the highbyte */
+.PP
+If the offset of the local or parameter is too big, the
+local or parameter is first fetched, then incremented, and then
+stored back.
+.NH 2
+Convert instructions.
+.PP
+In this group there are two convert instructions
+that really do something.
+One of them is in line code, and deals with the extension of
+a character (1-byte) to an integer.
+The other one is a subroutine which handles the conversion
+between 2-byte integers and 4-byte integers.
+.NH 3
+The in line conversion.
+.PP
+The table content is:
+.sp 1
+.br
+.B
+loc loc cii
+.R
+$1==1 && $2==2 | R16 |
+.br
+"txa"		/* see if sign extension is needed */
+.br
+"bpl 1f"	/* there is no need for sign extension */
+.br
+"lda #0FFh"	/* sign extension here */
+.br
+"bne 2f"	/* conversion ready */
+.br
+"1: lda #0\\n2:"	/* no sign extension here */
+.NH 2
+Logical instructions.
+.PP
+A typical instruction in this group is the logical
+.B
+and
+.R
+on two 2-byte words.
+The logical
+.B
+and
+.R
+on groups of more than two bytes (max 254)
+is also possible and uses a library subroutine.
+.NH 3
+The logical and on 2-byte groups.
+.PP
+The table content is:
+.sp 1
+.br
+.B
+and
+.R
+$1==2 | R16 |	/* one group must be on the fake stack */
+.br
+"sta ARTH+1"	/* temporary save of first group highbyte */
+.br
+"stx ARTH"	/* temporary save of first group lowbyte */
+.br
+"jsr Pop"	/* pop second group from the stack */
+.br
+"and ARTH+1"	/* logical and on highbytes */
+.br
+"pha"		/* temporary save the result's highbyte */
+.br
+"txa"		/* logical and can only be done in A */
+.br
+"and ARTH"	/* logical and on lowbytes */
+.br
+"tax"		/* restore results lowbyte */
+.br
+"pla"		/* restore results highbyte */
+.br
+| %[1] | |	/* push result onto fake stack */
+.NH 2
+Set manipulation instructions.
+.PP
+A typical EM pattern in this group is
+.B
+loc inn zeq
+.R
+$1>0 && $1<16 && $2==2.
+This EM pattern works on sets of 16 bits.
+Sets can be bigger (max 256 bytes = 2048 bits), but then a
+library routine is used instead of in line code.
+The table content of the above EM pattern is:
+.sp 1
+.br
+.B
+loc inn zeq
+.R
+$1>0 && $1<16 && $2==2 | R16 |
+.br
+"ldy #$1+1"	/* load Y with bit number */
+.br
+"stx ARTH"	/* cannot rotate X, so use zero page */
+.br
+"1: lsr a"	/* right shift A */
+.br
+"ror ARTH"	/* right rotate zero page location */
+.br
+"dey"		/* decrement Y */
+.br
+"bne 1b"	/* shift $1 times */
+.br
+"bcc $1"	/* no carry, so bit is zero */
+.NH 2
+Array instructions.
+.PP
+In this group a typical EM pattern is
+.B
+lae lar
+.R
+defined(rom(1,3)) | | | |
+.B
+lae
+.R
+$1
+.B
+aar
+.R
+$2
+.B
+loi
+.R
+rom(1,3).
+This pattern uses the 
+.B
+aar
+.R
+instruction, which is part of a typical EM pattern:
+.sp 1
+.br
+.B
+lae aar
+.R
+$2==2 && rom(1,3)==2 && rom(1,1)==0 | R16 | /* registerpair AX contains
+the index in the array */
+.br
+"pha"		/* save highbyte of index */
+.br
+"txa"		/* move lowbyte of index to A */
+.br
+"asl a"		/* shift left lowbyte == 2 times lowbyte */
+.br
+"tax"		/* restore lowbyte */
+.br
+"pla"		/* restore highbyte */
+.br
+"rol a"		/* rotate left highbyte == 2 times highbyte */
+.br
+| %[1] | adi 2 | /* push new index, add to lowerbound array */
+.NH 2
+Compare instructions.
+.PP
+In this group all EM patterns are performed by calling
+a subroutine.
+Subroutines are used here because comparison is only
+possible byte by byte.
+This means a lot of code, and since compares are used frequently
+a lot of in line code would be generated, thus reducing
+the space left for the software stack.
+These subroutines can be found in the library.
+.NH 2
+Branch instructions.
+.PP
+A typical branch instruction is
+.B
+beq.
+.R
+The table content for it is:
+.sp 1
+.br
+.B
+beq
+.R
+| R16 |
+.br
+"sta BRANCH+1"	/* save highbyte second operand in zero page */
+.br
+"stx BRANCH"	/* save lowbyte second operand in zero page */
+.br
+"jsr Pop"	/* pop the first operand */
+.br
+"cmp BRANCH+1" 	/* compare the highbytes */
+.br
+"bne 1f"	/* there not equal so go on */
+.br
+"cpx BRANCH"	/* compare the lowbytes */
+.br
+"beq $1\\n1:"	/* lowbytes are also equal, so branch */
+.PP
+Another typical instruction in this group is
+.B
+zeq.
+.R
+The table content is:
+.sp 1
+.br
+.B
+zeq
+.R
+| R16 |
+.br
+"tay"		/* move A to Y for setting testbits */
+.br
+"bmi $1"	/* highbyte s minus so branch */
+.br
+"txa"		/* move X to A for setting testbits */
+.br
+"beq $1\\n1:"	/* lowbyte also zero, thus branch */
+.NH 2
+Procedure call instructions.
+.PP
+In this group one code generation rule might seem a little
+awkward.
+It is the EM instruction
+.B
+cai
+.R
+which generates a 'jsr Indir'.
+This is because there is no indirect jump-to-subroutine instruction
+on the MCS6500.
+The only solution is to store the address in zero page, and then
+do a 'jsr' to a known label.
+At this label there must be an indirect jump instruction, which
+performs a jump to the address stored in zero page.
+In this case the label is Indir, and the address is stored in
+zero page at the addresses ADDR, ADDR+1.
+The table content is:
+.sp 1
+.br
+.B
+cai
+.R
+| R16 |
+.br
+"stx ADDR"	/* store lowbyte of address in zero page */
+.br
+"sta ADDR+1"	/* store highbyte of address in zero page */
+.br
+"jsr Indir"	/* use the indirect jump */
+.br
+| | |
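+.PP
+The code at the label Indir itself then presumably consists of nothing
+but an indirect jump through the zero page locations, something like:
+.sp 1
+.br
+Indir:	jmp (ADDR)	/* jump to the address stored at ADDR, ADDR+1 */
+.sp 1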
+.NH 2
+Miscellaneous instructions.
+.PP
+In this group, as the name suggests, there is no
+typical EM instruction or EM pattern.
+Most of the MCS6500 code to be generated uses a library subroutine
+or is straightforward.
+.DS C
+.B
+PERFORMANCE.
+.R
+.DE
+.NH 0
+Introduction.
+.PP
+To measure the performance of the back end table some timing
+tests have been done.
+What to time?
+In this case, the execution times of several Pascal statements
+are measured.
+Statements in C that have a Pascal equivalent are timed as well.
+The statements are timed as follows.
+A test program has been written which executes two
+nested for loops from 1 to 1,000.
+Within these for loops the statement to be tested is placed,
+so the statement will be executed 1,000,000 times.
+Then the same program is executed without the test statement.
+The time difference between the two executions is the time
+necessary to execute the test statement 1,000,000 times.
+The time to execute the test statement once is thus the
+time difference divided by 1,000,000.
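+.PP
+In other words (a small sketch, not one of the test programs themselves):
+if the run containing the statement takes t1 seconds and the empty run
+takes t0 seconds, the time per execution of the statement is
+.sp 1
+.br
+	(t1 - t0) / 1000000   seconds,
+.sp 1
+which is numerically just (t1 - t0) microseconds.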
+.NH 0
+Testing Pascal statements.
+.PP
+The following statements are tested.
+.IP 1)
+int1 := 0;
+.IP 2)
+int1 := int2 - 1;
+.IP 3)
+int1 := int1 + 1;
+.IP 4)
+int1 := icon1 - icon2;
+.IP 5)
+int1 := icon2 div icon1;
+.IP 6)
+int1 := int2 * int3;
+.IP 7)
+bool := (int1 < 0);
+.IP 8)
+bool := (int1 < 3);
+.IP 9)
+bool := ((int1 > 3) or (int1 < 3))
+.IP 10)
+case int1 of 1: bool := false; 2: bool := true end;
+.IP 11)
+if int1 = 0 then int2 := 3;
+.IP 12)
+while int1 > 0 do int1 := int1 - 1;
+.IP 13)
+m := a[k];
+.IP 14)
+let2 := ['a'..'c'];
+.IP 15)
+P3(x);
+.IP 16)
+dum := F3(x);
+.IP 17)
+s.overhead := 5400;
+.IP 18)
+with s do overhead := 5400;
+.PP
+These statements were tested in a procedure called test.
+.sp 1
+.br
+procedure test;
+.br
+var i, j, ... : integer;
+.br
+    bool : boolean;
+.br
+    let2 : set of char;
+.br
+begin
+.br
+    for i := 1 to 1000 do
+.br
+	for j := 1 to 1000 do
+.br
+	    STATEMENT
+.br
+end;
+.sp 1
+.PP
+STATEMENT is one of the statements as shown above, or it is
+the empty statement.
+The assignment of the variables used, if necessary, is done before
+the first for loop.
+In case of the statement which uses the procedure call, statement
+15, a dummy procedure is declared whose body is empty.
+In case of the statement which uses the function, statement 16,
+this function returns its argument.
+For the timing of C statements a similar test program was
+written.
+.sp 1
+.br
+main()
+.br
+{
+.br
+    int i, j, ...;
+.br
+    for (i = 1; i <= 1000; i++)
+.br
+	for (j = 1; j <= 1000; j++)
+.br
+	    STATEMENT
+.br
+}
+.sp 1
+.NH
+The results.
+.PP
+Here are tables with the results of the time measurements.
+Times are in microseconds (10^-6 seconds).
+Some statements appear twice in the tables.
+In the second case an array of 200 integers was declared
+before the variable to be tested, so this variable cannot
+be accessed by indirect addressing from the second local base.
+This results in a larger execution time of the statement to be
+tested.
+The column 68000 contains the times measured on a Bleasdale,
+M68000 based, computer.
+The times in column pdp are measured on a DEC pdp11/44, while
+the times in column 6500 come from a BBC microcomputer.
+.bp
+.TS
+expand;
+c s s s
+c c c c
+lw35 nw7 nw7 nw7.
+Pascal timing results
+statement	68000	pdp	6500
+_
+T{
+int1 := 0;
+T}	4.0	5.8	16.7
+ 	4.0	4.2	97.8
+_
+T{
+int1 := int2 - 1;
+T}	7.2	7.1	27.2
+ 	6.9	7.1	206.5
+_
+T{
+int1 := int1 + 1;
+T}	6.9	6.8	27.2
+ 	6.4	6.7	106.5
+_
+T{
+int1 := icon1 + icon2;
+T}	6.2	6.2	25.6
+ 	6.2	6.0	106.6
+_
+T{
+int1 := icon2 div icon1;
+T}	14.9	14.3	372.6
+ 	14.9	14.7	453.7
+_
+T{
+int1 := int2 * int3;
+T}	11.5	12.0	558.1
+ 	11.3	11.6	728.6
+_
+T{
+bool := (int1 < 0);
+T}	7.2	6.9	122.8
+ 	7.8	8.1	453.2
+_
+T{
+bool := (int1 < 3);
+T}	7.3	7.6	126.0
+ 	7.2	8.1	232.2
+_
+T{
+bool := ((int1 > 3) or (int1 < 3))
+T}	10.1	12.0	307.8
+ 	10.2	11.9	440.1
+_
+T{
+case int1 of 1: bool := false; 2: bool := true end;
+T}	18.3	17.9	165.7
+_
+T{
+if int1 = 0 then int2 := 3;
+T}	9.5	8.5	133.8
+_
+T{
+while int1 > 0 do int1 := int1 - 1;
+T}	6.9	6.9	126.0
+_
+T{
+m := a[k];
+T}	7.2	6.8	134.3
+_
+T{
+let2 := ['a'..'c'];
+T}	38.4	38.8	447.4
+_
+T{
+P3(x);
+T}	18.9	18.8	180.3
+_
+T{
+dum := F3(x);
+T}	26.8	27.1	343.3
+_
+T{
+s.overhead := 5400;
+T}	4.6	4.1	16.7
+_
+T{
+with s do overhead := 5400;
+T}	4.2	4.3	16.7
+.TE
+.TS
+expand;
+c s s s
+c c c c
+lw35 nw7 nw7 nw7.
+C timing results
+statement	68000	pdp	6500
+_
+T{
+int1 = 0;
+T}	4.1	3.6	17.2
+ 	4.1	4.1	97.7
+_
+T{
+int1 = int2 - 1;
+T}	6.6	6.9	27.2
+ 	6.1	6.5	206.4
+_
+T{
+int1 = int1 + 1;
+T}	6.4	7.3	27.2
+ 	6.3	6.2	206.4
+_
+T{
+int1 = int2 * int3;
+T}	11.4	12.3	522.6
+	9.6	10.1	721.2
+_
+T{
+int1 = (int2 < 0);
+T}	7.2	7.6	126.4
+ 	7.4	7.7	232.5
+_
+T{
+int1 = (int2 < 3);
+T}	7.0	7.5	126.0
+ 	7.8	7.8	232.6
+_
+T{
+int1 = ((int2 > 3) || (int2 < 3));
+T}	11.8	12.2	193.4
+ 	11.5	13.2	245.6
+_
+T{
+switch (int1) { case 1: int1 = 0; break; case 2: int1 = 1; break; }
+T}	28.3	29.2	164.1
+_
+T{
+if (int1 == 0) int2 = 3;
+T}	4.8	4.8	19.4
+_
+T{
+while (int2 > 0) int2 = int2 - 1;
+T}	5.8	6.0	125.9
+_
+T{
+int2 = a[int2];
+T}	4.8	5.1	192.8
+_
+T{
+P3(int2);
+T}	18.8	18.4	180.3
+_
+T{
+int2 = F3(int2);
+T}	27.0	27.2	309.4
+_
+T{
+s.overhead = 5400;
+T}	5.0	4.1	16.7
+.TE
+.NH
+Pascal statements which don't have a C equivalent.
+.PP
+First, the two statements that perform an operation on constants
+are left out.
+These are left out because the C front end does constant folding,
+while the Pascal front end doesn't.
+So in C the statements int1 = icon1 + icon2; and int1 = icon1 / icon2;
+will use the same amount of time, since the expression is evaluated
+by the front end.
+The two other statements (let2 := ['a'..'c']; and
+.B
+with
+.R
+s
+.B
+do
+.R
+overhead := 5400;), aren't included in the C statement timing table,
+because these constructs do not exist in C.
+Although C allows direct bit manipulation, which could
+be used to implement sets, I have not used it here.
+The
+.B
+with
+.R
+statement does not exist in C and there is nothing with the slightest
+resemblance to it.
+.PP
+At first sight the tables suggest that there is not much difference
+between the times for the M68000 and the pdp11/44, in comparison with the
+times needed by the MCS6500.
+To verify this impression, I calculated the correlation coefficient
+between the times of the M68000 and the pdp11/44.
+It turned out to be 0.997 for both the Pascal time tests and the C
+time tests.
+Since the correlation coefficient is near one and the differences
+between the times are small, the two can be considered to be the same
+when compared with the times of the MCS6500.
+Then I tried to make a graph of the times of the M68000 against
+those of the MCS6500.
+There wasn't any correlation to be seen when all the times were taken
+into account.
+The only correlation one could see, with some effort, was in the
+times for the first three Pascal statements.
+The first two C statements also show a correlation, which two points
+always do.
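+.PP
+The correlation coefficient meant here is presumably the ordinary
+(Pearson) one; as an illustration (this little routine is not part of
+the test programs), it can be computed as follows:
+.sp 1
+.br
+double corr(x, y, n)
+.br
+double x[], y[]; int n;
+.br
+{
+.br
+    double sx = 0, sy = 0, sxx = 0, syy = 0, sxy = 0;
+.br
+    double sqrt();	/* from the C math library */
+.br
+    int i;
+.br
+    for (i = 0; i < n; i++) {
+.br
+	sx += x[i]; sy += y[i];
+.br
+	sxx += x[i]*x[i]; syy += y[i]*y[i]; sxy += x[i]*y[i];
+.br
+    }
+.br
+    return (n*sxy - sx*sy) / sqrt((n*sxx - sx*sx)*(n*syy - sy*sy));
+.br
+}
+.sp 1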
+.PP
+Also the three Pascal statements
+.B
+case
+.R
+,
+.B
+if
+.R
+,
+and
+.B
+while
+.R
+have a correlation coefficient of 0.999.
+This is probably because the
+.B
+case
+.R
+statement uses a subroutine in both cases and the other two
+statements
+.B
+if
+.R
+and
+.B
+while
+.R
+generate in line code.
+The last two Pascal statements take the same time, since the front
+end will generate the same EM code for both.
+.PP
+The independence between the rest of the test times is because
+in these cases the object code for the MCS6500 uses library
+subroutines, while the other processors can handle the EM code
+with in line code.
+.PP
+It is clear that the MCS6500 is a slower device: it needs longer
+execution times and more library subroutines.
+However, there is no constant factor between its execution times and those
+of the other processors.
+.PP
+The slowing down of the MCS6500 as a result of the need for a
+library subroutine is illustrated by the multiplication
+statement.
+The MCS6500 needs a library subroutine, while the other
+two processors have a machine instruction to perform the
+multiply.
+This results in a factor of 48.5 when the operands can be accessed
+indirectly by the MCS6500.
+When the MCS6500 cannot access the operands indirectly the situation
+is even worse.
+The slight differences between the MCS6500 execution times for
+Pascal statements and C statements are probably the result of the
+front ends, and thus beyond the scope of this discussion.
+.PP
+Another timing test is done in C on the statement k = i + j + 1983.
+This statement is tested on many UNIX*
+.FS
+* UNIX is a Trademark of Bell Laboratories.
+.FE
+systems.
+For a complete list see appendix A.
+The slowest one is the IBM XT, which runs on an 8088 microprocessor.
+The fastest one is the Amdahl computer.
+Here is a short table (times again in microseconds) to illustrate the
+performance of the MCS6500.
+.TS
+c c c
+c n n.
+machine	short	int
+IBM XT	53.4	53.4
+Amdahl	0.5	0.3
+MCS6500	150.2	150.2
+.TE
+The MCS6500 is three times slower than the IBM XT, but three hundred
+times slower than the Amdahl.
+The reason why the times on the IBM XT and the MCS6500 are the
+same for shorts and ints is that most C compilers make the types
+short and int the same size on 16-bit machines.
+In this project the MCS6500 is regarded as a 16-bit machine.
+.NH
+Length tests.
+.PP
+I have also compiled several programs written in Pascal and C to
+see if there is a relation between the numbers of bytes of machine
+code generated for the different machines.
+In the tables:
+.IP length: 9
+The number of bytes of the source program.
+.IP 68000:
+The number of bytes of the a.out file for a M68000.
+.IP pdp:
+The number of bytes of the a.out file for a pdp11/44.
+.IP 6500:
+The number of bytes of the a.out file for a MCS6500.
+.LP
+These are the results:
+.TS
+c s s s
+c c c c
+n n n n.
+Pascal programs
+length	68000	pdp	6500
+_
+19946	14383	16090	26710
+19484	20169	20190	35416
+10849	10469	11464	18949
+273	4221	5106	7944
+1854	5807	6610	10301
+.TE
+.TS
+c s s s
+c c c c
+n n n n.
+C programs
+length	68000	pdp	6500
+_
+9444	6927	8234	11559
+7655	14353	18240	26251
+4775	11309	15934	19910
+639	6337	9660	12494
+.TE
+.PP
+In contrast to the execution times of the test statements, the
+object code file sizes show a constant factor between them.
+After calculating the correlation coefficients, I calculated
+the fitted line between the sizes.
+.FS
+* x is the number of bytes
+.FE
+.TS
+c s s
+c c c
+l c c.
+Pascal programs
+processor	corr. coef.	fitted line
+_
+68000-pdp	0.996	 
+68000-6500	0.999	1.76x + 502*
+pdp-6500	0.999	1.80x - 1577
+.TE
+.TS
+c s s
+c c c
+l c c.
+C programs
+processor	corr. coef.	fitted line
+_
+68000-pdp	0.974	 
+68000-6500	0.992	1.80x + 502*
+pdp-6500	0.980	1.40x - 1577
+.TE
+.PP
+As seen from the tables above, the correlation coefficients for
+Pascal programs are better than those for C programs.
+Thus the line fits best for Pascal programs.
+With the formula of the best fitted line one can now estimate
+the size of the object code a program needs for a MCS6500
+without having the compiler at hand.
+For example, a Pascal program of 10000 bytes of object code on the
+M68000 would be expected to need about 1.76 * 10000 + 502 = 18102
+bytes on the MCS6500.
+One can also see from these formulas that the object code
+generated for a MCS6500 is about 1.8 times larger than for the other
+processors.
+Since the number of bytes in the source file heavily depends on the
+programmer (how many spaces he or she uses, the size of the indentation
+in structured programs, etc.), there is no correlation between the
+size of the source file and the size of the object file.
+The use of comments also has its influence on the size.
+.bp
+.DS C
+.B
+SUMMARY.
+.R
+.DE
+.NH 0
+Summary
+.PP
+In this chapter some final conclusions are made.
+.PP
+In spite of its simplicity, the MCS6500 is strong enough to
+implement an EM machine.
+A serious deficiency of the MCS6500 is the lack of 16-bit
+general purpose registers, and especially the lack of a
+16-bit stackpointer.
+As pointed out before, one 16-bit register can be simulated
+by a pair of 8-bit registers, in fact, the accumulator A to
+hold the highbyte, and the index register X to hold the lowbyte
+of the word.
+For lack of a 16-bit stackpointer, zero page must be used to hold
+a stackpointer, and two subroutines are also needed for
+manipulating the stack (Push and Pop).
+.PP
+As seen in the timing tests, the simple instruction set of the
+MCS6500 forces the use of library subroutines.
+These library subroutines increase the execution time of the
+programs.
+.PP
+The sizes of the object code files show a strong correlation
+in contrast to the execution times.
+With this correlation one can estimate the size of a program
+if it is to be used on a MCS6500.
+.bp
+.NH 0
+.B
+REFERENCES.
+.R
+.IP 1.
+Osborn, A., Jacobson, S., and Kane, J. The Mos Technology MCS6500.
+.B
+An Introduction to Microcomputers,
+.R
+Volume II, Some Real Products (June 1977), chap. 9.
+.RS
+.PP
+A hardware description of some real existing CPUs, such as
+the Zilog Z80, the MCS6500, etc., is given in this book.
+.RE
+.IP 2.
+van Staveren, H.
+The table driven code generator from the Amsterdam Compiler Kit.
+Vrije Universiteit, Amsterdam, (July 11, 1983).
+.RS
+.PP
+The defining document for writing a back end table.
+.RE
+.IP 3.
+Tanenbaum, A.S. Structured Computer Organization.
+Prentice Hall. (1976).
+.RS
+.PP
+In this book computers are described as a hierarchy of levels,
+with each one performing some well-defined function.
+.RE

+ 4 - 0
doc/LLgen/.distr

@@ -0,0 +1,4 @@
+LLgen.n
+LLgen_NCER.n
+LLgen.refs
+proto.make

+ 1077 - 0
doc/LLgen/LLgen.n

@@ -0,0 +1,1077 @@
+.\"	$Id$
+.\"	Run this paper off with
+.\"	refer [options] -p LLgen.refs LLgen.doc | [n]eqn | tbl | (nt)roff -ms
+.if '\*(>.'' \{\
+.	if '\*(<.'' \{\
+.		if n .ds >. .
+.		if n .ds >, ,
+.		if t .ds <. .
+.		if t .ds <, ,\
+\}\
+\}
+.cs 5 22u
+.ND
+.EQ
+delim @@
+.EN
+.TL
+LLgen, an extended LL(1) parser generator
+.AU
+Ceriel J. H. Jacobs
+.AI
+Dept. of Mathematics and Computer Science
+Vrije Universiteit
+Amsterdam, The Netherlands
+.AB
+\fILLgen\fR provides a
+tool for generating an efficient recursive descent parser
+with no backtrack from
+an Extended Context Free syntax.
+The \fILLgen\fR
+user specifies the syntax, together with code
+describing actions associated with the parsing process.
+\fILLgen\fR
+turns this specification into a number of subroutines that handle the
+parsing process.
+.PP
+The grammar may be ambiguous.
+\fILLgen\fR contains both static and dynamic facilities
+to resolve these ambiguities.
+.PP
+The specification can be split into several files, for each of
+which \fILLgen\fR generates an output file containing the
+corresponding part of the parser.
+Furthermore, only output files that differ from their previous
+version are updated.
+Other output files are not affected in any
+way.
+This allows the user to recompile only those output files that have
+changed.
+.PP
+The subroutine produced by \fILLgen\fR calls a user supplied routine
+that must return the next token. This way, the input to the
+parser can be split into single characters or higher level
+tokens.
+.PP
+An error recovery mechanism is generated almost completely
+automatically.
+It is based on so called \fBdefault choices\fR, which are
+implicitly or explicitly specified by the user.
+.PP
+\fILLgen\fR has successfully been used to create recognizers for
+Pascal, C, and Modula-2.
+.AE
+.NH
+Introduction
+.PP
+\fILLgen\fR
+provides a tool for generating an efficient recursive
+descent parser with no backtrack from an Extended Context Free
+syntax.
+A parser generated by
+\fILLgen\fR
+will be called
+\fILLparse\fR
+for the rest of this document.
+It is assumed that the reader has some knowledge of LL(1) grammars and
+recursive descent parsers.
+For a survey on the subject, see reference
+.[ (
+griffiths
+.]).
+.PP
+Extended LL(1) parsers are an extension of LL(1) parsers. They are
+derived from an Extended Context-Free (ECF) syntax instead of a Context-Free
+(CF) syntax.
+ECF syntax is described in section 2.
+Section 3 provides an outline of a
+specification as accepted by
+\fILLgen\fR and also discusses the lexical conventions of
+grammar specification files.
+Section 4 provides a description of the way the
+\fILLgen\fR
+user can associate
+actions with the syntax. These actions must be written in the programming
+language C,
+.[
+kernighan ritchie
+.]
+which also is the target language of \fILLgen\fR.
+The error recovery technique is discussed in section 5.
+This section also discusses what the user can do about it.
+Section 6 discusses
+the facilities \fILLgen\fR offers
+to resolve ambiguities and conflicts.
+\fILLgen\fR offers facilities to resolve them both at parser
+generation time and during the execution of \fILLparse\fR.
+Section 7 discusses the
+\fILLgen\fR
+working environment.
+It also discusses the lexical analyzer that must be supplied by the
+user.
+This lexical analyzer must read the input stream and break it
+up into basic input items, called \fBtokens\fR for the rest of
+this document.
+Appendix A gives a summary of the
+\fILLgen\fR
+input syntax.
+Appendix B gives an example.
+It is very instructive to compare this example with the one
+given in reference
+.[ (
+yacc
+.]).
+It demonstrates the struggle \fILLparse\fR and other LL(1)
+parsers have with expressions.
+Appendix C gives an example of the \fILLgen\fR features
+allowing the user to recompile only those output files that
+have changed, using the \fImake\fR program.
+.[
+make
+.]
+.NH
+The Extended Context-Free Syntax
+.PP
+The extensions of an ECF syntax with respect to an ordinary CF syntax are:
+.IP 1. 10
+An ECF syntax contains the repetition operator: "N" (N represents a positive
+integer).
+.IP 2. 10
+An ECF syntax contains the closure set operator without and with
+upperbound: "*" and "*N".
+.IP 3. 10
+An ECF syntax contains the positive closure set operator without and with
+upperbound: "+" and "+N".
+.IP 4. 10
+An ECF syntax contains the optional operator: "?", which is a
+shorthand for "*1".
+.IP 5. 10
+An ECF syntax contains parentheses "[" and "]" which can be
+used for grouping.
+.PP
+We can describe the syntax of an ECF syntax with an ECF syntax :
+.DS
+.ft CW
+grammar         : rule +
+                ;
+.ft R
+.DE
+This grammar rule states that a grammar consists of one or more
+rules.
+.DS
+.ft CW
+rule            : nonterminal ':' productionrule ';'
+                ;
+.ft R
+.DE
+A rule consists of a left hand side, the nonterminal,
+followed by ":",
+the \fBproduce symbol\fR, followed by a production rule, followed by a
+";", in\%di\%ca\%ting the end of the rule.
+.DS
+.ft CW
+productionrule  : production [ '|' production ]*
+                ;
+.ft R
+.DE
+A production rule consists of one or
+more alternative productions separated by "|". This symbol is called the
+\fBalternation symbol\fR.
+.DS
+.ft CW
+production      : term *
+                ;
+.ft R
+.DE
+A production consists of a possibly empty list of terms.
+So, empty productions are allowed.
+.DS
+.ft CW
+term            : element repeats
+                ;
+.ft R
+.DE
+A term is an element, possibly with a repeat specification.
+.DS
+.ft CW
+element         : LITERAL
+                | IDENTIFIER
+                | '[' productionrule ']'
+                ;
+.ft R
+.DE
+An element can be a LITERAL, which basically is a single character
+between apostrophes, it can be an IDENTIFIER, which is either a
+nonterminal or a token, and it can be a production rule
+between square parentheses.
+.DS
+.ft CW
+repeats         : '?'
+                | [ '*' | '+' ] NUMBER ?
+                | NUMBER ?
+                ;
+.ft R
+.DE
+These are the repeat specifications discussed above. Notice that
+this specification may be empty.
+.PP
+The class of ECF languages
+is identical with the class of CF languages. However, in many
+cases recursive definitions of language features can now be
+replaced by iterative ones. This tends to reduce the number of
+nonterminals and gives rise to very efficient recursive descent
+parsers.
+.NH
+Grammar Specifications
+.PP
+The major part of a
+\fILLgen\fR
+grammar specification consists of an
+ECF syntax specification.
+Names in this syntax specification refer to either tokens or nonterminal
+symbols.
+\fILLgen\fR
+requires token names to be declared as such. This way it
+can be avoided that a typing error in a nonterminal name causes it to
+be accepted as a token name. The token declarations will be
+discussed later.
+A name will be regarded as a nonterminal symbol, unless it is declared
+as a token name.
+If there is no production rule for a nonterminal symbol, \fILLgen\fR
+will complain.
+.PP
+A grammar specification may also include some C routines,
+for instance the lexical analyzer and an error reporting
+routine.
+Thus, a grammar specification file can contain declarations,
+grammar rules and C-code.
+.PP
+Blanks, tabs and newlines are ignored, but may not appear in names or
+keywords.
+Comments may appear wherever a name is legal (which is almost
+everywhere).
+They are enclosed in
+/* ... */, as in C. Comments do not nest.
+.PP
+Names may be of arbitrary length, and can be made up of letters, underscore
+"\_" and non-initial digits. Upper and lower case letters are distinct.
+Only the first 50 characters are significant.
+Notice however, that the names for the tokens will be used by the
+C-preprocessor.
+The number of significant characters therefore depends on the
+underlying C-implementation.
+A safe rule is to make the identifiers distinct in the first six
+characters, case ignored.
+.PP
+There are two kinds of tokens:
+those that are declared and are denoted by a name,
+and literals.
+.PP
+A literal consists of a character enclosed in apostrophes "'".
+The "\e" is an escape character within literals. The following escapes
+are recognized :
+.TS
+center;
+l l.
+\&'\en'	newline
+\&'\er'	return
+\&'\e''	apostrophe "'"
+\&'\e\e'	backslash "\e"
+\&'\et'	tab
+\&'\eb'	backspace
+\&'\ef'	form feed
+\&'\exxx'	"xxx" in octal
+.TE
+.PP
+Names representing tokens must be declared before they are used.
+This can be done using the "\fB%token\fR" keyword,
+by writing
+.nf
+.ft CW
+.sp 1
+%token  name1, name2, . . . ;
+.ft R
+.fi
+.PP
+\fILLparse\fR is designed to recognize special nonterminal
+symbols called \fBstart symbols\fR.
+\fILLgen\fR allows for more than one start symbol.
+Thus, grammars with more than one entry point are accepted.
+The start symbols must be declared explicitly using the
+"\fB%start\fR" keyword. It can be used whenever a declaration is
+legal, f.i.:
+.nf
+.ft CW
+.sp 1
+%start LLparse, specification ;
+.ft R
+.fi
+.sp 1
+declares "specification" as a start symbol and associates the
+identifier "LLparse" with it.
+"LLparse" will now be the name of the C-function that must be
+called to recognize "specification".
+.NH
+Actions
+.PP
+\fILLgen\fR
+allows arbitrary insertions of actions within the right hand side
+of a production rule in the ECF syntax. An action consists of a number of C
+statements, enclosed in the brackets "{" and "}".
+.PP
+\fILLgen\fR
+generates a parsing routine for each rule in the grammar. The actions
+supplied by the user are just inserted in the proper place.
+There may also be declarations before the statements in the
+action, as
+the "{" and "}" are copied into the target code along with the
+action. The scope of these declarations terminates with the
+closing bracket "}" of the action.
+.PP
+In addition to actions, it is also possible to declare local variables
+in the parsing routine, which can then be used in the actions.
+Such a declaration consists of a number of C variable declarations,
+enclosed in the brackets "{" and "}". It must be placed
+right in front of the ":" in the grammar rule.
+The scope of these local variables consists of the complete
+grammar rule.
+.PP
+In order to facilitate communication between the actions and
+\fILLparse\fR,
+the parsing routines can be given C-like parameters.
+Each parameter must be declared separately, and each of these declarations must
+end with a semicolon.
+For the last parameter, the semicolon is optional.
+.PP
+So, for example
+.nf
+.ft CW
+.sp 1
+expr(int *pval;) { int fact; } :
+                /*
+                 * Rule with one parameter, a pointer to an int.
+                 * Parameter specifications are ordinary C declarations.
+                 * One local variable, of type int.
+                 */
+        factor (&fact)          { *pval = fact; }
+                /*
+                 * factor is another nonterminal symbol.
+                 * One actual parameter is supplied.
+                 * Notice that the parameter passing mechanism is that
+                 * of C.
+                 */
+        [ '+' factor (&fact)    { *pval += fact; } ]*
+                /*
+                 * remember the '*' means zero or more times
+                 */
+        ;
+.sp 1
+.ft R
+.fi
+is a rule to recognize a number of factors, separated by "+", and
+to compute their sum.
+.PP
+\fILLgen\fR
+generates C code, so the parameter passing mechanism is that of
+C, as is shown in the example above.
+.PP
+Actions often manipulate attributes of the token just read.
+For instance, when an identifier is read, its name must be
+looked up in a symbol table.
+Therefore, \fILLgen\fR generates code
+such that at a number of places in the grammar rule
+it is defined which token has last been read.
+After a token, the last token read is this token.
+After a "[" or a "|", the last token read is the next token to
+be accepted by \fILLparse\fR.
+At all other places, it is undefined which token has last been
+read.
+The last token read is available in the global integer variable
+\fILLsymb\fR.
+.PP
+The user may also specify C-code wherever a \fILLgen\fR-declaration is
+legal.
+Again, this code must be enclosed in the brackets "{" and "}".
+This way, the user can define global declarations and
+C-functions.
+To avoid name-conflicts with identifiers generated by
+\fILLgen\fR, \fILLparse\fR only uses names beginning with
+"LL"; the user should avoid such names.
+.NH
+Error Recovery
+.PP
+The error recovery technique used by \fILLgen\fR is a
+modification of the one presented in reference
+.[ (
+automatic construction error correcting
+.]).
+It is based on \fBdefault choices\fR, which just are
+what the word says, default choices at
+every point in the grammar where there is a
+choice.
+Thus, in an alternation, one of the productions is marked as a
+default choice, and in a term with a non-fixed repetition
+specification there will also be a default choice (between
+doing the term (once more) and continuing with the rest of the
+production in which the term appears).
+.PP
+When \fILLparse\fR detects an error after having parsed the
+string @s@, the default choices enable it to compute one
+syntactically correct continuation,
+consisting of the tokens @t sub 1~...~t sub n@,
+such that @s~t sub 1~...~t sub n@ is a string of tokens that
+is a member of the language defined by the grammar.
+Notice, that the computation of this continuation must
+terminate, which implies that the default choices may not
+invoke recursive rules.
+.PP
+At each point in this continuation, a certain number of other
+tokens could also be syntactically correct, f.i. the token
+@t@ is syntactically correct at point @t sub i@ in this
+continuation, if the string @s~t sub 1~...~t sub i~t~s sub 1@
+is a string of the language defined by the grammar for some
+string @s sub 1@ and i >= 0.
+.PP
+The set @T@
+containing all these tokens (including @t sub 1 ,~...,~t sub n@) is computed.
+Next, \fILLparse\fR discards zero
+or more tokens from its input, until a token
+@t@ \(mo @T@ is found.
+The error is then corrected by inserting i (i >= 0) tokens
+@t sub 1~...~t sub i@, such that the string
+@s~t sub 1~...~t sub i~t~s sub 1@ is a string of the language
+defined by the grammar, for some @s sub 1@.
+Then, normal parsing is resumed.
+.PP
+The above is difficult to implement in a recursive descent
+parser, and is not the way \fILLparse\fR does it, but the
+effect is the same. In fact, \fILLparse\fR maintains a list
+of tokens that may not be discarded, which is adjusted as
+\fILLparse\fR proceeds. This list is just a representation
+of the set @T@ mentioned
+above. When an error occurs, \fILLparse\fR discards tokens until
+a token @t@ that is a member of this list is found.
+Then, it continues parsing, following the default choices,
+inserting tokens along the way, until this token @t@ is legal.
+The selection of
+the default choices must guarantee that this will always
+happen.
+.PP
+The default choices are explicitly or implicitly
+specified by the user.
+By default, the default choice in an alternation is the
+alternative with the shortest possible terminal production.
+The user can select one of the other productions in the
+alternation as the default choice by putting the keyword
+"\fB%default\fR" in front of it.
+.PP
+By default, for terms with a repetition count containing "*" or
+"?" the default choice is to continue with the rest of the rule
+in which the term appears, and
+.sp 1
+.ft CW
+.nf
+                term+
+.fi
+.ft R
+.sp 1
+is treated as
+.sp 1
+.nf
+.ft CW
+                term term* .
+.ft R
+.fi
+.PP
+It is also clear, that it can never be the default choice to do
+the term (once more), because this could cause the parser to
+loop, inserting tokens forever.
+However, when the user does not want the parser to skip
+tokens that would not have been skipped if the term
+would have been the default choice,
+the skipping of such a term can be prevented by
+using the keyword "\fB%persistent\fR".
+For instance, the rule
+.sp 1
+.ft CW
+.nf
+commandlist : command* ;
+.fi
+.ft R
+.sp 1
+could be changed to
+.sp 1
+.ft CW
+.nf
+commandlist : [ %persistent command ]* ;
+.fi
+.ft R
+.sp 1
+The effects of this in case of a syntax error are twofold:
+The set @T@ mentioned above will be extended as if "command" were
+in the default production, so that fewer tokens will be
+skipped.
+Also, if the first token that is not skipped is a member of the
+subset of @T@ arising from the grammar rule for "command",
+\fILLparse\fR will enter that rule.
+So, in fact the default choice
+is determined dynamically (by \fILLparse\fR).
+Again, \fILLgen\fR checks (statically)
+that \fILLparse\fR will always terminate, and if not,
+\fILLgen\fR will complain.
+.PP
+An important property of this error recovery method is that,
+once a rule is started, it will be finished.
+This means that all actions in the rule will be executed
+normally, so that the user can be sure that there will be no
+inconsistencies in his data structures because of syntax
+errors.
+Also, as the method is in fact error correcting, the
+actions in a rule only have to deal with syntactically correct
+input.
+.NH
+Ambiguities and conflicts
+.PP
+As \fILLgen\fR generates a recursive descent parser with no backtrack,
+it must at all times be able to determine what to do,
+based on the current input symbol.
+Unfortunately, this cannot be done for all grammars.
+Two kinds of conflicts can arise :
+.IP 1) 10
+the grammar rule is of the form "production1 | production2",
+and \fILLparse\fR cannot decide which production to choose.
+This we call an \fBalternation conflict\fR.
+.IP 2) 10
+the grammar rule is of the form "[ productionrule ]...",
+where ... specifies a non-fixed repetition count,
+and \fILLparse\fR cannot decide whether to
+choose "productionrule" once more, or to continue.
+This we call a \fBrepetition conflict\fR.
+.PP
+There can be several causes for conflicts: the grammar may be
+ambiguous, or the grammar may require a more complex parser
+than \fILLgen\fR can construct.
+The conflicts can be examined by inspecting the verbose
+(-\fBv\fR) option output file.
+The conflicts can be resolved by rewriting the grammar
+or by using \fBconflict resolvers\fR.
+The mechanism described here is based on the attributed parsing
+of reference
+.[ (
+milton
+.]).
+.PP
+An alternation conflict can be resolved by putting an \fBif condition\fR
+in front of the first conflicting production.
+It consists of a "\fB%if\fR" followed by a
+C-expression between parentheses.
+\fILLparse\fR will then evaluate this expression whenever a
+token is met at this point on which there is a conflict, so
+the conflict will be resolved dynamically.
+If the expression evaluates to
+non-zero, the first conflicting production is chosen,
+otherwise one of the remaining ones is chosen.
+.PP
+An alternation conflict can also be resolved using the keywords
+"\fB%prefer\fR" or "\fB%avoid\fR". "\fB%prefer\fR"
+is equivalent in behaviour to
+"\fB%if\fR (1)". "\fB%avoid\fR" is equivalent to "\fB%if\fR (0)".
+In these cases however, "\fB%prefer\fR" and "\fB%avoid\fR" should be used,
+as they resolve the conflict statically and thus
+give rise to better C-code.
+.PP
+A repetition conflict can be resolved by putting a \fBwhile condition\fR
+right after the opening parentheses. This while condition
+consists of a "\fB%while\fR" followed by a C-expression between
+parentheses. Again, \fILLparse\fR will then
+evaluate this expression whenever a token is met
+at this point on which there is a conflict.
+If the expression evaluates to non-zero, the
+repeating part is chosen, otherwise the parser continues with
+the rest of the rule.
+Appendix B will give an example of these features.
+.PP
+A useful aid in writing conflict resolvers is the "\fB%first\fR" keyword.
+It is used to declare a C-macro that forms an expression
+returning 1 if the parameter supplied can start a specified
+nonterminal, f.i.:
+.sp 1
+.nf
+.ft CW
+%first fmac, nonterm ;
+.ft R
+.sp 1
+.fi
+declares "fmac" as a macro with one parameter, whose value
+is a token number. If the parameter
+X can start the nonterminal "nonterm", "fmac(X)" is true,
+otherwise it is false.
+.NH
+The LLgen working environment
+.PP
+\fILLgen\fR generates a number of files: one for each input
+file, and two other files: \fILpars.c\fR and \fILpars.h\fR.
+\fILpars.h\fR contains "#-define"s for the tokennames.
+\fILpars.c\fR contains the error recovery routines and tables.
+Only those output files that differ from their previous version
+are updated. See appendix C for a possible application of this
+feature.
+.PP
+The names of the output files are constructed as
+follows:
+in the input file name, the suffix after the last point is
+replaced by a "c". If no point is present in the input file
+name, ".c" is appended to it. \fILLgen\fR checks that the
+filename constructed this way in fact represents a previous
+version, or does not exist already.
+.PP
+The user must provide some environment to obtain a complete
+program.
+Routines called \fImain\fR and \fILLmessage\fR must be defined.
+Also, a lexical analyzer must be provided.
+.PP
+The routine \fImain\fR must be defined, as it must be in every
+C-program. It should eventually call one of the startsymbol
+routines.
+.PP
+The routine \fILLmessage\fR must accept one
+parameter, whose value is a token number, zero or -1.
+.br
+A zero parameter indicates that the current token (the one in
+the external variable \fILLsymb\fR) is deleted.
+.br
+A -1 parameter indicates that the parser expected end of file, but didn't get
+it.
+The parser will then skip tokens until end of file is detected.
+.br
+A parameter that is a token number (a positive parameter)
+indicates that this
+token is to be inserted in front of the token currently in
+\fILLsymb\fR.
+The user can give the token the proper attributes.
+Also, the user must take care, that the token currently in
+\fILLsymb\fR is again returned by the \fBnext\fR call to the
+lexical analyzer, with the proper attributes.
+So, the lexical analyzer must have a facility to push back one
+token.
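+.PP
+By way of illustration, a minimal \fILLmessage\fR might look as
+follows.
+This is only a sketch: the names "message" and "pushback" are
+hypothetical user routines (an error reporting routine and the
+one-token push-back facility of the lexical analyzer), and no
+attributes are given to inserted tokens.
+.sp 1
+.nf
+.ft CW
+extern int LLsymb;
+
+LLmessage(tk)
+        int tk;
+{
+        if (tk > 0) {
+                /* Token tk will be inserted in front of the current
+                 * token; make sure the lexical analyzer returns the
+                 * current token once more on its next call.
+                 */
+                message("token %d inserted", tk);
+                pushback();
+        }
+        else if (tk == 0)
+                message("token %d deleted", LLsymb);
+        else    /* tk == -1 */
+                message("end of input expected");
+}
+.ft R
+.fi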
+.PP
+The user may also supply his own error recovery routines, or handle
+errors differently. For this purpose, the name of a routine to be called
+when an error occurs may be declared using the keyword \fB%onerror\fR.
+This routine takes two parameters.
+The first one is either the token number of the
+token expected, or 0. In the last case, the error occurred at a choice.
+In both cases, the routine must ensure that the next call to the lexical
+analyser returns the token that replaces the current one. Of course,
+that could well be the current one, in which case
+.I LLparse
+recovers from the error.
+The second parameter contains a list of tokens that are not skipped at the
+error point. The list is in the form of a null-terminated array of integers,
+whose address is passed.
+.PP
+The user must supply a lexical analyzer to read the input stream and
+break it up into tokens, which are passed to
+.I LLparse.
+It should be an integer valued function, returning the token number.
+The name of this function can be declared using the
+"\fB%lexical\fR" keyword.
+This keyword can be used wherever a declaration is legal and may appear
+only once in the grammar specification, f.i.:
+.sp 1
+.nf
+.ft CW
+%lexical scanner ;
+.ft R
+.fi
+.sp 1
+declares "scanner" as the name of the lexical analyzer.
+The default name for the lexical analyzer is "yylex".
+The reason for this funny name is that a useful tool for constructing
+lexical analyzers is the
+.I Lex
+program,
+.[
+lex
+.]
+which generates a routine of that name.
+.PP
+The token numbers are chosen by \fILLgen\fR.
+The token number for a literal
+is the numerical value of the character in the local character set.
+If the tokens have a name,
+the "#\ define" mechanism of C is used to give them a value and
+to allow the lexical analyzer to return their token numbers symbolically.
+These "#\ define"s are collected in the file \fILpars.h\fR which
+can be "#\ include"d in any file that needs the token-names.
+The maximum token number chosen is defined in the macro \fILL_MAXTOKNO\fP.
+.PP
+The lexical analyzer must signal the end
+of input to \fILLparse\fR
+by returning a number less than or equal to zero.
+.NH
+Programs with more than one parser
+.PP
+\fILLgen\fR offers a simple facility for having more than one parser in
+a program: in this case, the user can change the names of global procedures,
+variables, etc, by giving a different prefix, like this:
+.sp 1
+.nf
+.ft CW
+%prefix XX ;
+.ft R
+.fi
+.sp 1
+The effect of this is that all global names start with XX instead of LL, for
+the parser that has this prefix. This holds for the variables \fILLsymb\fP,
+which now is called \fIXXsymb\fP, for the routine \fILLmessage\fP,
+which must now be called \fIXXmessage\fP, and for the macro \fILL_MAXTOKNO\fP,
+which is now called \fIXX_MAXTOKNO\fP.
+\fILL.output\fP is now \fIXX.output\fP, and \fILpars.c\fP and \fILpars.h\fP
+are now called \fIXXpars.c\fP and \fIXXpars.h\fP.
+.bp
+.SH
+References
+.[
+$LIST$
+.]
+.bp
+.SH
+Appendix A : LLgen Input Syntax
+.PP
+This appendix has a description of the \fILLgen\fR input syntax,
+as a \fILLgen\fR specification. As a matter of fact, the current
+version of \fILLgen\fR is written with \fILLgen\fR.
+.nf
+.ft CW
+.sp 2
+/*
+ * First the declarations of the terminals
+ * The order is not important
+ */
+
+%token  IDENTIFIER;            /* terminal or nonterminal name */
+%token  NUMBER;
+%token  LITERAL;
+
+/*
+ * Reserved words
+ */
+
+%token  TOKEN;         /* %token */
+%token  START;         /* %start */
+%token  PERSISTENT;    /* %persistent */
+%token  IF;            /* %if */
+%token  WHILE;         /* %while */
+%token  AVOID;         /* %avoid */
+%token  PREFER;        /* %prefer */
+%token  DEFAULT;       /* %default */
+%token  LEXICAL;       /* %lexical */
+%token  PREFIX;        /* %prefix */
+%token  ONERROR;       /* %onerror */
+%token  FIRST;         /* %first */
+
+/*
+ * Declare LLparse to be a C-routine that recognizes "specification"
+ */
+
+%start  LLparse, specification;
+
+specification
+        : declaration*
+        ;
+
+declaration
+        : START
+                IDENTIFIER ',' IDENTIFIER
+          ';'
+        | '{'
+                /* Read C-declaration here */
+          '}'
+        | TOKEN
+                IDENTIFIER
+                [ ',' IDENTIFIER ]*
+          ';'
+        | FIRST
+                IDENTIFIER ',' IDENTIFIER
+          ';'
+        | LEXICAL
+                IDENTIFIER
+          ';'
+        | PREFIX
+                IDENTIFIER
+          ';'
+        | ONERROR
+                IDENTIFIER
+	  ';'
+        | rule
+        ;
+
+rule    : IDENTIFIER parameters? ldecl?
+                ':' productions
+          ';'
+        ;
+
+ldecl   : '{'
+                /* Read C-declaration here */
+          '}'
+        ;
+
+productions
+        : simpleproduction
+          [ '|' simpleproduction ]*
+        ;
+
+simpleproduction
+        : DEFAULT?
+	  [ IF '(' /* Read C-expression here */ ')'
+          | PREFER
+          | AVOID
+          ]?
+          [ element repeats ]*
+        ;
+
+element : '{'
+                /* Read action here */
+          '}'
+        | '[' [ WHILE '(' /* Read C-expression here */ ')' ]?
+                PERSISTENT?
+                productions
+          ']'
+        | LITERAL
+        | IDENTIFIER parameters?
+        ;
+
+parameters
+        : '(' /* Read C-parameters here */ ')'
+        ;
+
+repeats : /* empty */
+        | [ '*' | '+' ] NUMBER?
+        | NUMBER
+        | '?'
+        ;
+
+.fi
+.ft R
+.bp
+.SH
+Appendix B : An example
+.PP
+This example gives the complete \fILLgen\fR specification of a simple
+desk calculator. It has 26 registers, labeled "a" through "z",
+and accepts arithmetic expressions made up of the C operators
++, -, *, /, %, &, and |, with their usual priorities.
+The value of the expression is
+printed. As in C, an integer that begins with 0 is assumed to
+be octal; otherwise it is assumed to be decimal.
+.PP
+Although the example is short and not very complicated, it
+demonstrates the use of if and while conditions. In
+the example they are in fact used to reduce the number of
+nonterminals, and to reduce the overhead due to the recursion
+that would be involved in parsing an expression with an
+ordinary recursive descent parser. In an ordinary LL(1)
+grammar there would be one nonterminal for each operator
+priority. The example shows how we can do it all with one
+nonterminal, no matter how many priority levels there are.
+.sp 1
+.nf
+.ft CW
+{
+#include <stdio.h>
+#include <ctype.h>
+#define MAXPRIO      5
+#define prio(op)     (ptab[op])
+
+struct token {
+        int     t_tokno;        /* token number */
+        int     t_tval;         /* Its attribute */
+} stok = { 0,0 }, tok;
+
+int     nerrors = 0;
+int     regs[26];               /* Space for the registers */
+int     ptab[128];              /* Attribute table */
+
+struct token
+nexttok() {  /* Read next token and return it */
+        register        c;
+        struct token    new;
+
+        while ((c = getchar()) == ' ' || c == '\et') { /* nothing */ }
+        if (isdigit(c)) new.t_tokno = DIGIT;
+        else if (islower(c)) new.t_tokno = IDENT;
+        else new.t_tokno = c;
+        if (c >= 0) new.t_tval = ptab[c];
+        return new;
+}   }
+
+%token  DIGIT, IDENT;
+%start  parse, list;
+
+list    : stat* ;
+
+stat    {       int     ident, val; } :
+        %if (stok = nexttok(),
+             stok.t_tokno == '=')
+                    /* The conflict is resolved by looking one further
+                     * token ahead. The grammar is LL(2)
+                     */
+          IDENT
+                                {       ident = tok.t_tval; }
+          '=' expr(1,&val) '\en'
+                                {       if (!nerrors) regs[ident] = val; }
+        | expr(1,&val) '\en'
+                                {       if (!nerrors) printf("%d\en",val); }
+        | '\en'
+        ;
+
+expr(int level; int *val;) {       int     expr; } :
+          factor(val)
+          [ %while (prio(tok.t_tokno) >= level)
+                    /* Swallow operators as long as their priority is
+                     * larger than or equal to the level of this invocation
+                     */
+              '+' expr(prio('+')+1,&expr)
+                                {       *val += expr; }
+                    /* This states that '+' groups left to right. If it
+                     * should group right to left, the rule should read:
+                     * '+' expr(prio('+'),&expr)
+                     */
+            | '-' expr(prio('-')+1,&expr)
+                                {       *val -= expr; }
+            | '*' expr(prio('*')+1,&expr)
+                                {       *val *= expr; }
+            | '/' expr(prio('/')+1,&expr)
+                                {       *val /= expr; }
+            | '%' expr(prio('%')+1,&expr)
+                                {       *val %= expr; }
+            | '&' expr(prio('&')+1,&expr)
+                                {       *val &= expr; }
+            | '|' expr(prio('|')+1,&expr)
+                                {       *val |= expr; }
+          ]*
+                    /* Notice the "*" here. It is important.
+                     */
+	;
+
+factor(int *val;):
+            '(' expr(1,val) ')'
+          | '-' expr(MAXPRIO+1,val)
+                                {       *val = -*val; }
+          | number(val)
+          | IDENT
+                                {       *val = regs[tok.t_tval]; }
+        ;
+
+number(int *val;) {       int base; }
+        : DIGIT
+                                {       base = (*val=tok.t_tval)==0?8:10; }
+          [ DIGIT
+                                {       *val = base * *val + tok.t_tval; }
+          ]*        ;
+
+%lexical scanner ;
+{
+scanner() {
+        if (stok.t_tokno) { /* a token has been inserted or read ahead */
+                tok = stok;
+                stok.t_tokno = 0;
+                return tok.t_tokno;
+        }
+        if (nerrors && tok.t_tokno == '\en') {
+                printf("ERROR\en");
+                nerrors = 0;
+        }
+        tok = nexttok();
+        return tok.t_tokno;
+}
+
+LLmessage(insertedtok) {
+        nerrors++;
+        if (insertedtok) { /* token inserted, save old token */
+                stok = tok;
+                tok.t_tval = 0;
+                if (insertedtok < 128) tok.t_tval = ptab[insertedtok];
+        }
+}
+
+main() {
+        register *p;
+
+        for (p = ptab; p < &ptab[128]; p++) *p = 0;
+        /* for letters, their attribute is their index in the regs array */
+        for (p = &ptab['a']; p <= &ptab['z']; p++) *p = p - &ptab['a'];
+        /* for digits, their attribute is their value */
+        for (p = &ptab['0']; p <= &ptab['9']; p++) *p = p - &ptab['0'];
+        /* for operators, their attribute is their priority */
+        ptab['*'] = 4;
+        ptab['/'] = 4;
+        ptab['%'] = 4;
+        ptab['+'] = 3;
+        ptab['-'] = 3;
+        ptab['&'] = 2;
+        ptab['|'] = 1;
+        parse();
+	exit(nerrors);
+}   }
+.fi
+.ft R
+.bp
+.SH
+Appendix C : How to use \fILLgen\fR with \fImake\fR
+.PP
+This appendix demonstrates how \fILLgen\fR can be used in
+combination with the \fImake\fR program, to make effective use
+of the fact that \fILLgen\fR only changes its output files
+when necessary. \fIMake\fR uses a "makefile", which
+is a file containing dependencies and associated commands.
+A dependency usually indicates that some files depend on other
+files. When a file depends on another file and is older than
+that other file, the commands associated with the dependency
+are executed.
+.PP
+So, \fImake\fR seems to be just the program we always wanted.
+However, it
+is not very good at handling programs that generate more than
+one output file.
+As usual, there is a way around this problem.
+A sample makefile follows:
+.sp 1
+.ft CW
+.nf
+# The grammar consists of the files decl.g, stat.g and expr.g.
+# The ".o"-files are the result of a C-compilation.
+
+GFILES = decl.g stat.g expr.g
+OFILES = decl.o stat.o expr.o Lpars.o
+LLOPT =
+
+# As make doesn't handle programs that generate more than one
+# file well, we just don't tell make about it.
+# We just create a dummy file, and touch it whenever LLgen is
+# executed. This way, the dummy in fact depends on the grammar
+# files.
+# Then, we execute make again, to do the C-compilations and
+# such.
+
+all:	dummy
+        make parser
+
+dummy:  $(GFILES)
+        LLgen $(LLOPT) $(GFILES)
+        touch dummy
+
+parser: $(OFILES)
+        $(CC) -o parser $(LDFLAGS) $(OFILES)
+
+# Some dependencies without actions :
+# make already knows what to do about them
+
+Lpars.o:        Lpars.h
+stat.o:         Lpars.h
+decl.o:         Lpars.h
+expr.o:         Lpars.h
+
+.fi
+.ft R

+ 54 - 0
doc/LLgen/LLgen.refs

@@ -0,0 +1,54 @@
+%T An ALL(1) Compiler Generator
+%A D. R. Milton
+%A L. W. Kirchhoff
+%A B. R. Rowland
+%B Proc. of the SIGPLAN '79 Symposium on Compiler Construction
+%D August 1979 
+%J SIGPLAN Notices
+%N 8
+%P 152-157
+%V 14
+
+%T Lex - A Lexical Analyser Generator
+%A M. E. Lesk
+%I Bell Laboratories
+%D October 1975
+%C Murray Hill, New Jersey
+%R Comp. Sci. Tech. Rep. No. 39
+
+%T Yacc: Yet Another Compiler Compiler
+%A S. C. Johnson
+%I Bell Laboratories
+%D 1975
+%C Murray Hill, New Jersey
+%R Comp. Sci. Tech. Rep. No. 32
+
+%T The C Programming Language
+%A B. W. Kernighan
+%A D. M. Ritchie
+%I Prentice-Hall, Inc.
+%C Englewood Cliffs, New Jersey
+%D 1978
+
+%A M. Griffiths
+%T LL(1) Grammars and Analysers
+%E F. L. Bauer and J. Eickel
+%B Compiler Construction, An Advanced Course
+%I Springer-Verlag
+%C New York, N.Y.
+%D 1974
+
+%T Make - A Program for Maintaining Computer Programs
+%A S. I. Feldman
+%J Software - Practice and Experience
+%V 10
+%N 8
+%P 255-265
+%D August 1979
+
+%T Methods for the Automatic Construction of Error Correcting Parsers
+%A J. R\*:ohrich
+%J Acta Informatica
+%V 13
+%P 115-139
+%D 1980

+ 2712 - 0
doc/LLgen/LLgen_NCER.n

@@ -0,0 +1,2712 @@
+.RP
+.TL
+
+
+
+Top-down Non-Correcting Error Recovery 
+ in LLgen
+.AU
+Arthur van Deudekom
+Peter Kooiman
+.AI
+Department of Mathematics and Computer Science
+Vrije Universiteit 
+Amsterdam
+
+
+
+
+
+Supervised by
+.AU
+dr. D. Grune
+.AI
+Department of Mathematics and Computer Science
+Vrije Universiteit
+Amsterdam
+
+.AB
+This paper describes the design and implementation of a parser
+generator with non-correcting error recovery based on the extended LL(1)
+parser generator LLgen. It describes a top-down algorithm for implementing 
+this error recovery technique that can handle left-recursive grammars. 
+The parser generator has been tested with several existing ACK-compilers, 
+among which the C and Modula-2 compilers. Various optimizations have been tried and are
+discussed in this paper. 
+.AE
+.LP
+.nr PS 12
+.nr VS 14
+
+.NH
+Introduction
+.EQ
+delim $$
+.EN
+
+.nr PS 10
+.nr VS 12
+.RS
+.LP
+One of the trickier problems in constructing parser-generators is what
+to do when the input to the generated parser is not well formed. Several
+approaches are known, most of which are `correcting', meaning that they
+modify the input to make it correct. However, in most cases there are
+several possible corrections, and often the one chosen will turn out
+to be the wrong one. As a result of such an incorrect choice, spurious error 
+messages can occur. Every programmer knows from experience how the omission 
+of a single `)' can on occasion lead to pages of error messages. 
+
+.LP
+A radically different approach is to just discard all the input up to
+and including the offending token, and start with a clean slate at the
+token following the offending one. [RICHTER] describes how
+this idea can be used to construct a non-correcting error recovery system
+that will never introduce spurious error messages. It is, however,
+possible that errors are overlooked.
+
+.LP
+In this paper we describe the incorporation of this non-correcting error
+recovery into LLgen, an existing LL(1) parser generator.
+In this introduction, we will describe in detail this non-correcting error
+recovery technique, give an overview of LLgen and how it handles
+errors, and finally describe how we have incorporated noncorrecting
+error recovery in LLgen.
+.RE
+
+.NH 2
+Non-correcting syntax error recovery
+
+.LP
+Richter describes how syntax error recovery can be done
+without making any corrections to the input text. Richter gives three
+reasons why recovery without correction is desirable:
+
+.IP 1
+In most cases there are many possible corrections, the choice among which
+will severely influence the further processing of the input. Thus, the
+probability of selecting the right correction is not high.
+
+.IP 2
+The harm done by selecting the wrong correction is often unlimited.
+
+.IP 3
+The loss of information to the user of a non-correcting recovery technique
+need not be grave.
+
+.LP
+The non-correcting technique described by Richter can be summarized as
+follows: When a syntax-error has occurred, the input up to and including the
+erroneous symbol is discarded; the remainder of the 
+input is processed by a substring parser of the input
+language, that is a parser that recognizes any substring of a string in the input 
+language. When the substring parser detects a syntax error, the offending 
+symbol is reported as another error, and the input up to and including the 
+erroneous symbol is discarded. The process is then repeated with the remaining input, possibly
+finding other syntax errors, until all the input is scanned.
+This process yields what Richter calls a 
+.I 
+suffix analysis 
+.R
+of an input string. Formally, given an input string 
+.I x
+, suffix analysis produces a set of strings $w sub k$ and a set of symbols
+$ a sub k$ such that
+.br
+
+.IP
+$x~ =~ w sub 0 a sub 0 w sub 1 a sub 1~...w sub n-1 a sub n-1 w sub n$
+.LP
+and such that:
+.br
+.IP
+ $w sub 0$ is the longest prefix of $x$ that is  a prefix of
+a string in the input language L, formally: there is a string $y$ such that 
+$w sub 0 y$ is in  L, but there is no string $z$ such that $w sub 0 a sub 0 z$
+is in L;
+.IP
+For $0 < k < n$, $w sub k$ is a longest substring of $x$ that is also a
+substring of a string in L, formally: there are strings $u$ and $v$ such that
+$u w sub k v$ is in L, but there are no strings $y$ and $z$ such that
+$y w sub k a sub k z$ is in L;
+.IP
+$w sub n$ is a substring of $x$ 
+that is a substring of a string in L, formally:
+there exist $u$ and $v$, such that $u w sub n v$ is in L. Note that
+$w sub n$ need not be a suffix of a string in L, if $x$ represents incomplete
+input $w sub n$ is not a suffix of a string in L.
+
+.LP
+Now, the $a sub k$ indicate points at which an error is detected. The
+"real" error need not be at $a sub k$, it can have occurred anywhere
+within $w sub k a sub k$.
+In his paper, Richter shows that, although this method may miss errors, it 
+will never introduce spurious errors.
+
+.LP
+For implementing the technique, a parser that recognizes any
+substring of the input language is needed. If we confine ourselves to
+syntactical analysis, it is sufficient to construct a substring
+recognizer. Richter himself does not give a practical construction, but
+[CORMACK] describes how a LR substring parser can be constructed
+that handles BC-LR(1,1) grammars. In this paper, we describe the construction
+of a LL substring recognizer that can handle any grammar. Furthermore,
+our recognizer is actually a suffix-recognizer, that is, a recognizer that 
+recognizes any suffix of a string in the input language. Our suffix recognizer has the
+correct-prefix property, 
+meaning that it detects the first syntax error as early as possible
+in a left-to-right scan of the input. Specifically, if the input language
+is L and the invalid input is $x$ , it finds a string $w$ and an input symbol 
+$a$ such that $x = way$  , there is a string $z$ such that $wz$
+is in L, and there is no string $z$ such that $waz$ is in L.
+Because the suffix parser has this correct-prefix property, it can be
+used as a substring parser, because it will detect the first input symbol that
+is not part of a substring of the language. Because it is a suffix-recognizer,
+it additionally will detect incomplete input, because in that case 
+at the end of the input the parser will not be in an accepting state.
+
+.NH 2
+Overview of LLgen
+
+.LP
+LLgen is an extended LL(1) parser generator. For a complete description, 
+see [GRUNE].
+LLgen can actually handle grammars that are not LL(1), because it allows
+the use of conflict-resolvers. In case of an LL(1) conflict, these resolvers
+are used to statically or dynamically decide which rule to use. As we will see
+later, this feature makes it necessary for the suffix-recognizer to 
+handle grammars that are not LL(1). Semantic actions can occur anywhere
+in the grammar rules, and they are executed when their position is 
+reached during parsing. A typical LLgen rule looks like
+.br
+.IP
+S:	A {
+.I action
+} B
+.LP
+where the action is a piece of C-code, that will be executed
+when the parser is using the rule for S and has recognized A.
+
+.LP
+LLgen-generated parsers use correcting syntax error recovery, based on a
+scheme designed by R\*:ohrich [ROEHRICH], inserting or deleting symbols at the point of error detection
+until correct input results. This means that actions in the parser will
+always be executed in an order that could also have resulted from
+syntactically correct input, and most importantly, once a grammar-rule
+is started it is guaranteed to be completed. This means that syntactic
+errors can never result in inconsistencies for the actions. Actions
+only have to deal with syntactically correct input. In a nutshell, the
+error recovery in LLgen-parsers works as follows: Suppose the parser is
+presented with correct input that breaks off before the end. The error
+recovery mechanism now provides a continuation path, chosen in such a
+way that all active rules are left as soon as possible. Effectively, the
+continuation path is the `shortest way out'. The symbols on this path are
+called `acceptable', and end-of-file is also `acceptable'. Furthermore, at
+each point along this `shortest path' there can be other terminals that
+would be correct; these are `acceptable' as well. Now, when an
+error occurs, all symbols that are not acceptable are discarded, until 
+an acceptable symbol appears in the input. The tokens on the path up to 
+but not including the acceptable input symbol are inserted. 
+From then on, normal parsing resumes.
+
+.NH 2
+Incorporation of non-correcting error recovery in LLgen
+
+.LP
+An important consideration in incorporating the non-correcting recovery
+in LLgen was that correct programs should suffer as little as possible
+in what regards compilation speed. Furthermore, the existing error
+recovery method has the highly desirable property that rules that are
+started will be finished too, thus ensuring that errors in the
+input text will not cause inconsistencies in the semantic actions. We have
+implemented the non-correcting error recovery in such a way that this 
+property is preserved.
+
+.LP
+The way we have achieved these goals is by actually including
+the suffix recognizer as a `second recognizer' in the generated parser. 
+Correct programs are handled in the usual way by the parser, but if an error
+occurs the following happens: instead of going to the standard error
+recovery routine, the parser starts executing the non-correcting error
+handler. This process continues, reporting all errors, until the
+end of the input text is reached. Then, control is handed back to
+the standard error recovery routine. This routine will now think
+there is no more input, and thus start inserting tokens so as to construct
+a `shortest way out'. This ensures that all rules that were started are
+also finished, and no inconsistencies can occur in the semantic actions. 
+However, this method does require some modifications to the error reporting 
+routine. Normally, if the generated parser inserts a token, it reports 
+this to the user, but in this case this is undesirable. The insertions only 
+serve to maintain consistency in the semantic actions 
+and do not signify errors, so reporting of insertions should be suppressed. 
+.bp
+.nr PS 12
+.nr VS 14
+.PS
+boxwid = boxwid / 1.5
+boxht = boxht / 1.5
+arcrad = arcrad / 1.5
+movewid = movewid / 1.5
+moveht = moveht / 1.5
+arrowwid = arrowwid / 1.5
+arrowht = arrowht / 1.5
+arrowhead = arrowhead / 1.5
+linewid = linewid / 1.5
+lineht = lineht / 1.5
+.PE
+.NH
+The LL suffix parser
+
+.nr PS 10
+.nr VS 12
+.RS
+.LP
+In this chapter, we describe the construction of the LL suffix parser.
+The described parser is not restricted to LL(1) grammars, because the
+presence of conflict resolvers in LLgen allows for more general grammars,
+that may even be left-recursive. We start this chapter with a discussion
+of the implications of conflict resolvers, and continue with descriptions
+of the parser algorithm, the used data-structures,
+the handling of left- and right recursion, and some possible optimizations.
+.RE
+
+.NH 2
+LLgen conflict resolvers and their implications
+
+.LP
+In grammars that are nearly but not completely LL(1), conflicts
+will arise in the two places where parsing decisions are made: the choice
+of which alternative to start (`alternation conflicts') and the decision
+to stop or continue a repeated item (`repetition conflicts'). In order to
+allow LLgen to handle this type of grammar, the user can 
+specify conflict resolvers in those places where conflicts arise.
+These resolvers are Boolean expressions labeling an alternative,
+and are evaluated when a conflict arises during parsing. If the 
+expression evaluates to `true' the labeled alternative will be taken.
+The Boolean expressions are expressions in C, and can consult
+any information available at the point they occur.
+However, if a syntactic error has occurred in the input, and the non-correcting
+error recovery starts, we can no longer rely on the conflict resolvers to 
+guide parsing decisions. The suffix recognizer is only concerned with
+syntax, and will not execute any semantic actions. It recognizes suffixes
+of correct input, but does not know or care what prefix would make
+the suffix a correct program; as a result, the information that conflict
+resolvers could use is not available, because the semantic actions
+that would build this information have not been executed.
+Therefore, the information used by the conflict resolvers is no longer 
+reliable, and the suffix parser needs to be able to handle the underlying
+grammar without their help. In particular, it has to be able to handle
+left-recursive grammars.  
+
+.NH 2
+The suffix parser algorithm
+
+.LP
+Our algorithm needs easy access to the grammar rules; in the description
+we assume there is an efficient way to access the grammar rules. In 
+the next chapter we will describe the details of the actual implementation.
+For the moment, we will only consider grammars that are not left- or 
+right-recursive. In the next section, we will discuss how the algorithm has to be adapted
+to handle left- and right recursion. 
+
+.LP
+Suppose the grammar is G, and the input to the suffix recognizer is 
+$a sub 0 a sub 1 ... a sub n-1 a sub n$. Remember that parsing is
+always started by the `normal' LLgen generated parser. It's only after
+a syntactic error has occurred that the suffix recognizer will be started.
+The input to the suffix recognizer thus is the `tail' of the input, starting
+at the first symbol after the position where the first syntax error was
+found.
+
+.LP
+Now, in order to get parsing going again, the parser scans the grammar
+for rules which contain symbol $a sub 0$ in the right hand side:
+.br
+
+	A:	$alpha ~ a sub 0 ~ beta$
+.br
+
+.LP
+where $alpha$ and $beta$ represent a string of terminals and non-terminals,
+possible empty. Now, for each of these rules found, and for any string  
+$b sub 0 b sub 1$...$ b sub m$ that can be generated by $beta$ it holds that
+$a sub 0 b sub 0 b sub 1$...$b sub m$ is a substring of some string in L.
+This can be shown as follows, supposing that the start symbol is S and
+S $-> sup * gamma$  A $delta$:
+.br
+
+S $-> sup * gamma$ A $delta$ $-> sup * gamma ~ alpha ~ a sub 0 beta ~ delta
+-> sup * gamma ~ alpha ~ a sub 0 b sub 0 b sub 1$...$b sub m delta$
+
+.br
+Of course, there may very well be more than one such string
+$b sub 0 b sub 1$...$b sub m$, and one of these strings can be empty as well, if
+$beta$ can produce empty. Now, in what we will call the 
+.I 
+predicting phase
+.R
+the algorithm will
+produce all possible symbols $b sub 0$. Then, in what we will call the
+.I 
+accepting phase
+.R
+these symbols  are matched against
+the input, and those not matching are discarded. Then, entering the next
+predicting phase, the algorithm will produce
+all symbols $b sub 1$, and match them against the next input symbol in
+the subsequent accepting phase,
+etc. In case one of the strings $b sub 0$...$b sub m$ is empty, or
+the end of one of the strings is reached, some way to continue is
+needed; we will discuss this later. First let's see how the
+algorithm produces the strings $b sub 0$...$b sub m$ .
+
+.LP
+For each rule in the grammar of the form
+.br
+
+	A:	$alpha a sub 0 W sub 1 W sub 2$...$W sub p$
+.br
+
+with each $W sub k$ a terminal or nonterminal, a 
+.I
+prediction graph 
+.R
+is created that looks like this:
+
+.PS
+down; box "$a sub 0$"; arrow; box "$W sub 1$"; arrow
+box "$W sub 2$"; arrow dashed; box "$W sub p$"
+arrow; box "END" "$[A]$"
+.PE
+
+.LP
+The bottom element of these prediction graphs is an end-marker containing the
+left-hand side of the rule used. All these graphs have $a sub 0$ on top, and
+this $a sub 0$ is matched against the $a sub 0$ in the input in the
+accepting phase that follows, removing the
+$a sub 0$ from the graph. If the prediction graph is now empty, we have to find a way 
+to continue;  this case is treated later. First we will consider what to do if
+the prediction graph is not empty. There are two possibilities: either $W sub 1$ is a 
+terminal, or it is a nonterminal. If it is a terminal, we are finished for 
+the moment; if not, the algorithm scans for rules of the form
+.br
+
+	$W sub 1$:	$U sub 1 U sub 2$...$U sub i$
+.br
+
+.LP
+with each $U sub k$ a terminal or nonterminal. Now, the algorithm substitutes 
+the top of the prediction graph with the right-hand sides 
+of all the rules found. Because there can be more than one rule, the
+prediction graph can now become a DAG (Directed Acyclic Graph).
+Supposing there are two rules with $W sub 1$ in the LHS:
+
+.br
+
+	$W sub 1$:	$U sub 1 U sub 2$...$U sub i$
+.br
+	$W sub 1$:	$V sub 1 V sub 2$...$V sub j$
+
+.LP
+the prediction graph will now look like this:
+
+.PS
+B1: box "$U sub 1$"
+move 
+B2: box "$V sub 1$"
+arrow dashed down from bottom of B1
+B3: box "$U sub i$"
+arrow dashed down from bottom of B2
+B4:box "$V sub j$"
+move to 0.5 <B3.se, B4.sw>
+down;move
+B5:box "$[W sub 1 ]$"
+arrow dashed;
+box "$W sub p$"
+arrow;
+box "END" "$[A]$"
+arrow from B3.bottom to B5.top
+arrow from B4.bottom to B5.top
+.PE
+
+.LP
+The graph element representing $W sub 1$ is left in the graph; the
+notation $[W sub 1 ]$ indicates that it has been substituted. These substituted
+elements will from now on be ignored by the algorithm. The elements
+$U sub 1$ and $V sub 1$ are now `on top' of the prediction graph.
+
+.LP
+If $W sub 1$ can also produce empty, its successor in the prediction graph 
+has to be processed
+as well; the algorithm walks down the graph to this successor, and
+there the process is repeated; if it is a terminal we are finished, else we 
+substitute it with the right hand sides of its grammar rule. 
+However, the element that we want to substitute now, say $W sub k$, cannot
+be marked `substituted' just like that, because it can be on another
+path, on which it cannot be substituted yet. Therefore, a copy of element
+$W sub k$ is made, it is marked $[W sub k ]$, and an edge is created
+from $[W sub k ]$ to the successor of $W sub k$. This produces graphs like
+this:
+.br
+.PS
+B1: box "$U sub 1$"
+move
+B2: box "$V sub 1$"
+move
+X1:box "$X sub 1$"
+arrow dashed down from bottom of B1
+B3: box "$U sub m$"
+arrow dashed down from bottom of B2
+B4:box "$V sub j$"
+arrow dashed down from bottom of X1
+Xj: box "$X sub j$"
+move to 0.5 <B3.se, B4.sw>
+down;move
+B5:box "$[W sub 1 ]$"
+arrow dashed;
+B6: box "$W sub k$"
+arrow
+Wk1:box "$W sub k+1$"
+arrow dashed
+box "$W sub n$"
+arrow;
+box "END" "$[A]$"
+arrow from B3.bottom to B5.top
+arrow from B4.bottom to B5.top
+move down from Xj.top;move;move;move
+Wk: box "$[W sub k ]$"
+arrow from Xj.bottom to Wk.top
+arrow from Wk.bottom to Wk1.top
+.PE
+
+.LP
+This process of substituting is repeated with all nonterminals that are
+now on top of the prediction graph, until there are only terminals on top of 
+the graph.
+This completes the prediction phase of the algorithm, not taking into account
+what to do if an END marker appears on top of the graph.
+Now, the algorithm enters its accepting phase, in which
+the terminals on top are compared with the next symbol in the input.
+If a terminal in the graph matches the input, its element is deleted
+from the graph, and the substitution process will continue with its
+successors, in the next prediction phase.
+If a terminal on top of the graph does not
+match the input, the path it is on represents a `dead-end', which
+does not need to be processed any further. The terminal is no longer
+a `top', and the algorithm will not visit it again.
+
+.LP
+There is one tricky situation: consider again this graph:
+
+.PS
+B1: box "$U$"
+move
+B2: box "$a$"
+move to 0.5 <B1.se, B2.sw>
+down;move
+B5:box "$W sub 1 $"
+arrow dashed;
+box "$W sub n$"
+arrow;
+box "END" "$[A]$"
+arrow from B1.bottom to B5.top
+arrow from B2.bottom to B5.top
+.PE
+
+.LP
+Here, the algorithm is processing $W sub 1$ in the predicting phase, and
+using some rule it has produced $a$ on top; there is another rule with
+$W sub 1$ in its LHS which has produced nonterminal $U$ on top.
+Now, suppose $U$ is a  nonterminal that can 
+produce empty. Now, the algorithm starts substituting $U$, and walks
+down $W sub 1$. What we definitely do not want 
+is the algorithm to start substituting $W sub 1$ again, because then we
+would loop forever. Therefore, if the algorithm starts processing 
+element $W sub 1$ it should make it $[W sub 1 ]$ before it does
+anything else. On entering the element 
+for the second time in the prediction phase, it sees that it is already substituted,
+so there is nothing to do.
+It then just walks to the successor of $W sub 1$ and
+starts substituting it. This is correct, since the fact that the algorithm
+enters an element for the second time in a prediction phase  means that the element
+indirectly can produce the empty string, and thus its successor must
+be substituted as well in the prediction phase.
+
+.LP
+It is easy to see that the substitution process will stop: the algorithm can 
+only loop if  it starts processing an element for the second time in a
+prediction phase, 
+or if the  processing of an element eventually yields a graph with that 
+same element on top. 
+The first case cannot occur because the algorithm marks elements it is 
+processing as `substituted' before it does anything else, meaning that those elements will not
+be processed again; the second case can only occur if the grammar is 
+left-recursive, which we assumed it was not. 
+
+.LP
+The algorithm simulates
+left-most derivations of strings $a sub 0 b sub 0 b sub 1$..$b sub n$
+starting from $a sub 0 W sub 1$..$W sub p$; as we showed before, if
+the algorithm recognizes a string $a sub 0 b sub 0$..$b sub n$ that
+string is a substring of some string in L. Conversely, because the
+algorithm starts out by using all rules of the form
+A:	$alpha a sub 0 beta$, and then proceeds to simulate all
+possible left-most derivations, it will recognize all input
+$a sub 0 b sub 0$... $b sub n$ that can be produced starting from
+$a sub 0 beta$.
+
+.LP
+Now we will discuss what has to be done if an END marker appears as
+top of the prediction graph. 
+When this happens, it means that starting from some rule 
+.br
+
+	A:	$alpha a sub 0 beta$
+
+.br
+the algorithm has produced a leftmost-derivation of a string 
+$a sub 0 b sub 1 .. b sub n$ starting from $a sub 0 beta$, or that $beta$ can produce
+empty and the string so far is just $a sub 0$. The next step is to assume
+that the have recognized A and that that some string produced by $alpha$
+is part of the prefix that makes the suffix we are recognizing a 
+correct string in L. Remember that in the END marker we kept record of
+the LHS of the rule that has started the graph, and we will now use this
+LHS to continue recognizing. What the algorithm does is scan for all
+rules of the form:
+.br
+
+	B:	$gamma$ A $delta$
+.br
+
+with $gamma$ and $delta$ possibly empty strings of terminals and nonterminals.
+The algorithm now starts a new component in the prediction graph, and if $delta$ is
+$W sub 1 W sub 2$...$W sub n$ it looks like this:
+
+.PS
+down;box "$W sub 1$"; arrow
+box "$W sub 2$"; line dashed; box "$W sub n$"
+arrow; box "END" "$[B]$"
+.PE
+
+.LP
+Note that the END marker now contains B, because we have started to match
+a rule for B. If the $delta$ in the rule for B was empty, this just produces
+an END marker with B in it; in this case, the process is just repeated
+with all rules of the form:
+.br
+
+	C:	$zeta$ B $eta$
+.br
+
+.LP
+etc, until we have a prediction graph with a nonterminal or terminal on top.
+Now, the substitution algorithm is again applied over all nonterminals on
+top, until every top contains a terminal. It is possible that during
+substitution again an END marker will turn up; if this happens
+we again scan for rules to continue with etc. 
+This `continuation algorithm' can only loop if, when
+trying to build a new prediction graph for matched symbol A, it again produces an
+empty graph with matched symbol A. If this happens, the grammar was
+(directly or indirectly) right-recursive, and we assumed that it was not.
+Therefore, the algorithm will terminate. The terminals on top of the
+new graph after applying this `continuation' algorithm are exactly those
+that could follow the string $a sub 0 b sub 0$..$b sub n$ in a substring
+of a string in L.
+To see this, suppose we have `recognized' the rule
+.br
+
+	A:	$alpha a sub 0 beta$
+
+.br
+and $a sub 0 b sub 0 b sub 1$...$b sub n$ is the string produced from 
+$a sub 0 beta$ by the algorithm. Now, using rule:
+.br
+
+	B:	$gamma$ A $delta$
+
+.br
+and supposing that S $-> sup *$ $zeta$ B $eta$ we get
+.br
+
+	S $-> sup *$ $zeta$ B $eta$ $->$ $zeta gamma$ A $delta$ $eta$ $-> sup *$ $zeta gamma alpha ~ a sub 0 b sub 0 b sub 1$ ... $b sub n$ $delta$ $eta$
+
+.br
+.LP
+and thus any string produced by a derivation starting from
+$delta$ can come right after $a sub 0 b sub 0$...$b sub n$ in a substring 
+of some string in L. The algorithm will proceed to generate all these
+strings starting from $delta$. If $delta$ produces empty, the above
+is just repeated. Because in the `continuation' part
+all possible rules are considered, the whole algorithm will recognize
+all substrings of any string in L. In order to determine if we 
+have actually recognized a suffix of some string in L, we need to
+remember if within a predicting phase the `continuation' part of the algorithm has been run
+on an END marker containing the start-symbol S;
+if this is the case, then the input seen until now is a suffix of some string in L.
+Formally, it means that there is a derivation starting from start symbol
+$S$ such that if the
+input seen until now is $a sub 0 a sub 1$..$a sub n$, then:
+.br
+
+	S $-> sup * alpha beta$ $-> sup * alpha a sub 0 a sub 1$..$a sub n$
+.br
+
+.LP
+where $alpha$ can be empty, $beta$ is not empty.
+
+.NH 2
+The prediction graph data structure
+
+.LP
+The graphs that are produced by the suffix recognizer may grow extremely
+large; to facilitate an efficient
+implementation we have devised a way of keeping the size of the
+data structure under control, in a way that is very similar to
+the way described in [TOMITA].
+
+.LP 
+The basic idea is that, in a prediction phase of the algorithm, it is not
+necessary to explicitly substitute each nonterminal every time it
+turns up as a `top'; it is sufficient to do it once, because the
+second substitution will produce exactly the same subgraph starting at
+the substituted nonterminal. Here is an example:
+
+.PS
+down;box "$a$";arrow;box "A";arrow dashed;box "[B]";arrow
+box "C";arrow dashed;box "END" "[X]"
+move right from last box.e;
+box "END" "[Y]";
+arrow <- dashed up from last box.top;
+box "D";arrow <- up from last box.top
+box "B"
+.PE
+
+.LP
+Here, in the left component of the graph, nonterminal B has been
+substituted. Now, in the same prediction phase, the algorithm again runs into
+B, now in the right component. There is no need to compute again
+what the substitution will produce, it is exactly the part on top
+of B in the left component. Therefore, all that is needed is:
+
+.PS
+down;box "$a$";arrow;box "A";arrow dashed;
+B1: box "[B]";arrow 
+box "C";arrow dashed;box "END" "[X]"
+move right from last box.e;
+box "END" "[Y]";
+arrow <- dashed up from last box.top;
+box "D"
+arrow from B1.bottom to last box.top
+.PE
+
+So, when, in a prediction phase of the algorithm, a nonterminal is substituted,
+the nonterminal is placed on a list, together with a pointer to
+the substituted nonterminal. If in the same prediction phase a nonterminal that
+is on the list becomes a top, all we need to do is place an edge
+between the already substituted one and the successor of the top we are currently
+processing. When a prediction phase is finished, the list is cleared.
+There is one catch: if we consider again the last picture,
+note that if nonterminal B can (directly or indirectly) produce empty,
+it is also necessary to substitute D. However, it is not difficult to
+determine if a nonterminal can produce empty. LLgen already computes
+this information for each nonterminal.
+
+.LP
+Without this `joining together' of graph components, each
+element in the graph has exactly one successor, except the END marker,
+which has none.
+Now that components get joined as described, an element can have any
+number of successors. The recognizer algorithm now has to consider all
+successors of a graph element instead of one.
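+.LP
+For concreteness, the C sketch below shows what such a prediction graph
+element and the per-phase substitution list might look like. The type and
+field names are ours and need not correspond to those of the actual
+implementation; the essential points are the array of successor edges and
+the `substituted' mark.
+.sp 1
+.nf
+.ft CW
+/* Hypothetical prediction graph element; names are illustrative only. */
+struct elem {
+        int             symbol;         /* terminal or nonterminal number */
+        int             substituted;    /* non-zero once marked [X] */
+        int             refcount;       /* number of incoming edges (used later) */
+        int             nsucc;          /* number of outgoing edges */
+        struct elem     **succ;         /* successor elements */
+};
+
+/*
+ * Per prediction phase: for every nonterminal that has been substituted,
+ * remember the element that was substituted, so that a second occurrence
+ * of the same nonterminal can simply be linked to it.  The list is
+ * cleared at the end of every prediction phase.
+ */
+#define MAXNONTERM 512                  /* assumed upper bound */
+struct elem *substituted_in_phase[MAXNONTERM];
+.fi
+.ft R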
+
+.NH 2
+Handling right recursion
+
+.LP
+The only problem right-recursive grammars cause in the algorithm is in the
+`continuation' part; they can cause this part of the algorithm to loop
+forever. As an example, consider:
+.br
+
+	A:	$alpha$ B
+.br
+	B:      $beta$ C
+.br
+	C:	$gamma$ A
+
+.LP
+Now suppose the `substitution' part of the algorithm has turned up
+an END marker with nonterminal A in it. The continuation algorithm will
+now produce:
+
+.PS
+box "END" "[A]";move;box "END" "[C]";move;box "END" "[B]";move
+box "END" "[A]";move;box "END" "[C]"
+.PE
+
+.LP
+etc. etc. However, a slight modification to the algorithm suffices
+to eliminate this problem; within each prediction phase of the algorithm, we
+simply maintain a list of nonterminals that have turned up in an
+END marker. As soon as an END marker turns up whose nonterminal is
+already in the list, we stop the `continuation' algorithm; the part
+of the graph that would be produced by it already has been generated
+by an earlier invocation of the algorithm in the same prediction phase.
+At the end
+of a prediction phase, when all heads are terminals, we clear the list.
+This way, no looping can occur; even if the right recursion is
+indirect, for instance if in the above example the rule for A had been
+.br
+
+	A:	$alpha$ B $delta$
+.br
+.LP
+where $delta$ can produce empty, the algorithm still works; the substitution
+of $delta$ will yield an END marker on top, and when trying to find
+a continuation for LHS A the algorithm notices A is already on the list.
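+.LP
+In C, this bookkeeping could look like the sketch below; the names and the
+bound on the number of nonterminals are ours. One entry per nonterminal
+records whether its continuation has already been started in the current
+prediction phase.
+.sp 1
+.nf
+.ft CW
+/* Hypothetical per-phase administration for the continuation algorithm. */
+#define MAXNONTERM 512                  /* assumed upper bound */
+
+static char continued[MAXNONTERM];      /* non-zero: already continued this phase */
+
+/* Returns non-zero if the continuation for nonterminal n must be skipped. */
+int
+must_skip_continuation(int n)
+{
+        if (continued[n])
+                return 1;               /* graph part already built this phase */
+        continued[n] = 1;
+        return 0;
+}
+
+/* Called at the end of a prediction phase, when all heads are terminals. */
+void
+clear_continuations(void)
+{
+        int n;
+
+        for (n = 0; n < MAXNONTERM; n++)
+                continued[n] = 0;
+}
+.fi
+.ft R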
+
+
+.NH 2
+Handling left recursion
+
+.LP
+Left-recursion is, unfortunately, a much tougher problem than
+right-recursion. The result of left-recursive grammar rules is that
+the substitution algorithm never stops, because it can keep on building
+the graph with the same set of rules without ever turning up a terminal.
+One course of action would be to pre-process the grammar rules to
+eliminate left-recursion; there are algorithms that eliminate direct
+and indirect left-recursion. However, we have taken another course; by
+allowing the produced graphs to contain loops, we can handle left
+recursion without any modifications to the grammar. As soon as 
+we come to the point that we want to substitute a nonterminal
+which was already substituted earlier on the same path and in
+the same prediction phase, we can 
+make a link from the `older' nonterminal to the successor of
+the `new' nonterminal. In this way we have constructed a loop
+in the graph. As an example, suppose we have the following rules:
+.br
+
+D: A
+
+A: B a
+
+B: A | x
+
+.br
+Suppose also that we have nonterminal `D' on top of a stack. We 
+now start substituting `D':
+
+.PS
+A: box "A"
+move
+X: box "x"
+move to 0.5 <A.se, X.sw>
+down
+move
+B: box "[B]"
+arrow
+box "a"
+arrow
+box "[A]"
+arrow
+box "[D]"
+arrow dashed
+box "END" "[S]"
+
+arrow from A.s to B.n
+arrow from X.s to B.n
+ 
+.PE 
+
+.LP
+We now have an `A' on top of the stack which was already
+substituted on the same path and also in the same prediction phase. To avoid
+never-ending substitution we make a loop as follows:
+
+.PS
+A: box "A" dashed
+move
+X: box "x"
+move to 0.5 <A.se, X.sw>
+down
+move
+B: box "[B]"
+arrow
+box "a"
+arrow
+A2: box "[A]"
+arrow
+box "[D]"
+arrow dashed
+box "END" "[S]"
+
+arrow dashed from A.s to B.n
+arrow from X.s to B.n
+arc <- from B.w to A2.w
+.PE
+
+.LP
+The dashed box with `A' in it means that it can be deleted, because
+there is already an occurrence of it in the loop.
+
+.LP
+The most beautiful consequence of allowing loops in the graphs is
+that the original parsing algorithm needs only one minor change.
+When the algorithm visits an element which has more than one
+outgoing edge, it tracks down all of these paths,
+just like before; there may now be one or more back edges among
+these edges, but the algorithm need not be aware of this fact.
+The only difficulty with loops is that the algorithm might go into
+a loop; it continues searching for terminals but it might happen
+that there are no valid terminals in the loop. The solution to this
+problem is not very difficult; just set a flag at all elements we
+visit. When we reach an element which has this flag turned on, we
+don't have to search any further.  At the end of the prediction phase, when we
+have found all possible new heads, all flags are cleared.
+Even if there are no loops in the
+prediction graph, setting flags may be used as an optimization: 
+it is possible that two paths come together at one point. In that situation
+it is useless to scan for the second time the part of the graph which 
+both paths have in common.
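+.LP
+The flag mechanism can be sketched in C as follows. The element type shows
+only the fields needed here, and the work done per element is left open;
+the point is that a walk over the now possibly cyclic prediction graph
+stops as soon as it reaches an element already visited in this phase.
+.sp 1
+.nf
+.ft CW
+/* Hypothetical loop-safe walk over prediction graph elements. */
+struct elem {
+        int     flag;                   /* cleared at the end of each phase */
+        int     nsucc;
+        struct elem **succ;
+};
+
+void
+walk(struct elem *e)
+{
+        int i;
+
+        if (e == NULL || e->flag)
+                return;                 /* a loop or a joined path: stop here */
+        e->flag = 1;
+        /* ... process e here: substitute it, test it, and so on ... */
+        for (i = 0; i < e->nsucc; i++)
+                walk(e->succ[i]);
+}
+.fi
+.ft R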
+
+.NH 2
+Some optimizations using reference counts
+
+.LP
+As explained in section 2.2, it is sometimes necessary to copy a
+prediction graph element before substituting it. In order to determine
+if a certain element has to be copied, it is convenient to maintain
+a reference count in each graph element. This reference count keeps
+track of the number of edges that enter an element. Now, when we want
+to substitute an element with reference count not 0, we need to
+copy it, because there is another path in the prediction graph that
+contains the element we want to substitute, and on this other path
+the element cannot be substituted yet.
+
+.LP
+Maintaining reference counts also enables us to perform another
+optimization: remember that if, in a prediction phase, a terminal
+is predicted that does not match the current inputsymbol, we from
+then on just ignore the path in the graph starting at the terminal.
+However, we can safely delete the terminal from the graph; furthermore,
+all its successors in the prediction graph that have reference count
+0 can be deleted as well, as can their successors with reference
+count 0, etc. This way, we delete from the prediction graph
+most elements that are no longer accessible, but not all of them; as will 
+be explained in the next section, loops in the prediction graph
+can cause problems. 
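+.LP
+A C sketch of this deletion step follows; only the fields needed here are
+shown and the names are again ours. As explained in the next section,
+elements on a loop never reach reference count 0 this way, so they are not
+freed by this routine alone.
+.sp 1
+.nf
+.ft CW
+#include <stdlib.h>
+
+/* Only the fields needed for deletion are shown. */
+struct elem {
+        int     refcount;               /* number of incoming edges */
+        int     nsucc;
+        struct elem **succ;
+};
+
+/*
+ * Called on a terminal head that failed to match the input symbol.
+ * Each successor loses one incoming edge; successors whose reference
+ * count drops to zero are deleted in turn.
+ */
+void
+delete_path(struct elem *e)
+{
+        int i;
+
+        for (i = 0; i < e->nsucc; i++) {
+                struct elem *s = e->succ[i];
+
+                if (--s->refcount == 0)
+                        delete_path(s);
+        }
+        free(e->succ);
+        free(e);
+}
+.fi
+.ft R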
+
+.NH 2
+The algorithm to delete inaccessible loops
+
+.LP
+Deleting graph elements which are no longer reachable is not as easy
+as it looks when there are loops in the graph, introduced by
+the extension to the algorithm that handles left recursive grammars.
+Suppose for example that we have a very simple loop as in the left 
+picture below:
+
+.PS
+down
+X: box "x" "(0)"
+arrow 
+box "[B]" "(2)"
+arrow
+box "a" "(1)"
+arrow
+box "[A]" "(1)"
+arrow 
+box "[D]" "(1)"
+arc <- from 2nd box.w to 2nd last box.w
+
+move right from X.ne
+move 
+move
+move
+move
+move
+move
+down 
+box "x" "(0)" dashed
+arrow dashed
+B: box "[B]" "(1)"
+arrow
+box "a" "(1)"
+arrow
+box "[A]" "(1)"
+arrow
+box "[D]" "(1)"
+arc <- from B.w to 2nd last box.w
+.PE
+
+.LP
+The number below each symbol indicates the reference count of that element.
+Suppose now that we delete `x', then we have the situation depicted in the 
+picture on the right. The loop consisting of `[B]', `a' and `[A]' is now
+unreachable, so all these elements can be deallocated.
+The reference count of `[B]' is 1, so it will not be deleted. To be precise,
+all elements in the loop have a reference count of 1, and
+consequently none of them will be deleted. But we stated earlier
+that the elements of the loop can no longer be reached and that the
+loop therefore had to be deleted!
+loop elements are all 1, but in more complex situations it is also 
+possible that some of the elements have a reference count of more
+than 1.
+
+.LP
+To solve this problem we present an algorithm, devised by E. Wattel, that
+determines whether a loop can be deleted or not.
+The algorithm consists of two parts. The first part of the algorithm goes as
+follows: it presumes that all elements of the loop will indeed be
+deleted. Every time it deletes an element it decreases the reference
+count of all the successors of the element that are also members of the same
+loop.  How the algorithm knows which elements belong to the loop and which
+do not will be explained later. The situation of the example above will now 
+look like this:
+ 
+.PS
+down
+box "[B]" "(0)" 
+arrow 
+box "a" "(0)"
+arrow 
+box "[A]" "(0)"
+arrow
+box "[D]" "(1)"
+arc <- from 1st box.w to 2nd last box.w
+.PE
+
+.LP
+The number below each symbol indicates again the reference count 
+after we have applied the first part of the algorithm.
+
+.LP
+The second part of the algorithm checks and restores the
+reference counts of all members of the loop. When it finds
+out that one or more reference counts are not 0, it concludes
+that it is still possible to enter the loop in some way, and
+that it cannot be
+deleted yet. Otherwise it reports that the loop can be
+deleted, which is also the case in our example.
+
+.LP
+We will now formally describe the first part of the algorithm 
+that finds all directed circuits from a given vertex, and determines if 
+the vertices on those circuits can be deleted.
+The algorithm works on prediction-graphs in which every edge that
+is in a circuit is marked. Note that a marked edge may be in more than one circuit.
+We will call this mark `C'.
+The input to the algorithm is such a prediction graph, and a start vertex,
+say A. The first part of the algorithm is:
+
+.IP 1
+Put the start vertex A on a list L; mark all edges `unused'
+.IP 2
+If L is empty, stop
+.IP 3
+For each vertex in list L, check if there are edges marked both `C' and
+`unused'. For each edge found, mark it `used', and traverse it to its
+other endpoint; put this endpoint on a new list M, initially empty
+.IP 4
+Decrease the reference count of all vertices on M by 1
+.IP 5
+L := M; go to 2
+
+.LP
+It is clear that the algorithm will terminate: each edge is only traversed once,
+and the number of edges is finite. We will now prove some properties of this
+part of the algorithm.
+
+.LP
+.I
+An edge is traversed by the algorithm if and only if it is on some
+directed circuit $A ->$...$->A$.
+.R
+.br
+
+The if-part is easy; if an edge $e$ connecting vertices $W$ and $V$ is on some directed circuit starting in
+$A$, then there is a path $A ->$...$-> W -> V$; let $A ->$...$-> W -> V$ be a path
+of minimum length from $A$ to $V$. If the length of the path from $A$ to
+$W$ is $k$, then after turn $k$ of the algorithm $W$ will be on list L. To see
+that this is the case, suppose that $W$ is not on list L after turn $k$;
+this means that the edge entering $W$ was already marked used in a
+previous turn, but then there would be a shorter path from $A$
+to $W$, contradicting the assumption that the path is of
+minimum length. The edge
+$e$ is marked `C', because it is in a circuit; it is marked `unused', for if
+it were marked used, there would be a shorter path from $A$ to $V$. So,
+in turn $k + 1$, the edge $e$ will be traversed. 
+
+.LP
+On the other hand, suppose that an edge $e$ is traversed by the algorithm;
+we will show by induction on the number of turns the algorithm has made
+that $e$ is on a directed circuit $A->$..$->A$. In the first turn, all
+edges from $A$ that are marked `C' are traversed, and clearly, if an edge
+from $A$ is part of a circuit then that edge is part of a circuit from $A$ to $A$.
+Now suppose that in turn $n+1$ an edge $e$ connecting vertices $W$ and
+$V$ is traversed. This means the edge is 
+marked `C', so it is part of some circuit. If there is a path from $V$ to $A$,
+we can simply trace a circuit
+$A->$...$-> W -> V -> $...$-> A$, and clearly $e$ is on a circuit from
+$A$ to $A$. Now, suppose there is no path from $V$ to
+$A$. We can always trace a circuit $W -> V ->$...$-> W$ because the
+edge from $W$ to $V$ is part of a circuit; and by the
+induction hypothesis there is a circuit $A ->$...$-> W ->$...$-> A$. We can
+now make a `detour' at  $W$, yielding a circuit $A->$...$-> W -> V$...
+$-> W ->$...$-> A$. This case is shown in the picture below.
+So in either case $e$ is on a circuit from $A$ to $A$.
+
+.PS
+down; 
+B1: box "A"; 
+arrow dashed; 
+B3: box dashed; 
+arrow dashed;
+B2: box "W";
+arrow dashed; box dashed;
+arc <- from B1.w to last box.w
+arrow right "$e$" "C" from B2.e
+box "V"; arrow dashed; box dashed;
+arrow dashed  -> from last box.n to B3.e
+.PE
+  
+.LP
+.I
+A vertex appears on list L if and only if it is on some directed
+circuit from $A$ to $A$.
+.R
+.br
+
+.LP
+If a vertex is in such a circuit, there is an edge that enters it, which
+is part of a circuit form $A$ to $A$; we already showed that this edge
+is traversed by the algorithm, and thus the vertex will appear on list
+L. Conversely, if a vertex appears on list L, then an edge entering
+that vertex has been traversed by the algorithm; we showed that this
+edge is part of a circuit from $A$ to $A$, and thus the vertex is
+part of a circuit from $A$ to $A$.
+
+.LP
+.I
+When the algorithm is finished, each vertex that is part of some
+directed circuit from $A$ to $A$ has its reference count decreased by exactly
+the number of edges entering it that are part of a directed circuit from $A$ to $A$.
+.R
+.br
+
+.LP
+Each edge that is part of some circuit from $A$ to $A$ is traversed
+exactly once; the reference count of the endpoint is decreased
+by one after an edge has been traversed. Thus, if a vertex is the endpoint
+of $k$ such edges, its reference count is decreased by $k$.
+
+.LP
+.I
+If the reference count of each of the vertices visited by the algorithm
+is 0 after the algorithm has finished, all these vertices can be deleted;
+if the reference count is not zero for one or more of the visited
+vertices, then none of them can be deleted.
+.R
+.br
+
+.LP
+Suppose all visited vertices have reference count 0; this means that
+each of the vertices is only entered by edges that are on a circuit
+from $A$ to $A$. Therefore, it holds that any path leading to any
+of the visited vertices has to start in one of the visited vertices; there
+is no path starting in an unvisited vertex to a visited one. Thus,
+all the visited vertices are unreachable.
+Conversely, if one of the visited vertices has reference count not zero,
+then there is a path from an unvisited vertex to this vertex. Because from
+the vertex with reference count non-zero, we can get to $A$, and from $A$
+we can get to any of the other vertices, all visited vertices are 
+reachable.
+
+.LP
+The second part of the algorithm now checks if all reference counts are
+zero, and if they are, it deletes all visited vertices. 
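+.LP
+The two parts of the algorithm can be sketched in C as follows. The vertex
+and edge representation is illustrative only, the caller is assumed to
+supply a large enough list, and restoring the reference counts, clearing
+the marks and the actual deletion have been left out.
+.sp 1
+.nf
+.ft CW
+#define CIRCUIT 01                      /* edge lies on some circuit */
+#define USED    02                      /* edge traversed in part one */
+
+struct edge {
+        struct vertex   *to;
+        int             flags;
+};
+
+struct vertex {
+        int             refcount;       /* number of incoming edges */
+        int             visited;        /* on the list; assumed 0 initially */
+        int             nedge;
+        struct edge     *edge;
+};
+
+/* Part one: traverse every `unused' circuit edge reachable from a,
+ * decreasing the reference count of its endpoint. */
+void
+sweep(struct vertex *a, struct vertex **list, int *nlist)
+{
+        int i, done = 0;
+
+        list[(*nlist)++] = a;
+        a->visited = 1;
+        while (done < *nlist) {
+                struct vertex *v = list[done++];
+
+                for (i = 0; i < v->nedge; i++) {
+                        struct edge *e = &v->edge[i];
+
+                        if ((e->flags & (CIRCUIT|USED)) != CIRCUIT)
+                                continue;
+                        e->flags |= USED;
+                        e->to->refcount--;
+                        if (!e->to->visited) {
+                                e->to->visited = 1;
+                                list[(*nlist)++] = e->to;
+                        }
+                }
+        }
+}
+
+/* Part two: the visited vertices may be deleted only if all of their
+ * reference counts have dropped to zero. */
+int
+loop_is_unreachable(struct vertex **list, int nlist)
+{
+        int i;
+
+        for (i = 0; i < nlist; i++)
+                if (list[i]->refcount != 0)
+                        return 0;       /* still entered from outside */
+        return 1;
+}
+.fi
+.ft R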
+
+
+.NH 2
+Marking loop elements
+
+.LP
+One point we have omitted so far is how the edges in the prediction
+graph that are part of a loop get marked.
+Basically, a loop can be detected:
+
+	a. when it is made;
+.br
+	b. when we want to know about it.
+
+.LP
+The first approach checks if a loop is constructed
+as soon as we join two paths in the graph, and if so, marks all
+edges of the loop. The other approach does not do any checking when two
+paths are joined together; it starts looking for loops when we want
+to delete an element with reference count not 0, marking all edges
+belonging to the loops it discovers. In practice it turns out that
+we very often encounter elements that we would like to delete, but that have
+reference count not 0, whereas the joining of paths occurs relatively 
+infrequently. We therefore have chosen to check if a loop is created
+when two paths in a prediction graph are joined. 
+
+.LP
+Now the question arises how to find and mark all edges of
+the loop. For this problem we devised also an algorithm. 
+Because we already know that there is an edge from the element on which 
+the new path is connected to the successor of the joined element, the
+algorithm only has to find a path from this last element back to the first one.
+This can be done by a backtracking depth first search; to find a path from
+one element to another we have to find a possible empty path 
+from one of the successors of the first element to the last element. As
+soon as we have found a path, we can mark all the edges on the path and also
+the backedge as loop edges. In case that there is more than one path
+back to the first element it is necessary that the algorithm continues
+searching after it has found one path.
+
+.LP
+To avoid looping of this algorithm we have to set a flag at the elements
+which are on the path already. When the algorithm is backtracking it can 
+clear the flags at the elements it is leaving.
+
+.LP
+To speed up the searching process we can set flags at the edges we have already
+visited but that did not lead back to the first element. When the algorithm
+encounters such an edge it already knows that this edge is not worth
+searching again and can be skipped. At the end of the algorithm these
+flags have to be cleared again.
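+.LP
+A C sketch of this search is given below; the representation is again
+illustrative, and the edge-flag speed-up just described has been left out.
+The back edge itself is assumed to have been marked by the caller; the
+routine marks every edge on every path from the target of the back edge
+back to its origin.
+.sp 1
+.nf
+.ft CW
+#define ONLOOP  01                      /* edge is known to lie on a loop */
+
+struct edge {
+        struct elem     *to;
+        int             flags;
+};
+
+struct elem {
+        int             onpath;         /* element lies on the current DFS path */
+        int             nedge;
+        struct edge     *edge;
+};
+
+/*
+ * Backtracking depth first search: returns non-zero if some path from e
+ * leads back to target, and marks the edges on all such paths as loop
+ * edges.  The search continues after the first hit, because there may be
+ * more than one path back.
+ */
+int
+mark_loop_edges(struct elem *e, struct elem *target)
+{
+        int i, found = 0;
+
+        if (e == target)
+                return 1;
+        if (e->onpath)
+                return 0;               /* already on the path: don't loop */
+        e->onpath = 1;
+        for (i = 0; i < e->nedge; i++)
+                if (mark_loop_edges(e->edge[i].to, target)) {
+                        e->edge[i].flags |= ONLOOP;
+                        found = 1;
+                }
+        e->onpath = 0;                  /* backtrack: leave the path */
+        return found;
+}
+.fi
+.ft R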
+
+.LP
+One might propose another optimization: as soon as
+we reach an edge that is already marked as a loop edge, we
+can stop searching for other loop edges. There is, however, 
+a case in which this can go wrong. Imagine the following situation:
+
+.PS
+down
+E: box "[E]"
+arrow " C" ljust
+D: box "[D]"
+arrow " C" ljust
+C: box "c"
+arrow " C" ljust
+box "b"
+arrow " C" ljust
+A: box "[A]"
+arrow 
+box "a"
+
+move right from D
+move right
+J: box "[J]"
+down
+arrow from J.s " C" ljust
+I: box "i"
+arrow " C" ljust
+H: box "[H]"
+arrow from H.s to A.e 
+
+arc <- from E.w to A.w 
+move left from C
+move left
+"C"
+arc -> from H.e to J.e
+move right from I
+move right
+"C"
+
+arrow dashed from E.s to J.n
+
+
+.PE
+
+What we have here is a prediction graph with two loops; all edges that belong 
+to a loop are again marked with a `C'. Note that the edge between `[H]'
+and `[A]' is not a loop edge. Suppose that `[J]' is not yet
+completely substituted, i.e. there is another production rule for
+J:
+.br
+
+J:	E
+
+.br
+The `E' on top of the right path is now joined with the `[E]' 
+on the left path, which is depicted by the dashed arrow
+between `[E]' and `[J]'. When we take a good look at the graph
+we see that the two loops are merged into one. But that is not
+the most important observation we have to make: not only must the
+edge between `[E]' and `[J]' be marked as a loop edge, but
+also the edge between `[H]' and `[A]'! So it is not possible
+to stop searching for loop edges as soon as we have found an
+edge which was already marked as a loop edge. We have to continue
+until we reach the element at which we started: `[E]'. So the 
+optimization proposed above is incorrect.
+
+
+.NH 2
+Optimizations using FIRST and FOLLOW sets
+
+.LP
+In the algorithm as we have described it, every nonterminal on top of the graph
+is substituted until only terminals remain on top; these terminals are
+then matched against the current input symbol. However, by using
+FIRST sets, we can save considerably on the number of computations 
+necessary. Suppose one of the top elements of the graph is nonterminal A,
+and the current inputsymbol is $a$. Then, it is of no use to substitute
+A if terminal $a$ is not in FIRST(A), because then substituting A will
+never produce $a$ on top of the graph. So, before substituting a
+nonterminal we check if the current inputsymbol is in its FIRST set; if
+it is not, we can declare the path the nonterminal is on a dead end, and
+delete it, without having to perform the actual substitution. Of course, if
+A can produce empty, we still have to consider its successor in the graph. 
+
+.LP
+Similarly, when we have an END marker on top, with nonterminal B in
+it, and we consider using rule 
+.br
+
+	D:	$alpha$ B C $gamma$
+
+.br
+We first check if the current inputsymbol is in FIRST(C); if this is
+not the case, there is no need to start a graph component with this
+rule, because it will never produce the next inputsymbol on top.
+Again, if C produces empty, we still have to evaluate the part of the
+rule following C.
+
+.LP
+To circumvent the problems caused in the FIRST set optimization by
+nonterminals that produce empty, we can also make use of FOLLOW sets.
+When substituting, if we encounter a nonterminal whose FIRST set does
+not contain the current inputsymbol but which can produce empty,
+we check if the current inputsymbol is in its FOLLOW set. If it is not,
+there is no need to process its successor. Similarly, in case we
+are processing an END marker as explained above, there is no need
+to process the part of the rule following C if FIRST(C) does not
+contain the input symbol, or C produces empty but the inputsymbol
+is not in FOLLOW(C).
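+
+.LP
+The following fragment sketches how these two checks can be combined when
+a nonterminal appears on top of a path in the prediction graph. It is an
+illustration only: the routine names (in_first, in_follow, produces_empty,
+substitute, process_successor, mark_dead_end) are invented here and are
+not the identifiers used in the actual implementation.
+.br
+.nf
+
+	/* sketch only: all names are invented for illustration */
+	if (in_first(nt, sym)) {
+		/* nt may produce sym on top of the graph,
+		 * so the substitution is worth performing
+		 */
+		substitute(nt);
+	} else if (produces_empty(nt) && in_follow(nt, sym)) {
+		/* nt cannot produce sym itself, but it produces empty
+		 * and sym may be matched by its successor in the graph
+		 */
+		process_successor(nt);
+	} else {
+		/* neither nt nor its continuation can match sym:
+		 * this path is a dead end and can be deleted
+		 */
+		mark_dead_end(nt);
+	}
+
+.fi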
+.bp
+.nr PS 12
+.nr VS 14
+
+.NH
+Test results
+
+.nr PS 10
+.nr VS 12
+.RS
+
+.LP
+In this chapter, we discuss some test results that were obtained
+by recompiling existing ACK compilers with the modified LLgen.
+We tried several combinations of possible optimizations, including
+`dumb' ones, like no optimization at all, not even deleting unreachable
+prediction graph elements.
+The incorporation of LLgen with non-correcting error recovery went
+smoothly; only minor modifications to the Makefiles were necessary.
+Specifically, these modifications consisted of passing an extra
+flag to LLgen and including the newly generated C file Lncor.c in
+the list of generated C files. Also, the LLmessage error reporting
+routine had to be adapted. We successfully recompiled the C, Modula-2
+and Occam compilers; in the next sections, we discuss some test results
+that were obtained with the Modula-2 and C compilers.
+
+.RE
+.LP
+.NH 2
+Performance
+
+.LP
+We will now present and discuss, with the aid of some
+diagrams, time and space measurements on the non-correcting error
+recovery. We have measured the effect of various optimizations.
+These optimizations include the first-set optimization and the follow-set
+optimization. We also measured the effect of leaving out the loop-deletion
+algorithm, regarding both time and space. We performed our measurements using
+C and Modula-2 programs of three different sizes: one of approximately
+750 tokens, one of approximately 5000 tokens and one of approximately
+15000 tokens. We have
+chosen to express the sizes of programs in the number of tokens instead of
+the number of lines, because the number of tokens more realistically
+reflects the load the programs put on the error recovery mechanism. Also, we give
+our time measurements in user time instead of real time, because real time
+depends heavily on the load of the system, whereas user time does not.
+Our space measurements are based on the size of the prediction graphs.
+Note that all files are entirely recognized by the non-correcting error
+recovery technique. We achieved this by putting a `1' at the beginning
+of each file; because each file then starts with a syntax error, LLgen
+is forced to continue with the non-correcting error recovery.
+
+.NH 3
+Time and space measurements on the effect of the first-set optimization 
+
+.LP
+In the diagram below we show the time measurements we obtained from recognizing
+the C programs, both with and without the first-set optimization.
+
+.G1
+coord x 0, 17000 y 0, 65
+ticks bot out at 750, 5000, 15000
+label bot "Number of tokens"
+label left "User Time" "(sec)" left .3
+draw no_opt dashed 
+draw first_opt dashed 
+
+copy thru X
+	times size +2 at $1, $2 
+	times size +2 at $1, $3 
+	next no_opt at $1, $2
+	next first_opt at $1, $3
+X until "XXX"
+
+742	2.5	.9	
+5010	16.3	5.8
+14308	54.2	16.8
+XXX
+
+copy thru X "$1 $2" size -2 at 11000, $3 X until "XXX"
+No optimization 55 
+First-set optimization 20
+XXX
+.G2
+
+.I
+.ce
+Time measurements of three C programs with and without the first-set optimization
+.R
+
+.LP
+Notice the considerable time savings we
+get when the first-set optimization is turned on; a factor of slightly more than
+3. Obviously this is an extremely useful optimization. On the other hand,
+we found there were no measurable time savings when using the follow-set
+optimization; for that reason we did not chart the results of this optimization.
+It seems that the time savings gained by the optimization are
+wasted again by the extra processing time needed. We conclude that
+this optimization is of little or no use when we want to save time.
+
+.LP
+In the following picture the time measurements of three Modula-2 programs 
+are given, again with and without first-set optimization. 
+
+.G1
+coord x 0, 17000 y 0, 65
+ticks bot out at 750, 5000, 15000
+label bot "Number of tokens"
+label left "User Time" "(sec)" left .3
+draw no_opt dashed 
+draw first_opt dashed 
+copy thru X
+	times size +2 at $1, $2 
+	times size +2 at $1, $3 
+	next no_opt at $1, $2
+	next first_opt at $1, $3
+X until "XXX"
+
+823	1.3	.6	
+4290	7.6	3.5	
+16530	30.5	14.3
+XXX
+
+copy thru X "$1 $2" size -2 at 13000, $3 X until "XXX"
+No optimization 30 
+First-set optimization 15
+XXX
+.G2
+
+.I
+.ce
+Time measurements of three Modula-2 programs with and without the first-set optimization
+.R
+
+.LP
+From this picture we can conclude mainly the same as above: considerable
+time savings when we use the first-set optimization;
+the factor is somewhat less, but still more than 2. Again we have omitted
+the results of the follow-set optimization, for the same reason as before.
+
+.LP
+There is, however, one remarkable difference between the two languages: parsing
+C programs takes almost twice as much time as parsing programs of comparable
+sizes written in Modula-2. This can be explained by the fact that the
+C grammar is far more complicated than that of Modula-2, and also the
+production rules are longer in C, so building, deleting and
+traversing the graph all consume more time.
+
+.LP
+Now we come to the space measurements of both C and Modula-2 programs.
+In the picture below we present the maximum sizes of the prediction graphs
+during the recognition of the three C programs.
+ 
+.G1
+coord x 0, 17000 y 0, 18000 
+ticks bot out at 750, 5000, 15000
+label bot "Number of tokens"
+label left "Maximum size of" "the prediction graph" "(bytes)"left .3
+draw no_opt dashed 
+draw first_opt dashed 
+copy thru X
+	times size +2 at $1, $2 
+	times size +2 at $1, $3 
+	next no_opt at $1, $2
+	next first_opt at $1, $3
+X until "XXX"
+
+742	5568	10444
+5010	7668	12664
+14308	13636	17308
+XXX
+
+copy thru X "$1 $2" size -2 at 8000, $3 X until "XXX"
+No optimization 16000 
+First-set optimization 7000 
+XXX
+.G2
+
+.I
+.ce
+Maximum sizes of the prediction graphs when recognizing three C programs
+.R
+
+.LP
+From this diagram we see that, although the prediction graphs
+are smaller when the first-set optimization is used, the space savings are
+not as spectacular as the time savings achieved by this optimization.
+
+.LP
+In Modula-2 the first-set optimization also causes a decrease in memory
+usage. The savings are less than in C, but still about 1.5 Kb. Again
+this can be explained by the fact that the rules of the Modula-2 grammar
+are shorter than those of C.
+
+.G1
+coord x 0, 17000 y 0, 12000 
+ticks bot out at 750, 5000, 15000
+label bot "Number of tokens"
+label left "Maximum size of" "the prediction graph" "(bytes)" left .3
+draw no_opt dashed 
+draw first_opt dashed 
+copy thru X
+	times size +2 at $1, $2 
+	times size +2 at $1, $3 
+	next no_opt at $1, $2
+	next first_opt at $1, $3
+X until "XXX"
+
+823	5056	3292
+4290	6420	4664
+16530	11388	9632
+XXX
+
+copy thru X "$1 $2" size -2 at 8000, $3 X until "XXX"
+No optimization 10000 
+First-set optimization 4000 
+XXX
+.G2
+
+.I
+.ce
+Maximum sizes of the prediction graphs when recognizing three Modula-2 programs
+.R
+
+.NH 3
+Input that is recognized in quadratic time
+
+.LP
+The measurements presented may suggest that the time required to
+recognize input depends linearly on the length of the input; however,
+this is not always the case. When there are recursive rules in the
+grammar, the time needed to recognize input that is produced by these
+rules can become proportional to the square of the input length.
+Consider this set of grammar rules:
+.br
+.nf
+
+	S:	'{' A '}'
+	A:	'a' A | $epsilon$
+
+.fi
+.LP
+When the input is `{aaa....', the algorithm will produce the following 
+prediction graphs: 
+
+.PS
+up; B1: box "END" "S"; arrow <- ;box "}";arrow <- ;box "A";arrow <- ;box "{";
+move right from B1.se; move
+up; B2: box "END" "S"; arrow <-; box "}"; arrow <-; box "[A]"; 
+arrow <-; box "A"; arrow <-; box "a";
+move right from B2.se; move
+up; B3: box "END" "S"; arrow <-; box "}"; arrow <-; box "[A]";
+arrow <-; box "[A]"; arrow <-; box "A"; arrow <-; box "a";
+move right from B3.se;move
+up; B4: box "END" "S"; arrow <-; box "}"; arrow <-; box "[A]";
+arrow <-; box "[A]"; arrow <-; box "[A]"; arrow <- ; box "A"; arrow <-;box "a";
+.PE
+
+.LP
+In each prediction phase, a new [A] appears on the prediction graph. However,
+since A also produces empty, the prediction algorithm has to traverse all the
+elements [A] until it finds the element `}'. In the first prediction phase
+there is one element [A], in the second there are two, etc., so in all
+1 + 2 + 3 + ... + k = ${k(k+1)} over 2$ elements have to be traversed if
+there are k prediction phases, making the work proportional to the square
+of the input length. We constructed a parser with this simple input grammar
+and measured the processing time the error recovery mechanism used.
+In the following diagram the dashed line shows the processing time needed;
+the dotted line is the curve $t = 13 times 10 sup {-6} n sup 2$, where $n$
+is the number of tokens and $t$ is in seconds. Clearly the processing time
+is proportional to the square of the number of tokens.
+
+.G1
+coord x 0, 2100 y 0, 60 
+ticks bot out at 500, 1000, 1500, 2000
+label bot "Number of tokens"
+label left "User Time" "(sec)" left .3
+draw quad dashed 
+
+copy thru X
+        times size +2 at $1, $2 
+        next quad at $1, $2
+X until "XXX"
+
+500  3.0     
+1000 12.4
+1500 28.6
+2000 51.4 
+XXX
+
+draw dotted
+for i from 0 to 2100 by 25 do { next at i, 0.000013 * i * i }
+.G2
+
+.LP
+In the grammar used for the C compiler, array initializations are handled by a recursive
+rule, so we would expect the error recovery mechanism to need quadratic
+processing time to recognize such an initialization; we made measurements on
+the processing time and indeed, the
+processing time needed grows proportionally to the square of the size of the input, as the
+next figure shows. Here, the processing times are about half of those in
+the previous example; this is because the recursion only appears after two
+tokens have been recognized. Note that the algorithm only takes quadratic time
+when it is recognizing input that is generated by a recursive grammar rule.
+Other input is still recognized in linear time, even though
+there are recursive grammar rules.
+
+.G1
+coord x 0, 5000 y 0, 85 
+ticks bot out at 1150, 2400, 3600, 4800
+label bot "Number of tokens"
+label left "User Time" "(sec)" left .3
+draw quad dashed 
+
+copy thru X
+        times size +2 at $1, $2 
+        next quad at $1, $2
+X until "XXX"
+
+1150 5.1      
+2400 20.3
+3600 43.7 
+4800 78.6
+XXX
+.G2
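+
+.LP
+As a concrete illustration (the fragment below is made up for this purpose
+and is not one of our test programs), an erroneous C file containing a
+large initialization such as
+.br
+.nf
+
+	int table[] = {
+		1, 2, 3, 4, 5, 6, 7, 8,
+		/* ... many thousands of further elements ... */
+		9999, 10000
+	};
+
+.fi
+.LP
+is handled by the recursive initializer rule, so the time the error
+recovery spends on such an initialization grows with the square of the
+number of elements in it.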
+
+.LP
+Unfortunately, there is no easy way to speed up the recognition of these
+recursively defined language elements; the problem is caused by the substituted
+nonterminals that are left in the prediction graph, and we cannot just delete those
+`dummies' from the graph during a prediction phase because the `join' part of the
+prediction algorithm depends on them. One could traverse the graph after
+a prediction phase to delete the dummies, but then the processing
+time needed to recognize non-recursively defined language elements would
+increase dramatically. However, we feel that in practice things
+like large array initializations will not occur in hand-written programs; when
+they occur, it is probably in computer-generated programs, which normally
+will be correct anyway, meaning that the error recovery never sees them.
+When testing such generated programs, one is likely
+to use small test cases, which are handled well by the error recovery.
+
+.NH 3
+Time measurements on the effect of leaving out the loop-deletion algorithm
+
+.LP
+We now show what effect the loop-deletion algorithm has on processing time,
+or, to put it another way, how much time can be saved when we turn off the
+loop-deletion algorithm. In the diagram below we give the measurements for
+the three C programs; note that the first-set optimization is used.
+
+.G1
+coord x 0, 17000 y 0, 22
+ticks bot out at 750, 5000, 15000
+label bot "Number of tokens"
+label left "User Time" "(sec)" left .3
+draw no_loop dashed 
+draw loop dashed 
+copy thru X
+	times size +2 at $1, $2 
+	times size +2 at $1, $3 
+	next no_loop at $1, $2
+	next loop at $1, $3
+X until "XXX"
+
+742	.9	.4	
+5010	5.8	6.8
+14308	16.8	20.5
+XXX
+
+copy thru X "$1 $2" size -2 at 11300, $3 X until "XXX"
+With loop-deletion 20 
+Without loop-deletion 9 
+XXX
+.G2
+
+.I
+.ce
+Time measurements on processing three C programs with and without the loop-deletion algorithm
+.R
+
+
+.LP
+The diagram shows that the loop-deletion algorithm
+does not dramatically slow down the recognition process. There is, however,
+a measurable time loss of about 25%. As we will see later, the loop-deletion
+algorithm will turn out to be extremely useful for efficient use of memory
+when there are many loops in the graph.
+
+.LP
+The effect of the loop-deletion algorithm on parsing Modula-2 programs
+is even smaller than with C programs; in fact there is no measurable
+time loss:
+
+.G1
+coord x 0, 17000 y 0, 15
+ticks bot out at 750, 5000, 15000
+label bot "Number of tokens"
+label left "User Time" "(sec)" left .3
+draw no_loop dashed 
+draw loop dashed 
+copy thru X
+	times size +2 at $1, $2 
+	times size +2 at $1, $3 
+	next no_loop at $1, $2
+	next loop at $1, $3
+X until "XXX"
+
+823	.6	.6
+4290	3.5	3.8
+16530	14.3	14.3
+XXX
+
+copy thru X "$1 $2" size -2 at 11800, $3 X until "XXX"
+With loop-deletion 13 
+Without loop-deletion 7
+XXX
+.G2
+
+.I
+.ce
+Time measurements on processing three Modula-2 programs with and without the loop-deletion algorithm
+.R 
+
+.LP
+There are at least two reasons for this; both result from the relative
+simplicity of the Modula-2 grammar. First, the distance from a head to an
+end-of-stack marker is shorter than in C; second, Modula-2
+causes fewer joins to occur than C, meaning that the loop-marking algorithm
+is run less often, and when it is run it has fewer paths to search.
+
+
+.NH 3
+Space measurements on the effect of leaving out the loop-deletion algorithm
+
+.LP
+Clearly, to make any measurements on the space-usage effects of leaving out
+the loop-deletion algorithm we need a program that causes the prediction
+graph to contain loops; however, we have not been able to devise a C
+or Modula-2 program that does this. In order to be able to make measurements,
+we added an extra alternative to a rule of the C compiler grammar, making
+it directly left-recursive. To make LLgen accept this new grammar, we
+put a `%if' directive in the rule.
+
+.LP
+We have input our standard C test program, consisting of 800 tokens, to
+the error recovery routine of this `doctored' C compiler,
+and compared the storage needed for the prediction graphs when the
+loop-deletion algorithm is enabled with the storage needed when the
+algorithm is disabled. With the loop-deletion algorithm enabled, the
+maximum size of the prediction graph was 5576 bytes. When the loop
+algorithm was disabled, the maximum size of the prediction graph
+grew to 12676 bytes; furthermore, 12676 bytes of heap were allocated
+for the prediction graph but not deallocated again, because they were
+in use by graph elements that were part of inaccessible loops. The user
+time the program needed changed only slightly: 0.9 seconds with the
+algorithm disabled versus 1.0 seconds with it enabled. Given the
+relatively small input program, this suggests that when loops
+are actually being made, the loop-deletion algorithm is definitely
+worth the extra overhead it costs, considering the space
+that would otherwise be occupied by inaccessible loops. To verify this,
+we input the C program consisting of 15000 tokens to the compiler;
+execution time increased from 17.3 to 21.1 seconds after enabling
+the loop-deletion algorithm, while the maximum size of the prediction graph
+shrank from 328664 to 13664 bytes. With the loop-deletion algorithm
+disabled, 326720 bytes allocated for the graph were not deallocated again.
+Again, given the relatively small increase in execution time and the
+large reduction in memory usage, we feel that the loop-deletion
+algorithm is useful enough to justify the overhead it creates.
+
+.NH 2
+Problems encountered
+
+.LP
+In this section we describe some of the problems we encountered
+while testing the non-correcting error recovery.
+
+.NH 3
+The LLgen error reporting mechanism
+
+.LP
+The parsers generated by LLgen call a user-supplied error reporting
+routine, usually called LLmessage. This routine is called with an
+integer parameter that is positive, zero or negative. When the parameter
+is positive the parser has just inserted a token, whose
+number is equal to the parameter; if it is zero, the parser 
+has deleted a token whose number is in a global variable called LLsymb; if
+it is negative, it means that LLgen expected end-of-file, but did not
+find it. The routine LLmessage is supposed to print an error message,
+and when a token is inserted, it should set all necessary attributes.
+
+.LP
+However, when non-correcting error recovery is used, the situation becomes slightly
+different; when the parser inserts a token, it does so only to keep the
+semantic actions consistent, and the insertion no longer signifies an error.
+However, the LLmessage routine still has to be called, because the
+attributes of the inserted token need to be set. Therefore, when
+non-correcting error recovery is used, the LLmessage routine should not
+print an error message when the parameter is positive, or else it would
+print highly confusing error messages indeed. Furthermore, the
+LLmessage routine will usually print a message like `token ... deleted' when
+it is called with its parameter equal to zero; however, when the non-correcting
+error recovery is used, it is more appropriate to report something
+like `token ... illegal', as the non-correcting error recovery does
+not delete tokens. Finally, when an unexpected end-of-file is encountered,
+LLgen normally just inserts the missing tokens and calls
+LLmessage with the parameter equal to the token number;
+when non-correcting error recovery is used we need a way to
+actually report that we have encountered an unexpected end-of-file. We
+achieved this by calling LLmessage with parameter 0 and the
+global variable LLsymb set to EOFILE when this situation occurs; the
+routine LLmessage should print something like `unexpected end of file'
+when it is called with parameter 0 and LLsymb is EOFILE. To facilitate
+switching between correcting and non-correcting error recovery, the
+file Lpars.h contains a statement `#define LLNONCORR' if non-correcting
+error recovery is used.
+
+
+.NH 3
+Parsers being started in semantic actions
+
+.LP
+LLgen allows the programmer to define more than one nonterminal as a
+start symbol of the input grammar; it will generate a parsing routine
+for each of the start symbols. However, the error recovery code
+is generated only once; it is shared by all parsers.
+The programmer is free to call any
+of the generated parsers whenever he wants; for instance, in the C compiler
+a separate parser is used for expressions in #if and #elif statements. Whenever
+the lexical analyzer encounters such a statement, it calls the expression
+parser. It is also possible to call a parser in a semantic action of
+another parser; in the Modula-2 compiler a separate parser for
+definition modules is used. When the main parser encounters a
+FROM defmod IMPORT statement, a semantic
+action opens the definition module defmod and starts the parser for
+definition modules.
+
+.LP
+The fact that subparsers can be started just about anywhere causes
+problems when non-correcting error recovery is used. 
+Suppose a parser calls another parser in a semantic action
+to parse a separate input file. In the Modula-2 compiler, after 
+seeing the FROM defmod IMPORT statement a semantic action opens
+defmod and parses it; now, if a syntax error occurred before the
+FROM IMPORT statement, the non-correcting error recovery will not
+execute the action that opens and parses the definition module, but
+it will not report an error either, because the statement 
+FROM defmod IMPORT is part of the input language of the main parser.
+However, suppose that during the parsing of a definition module
+an error occurs; then, some semantic actions that would normally
+be executed during parsing of the definition module will not have
+taken place. When normal parsing is now resumed by the main parser,
+after the non-correcting error recovery has finished with the
+definition module, a lot of spurious semantic errors are likely to be
+reported, because the semantic actions that would normally have been
+executed during the definition module parsing have not been executed
+by the error recovery. Therefore, it is desirable that the main parser
+does not resume normal parsing, but instead continues with the non-correcting
+error recovery as well. Any syntactic errors in the main program will
+still be reported, but no spurious semantic errors will be reported
+that way.
+
+.LP
+When the lexical analyzer calls other parsers, as is the case in
+the ACK C compiler, recursive invocations of the non-correcting error
+recovery routine can occur. This will happen if a parser starts the
+error recovery, the error recovery calls the lexical analyzer, and the
+lexical analyzer starts another parser that finds a syntax error and calls the
+error recovery again. This is not really a problem, but it has
+consequences for the implementation of the error recovery routine.
+
+.LP
+The worst case
+occurs when two parsers are involved in parsing one input file, and
+the secondary parser (e.g. an inline assembly parser) is called in a semantic
+action of the main parser. Suppose now that the input text contains
+a syntax error; after detecting this error, the parser starts the
+non-correcting error recovery. This recovery does not execute any
+semantic actions; therefore it will not start the subparser at those points
+where the original LLgen generated parser would. As a result, parts
+of the program that would be accepted by the subparser will now probably
+be rejected as illegal, because the error recovery does not know it
+should use another grammar to check these parts. This is a serious
+problem, and we have devised and implemented two ways to solve it.
+
+.LP
+The first solution is based on the assumption that whenever a semantic
+action occurs in the grammar, another parser can be started at that
+point. Obviously, we have no way of knowing which semantic actions start
+a parser and which don't, so we assume the worst.
+Now, assume that in the grammar there are k symbols defined as
+start symbols, say $W sub 1 , W sub 2 , ..., W sub k$. Each of these symbols
+will cause LLgen to generate a parser that can be called in any
+of the semantic actions of the grammar. We now introduce a new
+symbol $X$, and a new grammar rule $X -> W sub 1 X | W sub 2 X | ... | 
+W sub k X |
+epsilon$.
+In the grammar the error recovery algorithm uses, we insert this symbol
+X at all positions where there are semantic actions in the original grammar,
+so a rule $A -> alpha$ { action } $beta$ becomes $A -> alpha X beta$. As a
+result, at each position in a grammar rule where a semantic action 
+occurs, we now accept any input that would be accepted by any of the
+parsers. Clearly, this solution is somewhat of a kludge, as it will
+accept a lot of input that is not accepted by the original parser.
+However, it is guaranteed to never give spurious error messages, because
+whenever a parser would be started by the original parser, there now
+is an $X$ in the grammar that produces all the strings that would be
+accepted by that parser. We have implemented this solution, and found
+it to be extremely slow, which of course was to be expected given the
+number of semantic actions in the average grammar. Furthermore,
+because at each semantic action in the grammar
+a string accepted by any of the generated parsers is now accepted, including
+strings recognized by the currently running parser, error messages
+become hard to interpret. As an example, consider the following
+C program:
+.br
+.nf
+
+
+	main()
+	{
+		int i, j;
+
+		while (i < j 
+           		j++;
+
+		i = 1;
+		j = 2;
+
+	}
+
+
+.fi
+.LP
+Clearly, there is a `)' missing in the while-statement;
+however, if this program is input to the error recovery, it will complain
+`} illegal', since after recognizing the
+expression controlling the while the original parser starts a
+semantic action, so the non-correcting recovery will accept a valid
+C program at that point; after recognizing the three statements
+that follow as a separate program, the
+recognizer expects the missing `)', but gets `}' instead.
+
+.LP
+Our second solution is based on the observation that if we knew
+which semantic actions can start other parsers, we would only
+have to introduce the new symbol $X$ at those places where parsers
+can get started. We have therefore extended LLgen with a new directive,
+%substart, which is used to indicate to the parser generator that
+another parser may be started. The %substart is followed by the
+start symbols that produce the parsers that can be called,
+so %substart A, B, C; indicates that in the semantic action
+following the directive the parsers produced by start symbols
+A, B, and C can be started. In the grammar used by the error
+recovery, a new symbol $X$ will be introduced at this point,
+along with a new rule $X -> AX | BX | CX | epsilon$. Of course, this
+solution can still accept input that would not have been accepted
+by the original parser, for instance if a parser is started
+conditionally, based on other semantic information. However, it
+is a big improvement over the first solution, both in performance
+and in the input it accepts.
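+
+.LP
+As an illustration, a fragment of an LLgen input in which a semantic
+action may start the parser generated for start symbol DefinitionModule
+could look as shown below. Only the %substart directive and its placement
+are taken from the description above; the rule, the token names and the
+routines called in the action are invented for the example.
+.br
+.nf
+
+	%token	FROM, IDENT, IMPORT ;
+	%start	defmodule, DefinitionModule ;
+
+	import_statement
+		:	FROM IDENT IMPORT
+			%substart DefinitionModule ;
+			{
+				/* illustration only: open the definition module
+				 * and run the parser generated for DefinitionModule
+				 */
+				open_defmodule();
+				defmodule();
+			}
+			import_list
+		;
+
+.fi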
+
+.NH 3
+Syntactic errors being handled in semantic actions
+
+.LP
+A programmer may decide to handle certain syntactic errors
+in semantic actions, for instance because he is not satisfied with
+the standard error recovery. However, since the non-correcting error
+recovery does not execute semantic actions, this may cause errors
+to remain undetected. We encountered the following example in the ACK
+Modula-2 compiler, in the grammar rule for the assignment statement:
+.br
+.nf
+
+
+	Assignment_statement:	lvalue
+				[
+				 	'='
+					{
+					 error(":= expected");
+					}
+
+					|
+
+					':='
+				]
+				expression
+				;
+
+.fi
+.LP
+This works well in the original LLgen; however, statements like
+`j=9' are not treated as syntactic errors, but as semantic ones.
+The original LLgen-generated parser
+will print the (semantic) error message, but the non-correcting recovery
+will not execute the semantic action and therefore the erroneous
+input will be accepted.
+
+.LP
+To facilitate the incorporation of non-correcting error recovery in parsers
+that use this kind of `trick', we extended LLgen with the %erroneous
+directive. The directive indicates to the non-correcting recovery
+mechanism that the token following it is not really part of the grammar.
+When recognizing input, the error recovery will ignore tokens in the
+grammar that have %erroneous in front of them. If, in the example above,
+the '=' is replaced with %erroneous '=', the non-correcting mechanism will
+report an error when it sees a statement like `j = 9'. See appendix A
+for details about the implementation of the %erroneous directive.
+
+.LP
+Another example is in the ACK C compiler. For some reason, the
+grammar accepts function definitions without `()', so according
+to the syntax a function definition can look like:
+.br
+.nf
+
+	int func
+	{
+	  ....
+	}
+.fi
+
+.LP
+The absence of the `()', however, causes `func' to be entered in the
+symbol table as a non-function, and when the parser encounters the body
+a semantic action will complain with the error message "Making function body
+for non-function". This again will cause the non-correcting error
+recovery to miss errors. Consider this piece of code:
+.br
+.nf
+
+int i int j = 1;
+{}
+
+.fi
+
+.LP
+where apparently there's a `;' missing between the declarations
+of i and j. The original LLgen-generated parser only gives semantic errors:
+.br
+.nf
+"Making function body for non-function"
+"j is not in parameter list"
+"Illegal initialization of formal parameter, ignored"
+.fi
+.LP
+As a result, the non-correcting error recovery will not report
+any errors in this piece of code, because it does not execute the
+semantic actions that recognize and report the error. Unfortunately,
+due to the way the C grammar is written, it is not possible to solve
+this problem using a %erroneous directive; the part of the grammar
+that deals with declarations would have to be rewritten so as to
+syntactically reject functions without `()'.
+
+.NH 3
+Semantic actions that read input
+
+.LP
+There are no restrictions on what a semantic action can do;
+there is nothing to stop the programmer from writing a parser in such
+a way that some of the input to the parser is processed by semantic
+actions. Obviously, because the non-correcting error recovery does not
+execute semantic actions, this kind of parser will not work at all
+with the new error recovery. Ironically, LLgen itself is written in
+such a fashion; {}-enclosed C-code in its input is processed by
+a semantic action in the LLgen grammar. We feel that it is bad
+practice to write parsers this way; the `eating' of parts of
+the input should be done in the lexical analyzer, not in the parser.
+After all, in the case of LLgen, one can regard a semantic action
+in the input as one token, and thus it should be handled by
+the lexical analyzer as such.
+
+.NH 2
+Examples of error recovery
+
+.LP
+We will now give some examples that compare non-correcting error
+recovery with the correcting error recovery used by parsers generated
+by `standard' LLgen.
+
+Consider the following C program, where there is a `)' missing in the
+header of function `test'.
+.br
+.nf
+
+	1 	int test(a,b
+	2
+	3	int a,b;
+	4
+	5	{
+	6		if (a < b)
+	7			return(1);
+	8		else
+	9			return(0);
+	10	}
+.fi
+
+.LP
+This small error derails the `standard' parser; it produces the
+following error messages, where we have left out 7 messages reporting
+semantic errors:
+.br
+.nf
+
+	line 3: , missing before type_identifier
+	line 3: , missing before identifier
+	line 3: ) missing before ;
+	line 5: { deleted
+	line 6: if deleted
+	line 6: < deleted
+	line 6: ) missing before identifier
+	line 6: ) deleted
+	line 7: identifier missing before return
+	line 7: ; missing before return
+	line 7: { missing before return
+	line 8: else deleted
+
+.fi
+.LP
+In contrast, the parser using non-correcting error recovery produces
+only one error message:
+.br
+
+	line 3: type_identifier illegal
+
+This error message correctly pinpoints the error: there should
+have been a `)' at the position where the type-identifier `int' is.
+
+.LP
+Now, an example with Modula-2; consider this program:
+.br
+.nf
+
+	1	MODULE test;
+	2
+	3	TYPES
+	4		ElementRecordType = RECORD
+	5		Element: ElementType;
+	6		Next,
+	7		Prior: ElementPointerType;
+	8	END;
+	9
+	10	VARS a,b,c: ElementRecordType;
+	11
+	12
+	13	BEGIN
+	14
+	15		a := b;
+	16
+	17	END test.
+
+.fi
+.LP
+There are two syntactic errors in this program; on line 3, TYPES should be TYPE, and
+on line 10, VARS should be VAR. We have left out the type declarations of
+ElementType and ElementPointerType; clearly this will generate semantic
+errors, but we are only interested in syntactic errors anyway.
+The correcting error recovery parser
+again derails on this program; it produces the following syntactic error messages:
+.br
+.nf
+
+	line 3: CONST missing before identifier
+	line 4: '=' missing before identifier
+	line 4: RECORD deleted
+	line 5: ':' deleted
+	line 5: ';' missing before identifier
+	line 5: '=' missing before ';'
+	line 5: number missing before ';'
+	line 6: ',' deleted
+	line 7: '=' missing before identifier
+	line 7: ':' deleted
+	line 7: ';' missing before identifier
+	line 7: '=' missing before ';'
+	line 7: number missing before ';'
+	line 8: ';' deleted
+	line 10: identifier deleted
+	line 10: ',' deleted
+	line 10: identifier deleted
+	line 10: ',' deleted
+	line 10: identifier deleted
+	line 10: ':' deleted
+	line 10: identifier deleted
+	line 10: ';' deleted
+	line 13: BEGIN deleted
+	line 15: identifier deleted
+	line 15: := deleted
+	line 15: identifier deleted
+	line 15: ';' deleted
+	line 17: END deleted
+	line 17: identifier deleted
+
+.fi
+.LP
+The error correction mechanism clearly makes the wrong guess by inserting
+CONST on line 3; as a result, all that follows is rejected as incorrect.
+In contrast, the non-correcting error recovery mechanism only produces
+two error messages:
+.br
+.nf
+
+	line 3: identifier illegal
+	line 10: identifier illegal
+
+.fi
+.LP
+This again exactly pinpoints the errors: the identifiers TYPES and
+VARS constitute the only errors in the program. Note that the
+presence of more than one error does not cause any problems for the
+non-correcting recovery mechanism.
+
+.bp
+.nr PS 12
+.nr VS 14
+
+.NH
+Conclusion
+
+.nr PS 10
+.nr VS 12
+
+.LP
+After implementing and testing a non-correcting error recovery mechanism,
+we have come to the conclusion that it is indeed superior to correcting
+mechanisms as regards the error messages it produces;
+the examples we have given clearly show this. However, there is a
+clear loss of performance when errors are present in a program,
+although we have found this performance
+degradation to be acceptable. We feel that the benefits of
+better error messages outweigh the loss of performance. In any case,
+correct programs do not suffer at all from the incorporation
+of a non-correcting recovery mechanism.
+The error recovery mechanism we implemented does not make
+unreasonable demands on resources; the size of the prediction
+graphs stays within reasonable limits.
+
+.LP
+The main problems we encountered had to do with recognizing
+`languages within languages', and with semantic actions that do
+unreasonable things like eating input. The more `well-behaved' a
+parser is, the better the results the non-correcting error recovery
+mechanism gives. This also holds for the input grammars: with a
+language like Modula-2, whose syntax has been designed with parser
+generators in mind, the performance of the non-correcting mechanism
+is better than with C, whose syntax is extremely hard, if not
+impossible, to describe with an LL(1) grammar.
+
+.bp
+.nr PS 12
+.nr VS 14
+
+.NH
+Bibliography
+
+.nr PS 10
+.nr VS 12
+
+.IP [CORMACK] 12
+Gordon V. Cormack, `An LR substring parser for noncorrecting syntax error
+recovery', ACM SIGPLAN Notices, vol. 24, no. 7, p. 161-169, July 1989
+
+.IP [GRUNE] 12
+Dick Grune, Ceriel J.H. Jacobs, `A programmer friendly LL(1) parser
+generator', Softw. Pract. Exper., vol. 18, no. 1, p. 29-38, Jan 1988
+
+.IP [RICHTER] 12
+Helmut Richter, `Noncorrecting syntax error recovery', ACM Trans. Prog. Lang.
+Sys., vol. 7, no. 3, p. 478-489, July 1985
+
+.IP [ROEHRICH] 12
+Johannes R\*:ohrich, `Methods for the automatic construction of error
+correcting parsers', Acta Inform., vol. 13, no. 2, p. 115-139, Feb 1980
+
+.IP [TOMITA] 12
+Masaru Tomita, Efficient parsing for natural language, Kluwer Academic
+Publishers, Boston, p. 210, 1986
+.bp
+.SH
+Appendix A: Implementation Issues
+
+.nr PS 10
+.nr VS 12
+.RS
+.LP
+In this appendix we describe some implementation issues: the data
+structure used to store the grammar during non-correcting
+error recovery, the postponing of deletions of graph elements until after
+the prediction phase, the clearing of flags, and the implementation of the
+%erroneous directive.
+.RE
+
+.SH
+A.1 The grammar data structure
+
+.LP
+The grammar data structure used by the non-correcting error recovery technique has
+to meet two conditions: easy access to a rule as a whole, to make
+substituting nonterminals efficient, and easy access to each symbol in the RHS
+of a rule, to make starting error recovery and finding continuations
+efficient. To fulfill these conditions we decided to organize the
+storage of the grammar as follows.
+
+.LP
+A rule in the grammar is divided into two
+parts: an LHS and an RHS. The LHS is represented by a struct `lhs' and
+for each symbol in the RHS a struct `symbol' is constructed.
+A struct `lhs' contains the number of the
+nonterminal forming the LHS of the rule, a pointer to the RHS, the
+FIRST and FOLLOW sets of the nonterminal, and a flag `empty' which
+indicates whether the nonterminal produces empty or not. A struct
+`symbol' contains a field indicating the type of the symbol, i.e.
+a terminal or a nonterminal, the number of the symbol, a `link' pointer
+to a struct `symbol' that represents the same symbol, a `next' pointer
+to the rest of the RHS, and a pointer back to the LHS.
+
+.LP
+A special struct `symbol' is added to the end of the RHS to indicate
+the end of a rule. The type of this struct is LLEORULE, the number
+is set to -1 and the pointers `link' and `next' are nil.
+
+.LP
+When there is more than one RHS for an LHS, all the RHS's
+are placed one after another, separated by another special struct
+`symbol'. The type of this struct is LLALT, the number is set to
+-1 and the `link' pointer is nil. After the last RHS an `LLEORULE'-struct
+marker is added.
+
+.LP
+Finally, to make searching efficient there are two arrays: `terminals'
+and `nonterminals'. `terminals' is indexed by the number of a terminal
+and contains for each terminal a struct with a `link' pointer
+to a symbol, representing this terminal, in the RHS of some rule. Because
+this symbol in turn has a `link' pointer to another symbol representing
+the same terminal, it is possible, by following this chain of pointers,
+to find all rules containing that terminal. In a similar way `nonterminals'
+is indexed by the number of a nonterminal and contains a struct for each
+nonterminal. This struct not only contains a `link' pointer
+linking all rules with this nonterminal, but also a `rule'
+pointer. This pointer points to the RHS or RHS's of the rules of which
+the nonterminal forms the LHS.
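+
+.LP
+The following C fragment gives a minimal sketch of these declarations.
+It is based solely on the description above; the field names, the
+representation of the FIRST and FOLLOW sets and the auxiliary structs
+for the two arrays are assumptions, not the declarations actually used
+in the implementation.
+.br
+.nf
+
+	/* sketch only: names and set representation are assumptions */
+	struct symbol {
+		int	s_type;		/* LLTERM, LLNONTERM, LLEORULE or LLALT */
+		int	s_number;	/* symbol number; -1 for the special structs */
+		struct symbol	*s_link;	/* next occurrence of the same symbol */
+		struct symbol	*s_next;	/* rest of this RHS */
+		struct lhs	*s_lhs;		/* back pointer to the LHS of the rule */
+	};
+
+	struct lhs {
+		int	l_number;	/* number of the nonterminal */
+		struct symbol	*l_rhs;	/* first symbol of the RHS('s) */
+		char	*l_first;	/* FIRST set, e.g. as a bit set */
+		char	*l_follow;	/* FOLLOW set, e.g. as a bit set */
+		int	l_empty;	/* nonzero if the nonterminal produces empty */
+	};
+
+	struct term_info {			/* one entry per terminal */
+		struct symbol	*t_link;	/* chain of occurrences in RHS's */
+	};
+
+	struct nonterm_info {			/* one entry per nonterminal */
+		struct symbol	*n_link;	/* chain of occurrences in RHS's */
+		struct symbol	*n_rule;	/* the RHS('s) of its own rules */
+	};
+
+.fi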
+ 
+.LP
+As an example, consider the following grammar:
+
+.br
+A:	a B
+.br
+B:	a | $epsilon$
+.br
+
+This will result in the picture below. Note that `pointer' fields 
+without an arrow indicate nil pointers.
+
+.PS
+dx = 0.05
+
+down 
+A_a: box ht boxht/2 "link"
+box invis "a" ljust with .e at A_a.w
+
+move to A_a.s
+move
+move
+
+A: box "link" "rule"
+B: box "link" "rule"
+line dashed from A.w to A.e 
+line dashed from B.w to B.e
+box invis "A" ljust with .e at A.w
+box invis "B" ljust with .e at B.w
+
+move to A.ne
+right
+move
+move
+down
+
+LHS_A: box wid 1.2 * boxwid ht 2.5 * boxht "`A'" "rhs" "first" "follow" "empty 0"
+line dashed from 0.2 <LHS_A.nw, LHS_A.sw> to 0.2 <LHS_A.ne, LHS_A.se>
+line dashed from 0.4 <LHS_A.nw, LHS_A.sw> to 0.4 <LHS_A.ne, LHS_A.se>
+line dashed from 0.6 <LHS_A.nw, LHS_A.sw> to 0.6 <LHS_A.ne, LHS_A.se>
+line dashed from 0.8 <LHS_A.nw, LHS_A.sw> to 0.8 <LHS_A.ne, LHS_A.se>
+
+move to LHS_A.ne + (1,0)
+
+RHS_a1: box wid 2.0 * boxwid ht 2.5 * boxht "LLTERM" "`a'" "link" "next" "lhs"
+line dashed from 0.2 <RHS_a1.nw, RHS_a1.sw> to 0.2 <RHS_a1.ne, RHS_a1.se>
+line dashed from 0.4 <RHS_a1.nw, RHS_a1.sw> to 0.4 <RHS_a1.ne, RHS_a1.se>
+line dashed from 0.6 <RHS_a1.nw, RHS_a1.sw> to 0.6 <RHS_a1.ne, RHS_a1.se>
+line dashed from 0.8 <RHS_a1.nw, RHS_a1.sw> to 0.8 <RHS_a1.ne, RHS_a1.se>
+
+move to RHS_a1.ne + (1,0)
+
+RHS_B: box wid 2.0 * boxwid ht 2.5 * boxht "LLNONTERM" "`B'" "link" "next" "lhs"
+line dashed from 0.2 <RHS_B.nw, RHS_B.sw> to 0.2 <RHS_B.ne, RHS_B.se>
+line dashed from 0.4 <RHS_B.nw, RHS_B.sw> to 0.4 <RHS_B.ne, RHS_B.se>
+line dashed from 0.6 <RHS_B.nw, RHS_B.sw> to 0.6 <RHS_B.ne, RHS_B.se>
+line dashed from 0.8 <RHS_B.nw, RHS_B.sw> to 0.8 <RHS_B.ne, RHS_B.se>
+
+move to RHS_B.ne + (1,0)
+
+RHS_END1: box wid 2.0 * boxwid ht 2.5 *boxht "LLEORULE" "-1" "link" "next" "lhs"
+line dashed from 0.2 <RHS_END1.nw, RHS_END1.sw> to 0.2 <RHS_END1.ne,RHS_END1.se>
+line dashed from 0.4 <RHS_END1.nw, RHS_END1.sw> to 0.4 <RHS_END1.ne,RHS_END1.se>
+line dashed from 0.6 <RHS_END1.nw, RHS_END1.sw> to 0.6 <RHS_END1.ne,RHS_END1.se>
+line dashed from 0.8 <RHS_END1.nw, RHS_END1.sw> to 0.8 <RHS_END1.ne,RHS_END1.se>
+
+
+move to LHS_A.s - (0,1)
+
+LHS_B: box wid 1.2 * boxwid ht 2.5 * boxht "`B'" "rhs" "first" "follow" "empty 1"
+line dashed from 0.2 <LHS_B.nw, LHS_B.sw> to 0.2 <LHS_B.ne, LHS_B.se>
+line dashed from 0.4 <LHS_B.nw, LHS_B.sw> to 0.4 <LHS_B.ne, LHS_B.se>
+line dashed from 0.6 <LHS_B.nw, LHS_B.sw> to 0.6 <LHS_B.ne, LHS_B.se>
+line dashed from 0.8 <LHS_B.nw, LHS_B.sw> to 0.8 <LHS_B.ne, LHS_B.se>
+
+move to LHS_B.ne + (1,0)
+
+RHS_a2: box wid 2.0 * boxwid ht 2.5 * boxht "LLTERM" "`a'" "link" "next" "lhs"
+line dashed from 0.2 <RHS_a2.nw, RHS_a2.sw> to 0.2 <RHS_a2.ne, RHS_a2.se>
+line dashed from 0.4 <RHS_a2.nw, RHS_a2.sw> to 0.4 <RHS_a2.ne, RHS_a2.se>
+line dashed from 0.6 <RHS_a2.nw, RHS_a2.sw> to 0.6 <RHS_a2.ne, RHS_a2.se>
+line dashed from 0.8 <RHS_a2.nw, RHS_a2.sw> to 0.8 <RHS_a2.ne, RHS_a2.se>
+
+move to RHS_a2.ne + (1,0)
+
+RHS_ALT: box wid 2.0 * boxwid ht 2.5 * boxht "LLALT" "-1" "link" "next" "lhs"
+line dashed from 0.2 <RHS_ALT.nw, RHS_ALT.sw> to 0.2 <RHS_ALT.ne, RHS_ALT.se>
+line dashed from 0.4 <RHS_ALT.nw, RHS_ALT.sw> to 0.4 <RHS_ALT.ne, RHS_ALT.se>
+line dashed from 0.6 <RHS_ALT.nw, RHS_ALT.sw> to 0.6 <RHS_ALT.ne, RHS_ALT.se>
+line dashed from 0.8 <RHS_ALT.nw, RHS_ALT.sw> to 0.8 <RHS_ALT.ne, RHS_ALT.se>
+
+move to RHS_ALT.ne + (1,0)
+
+RHS_END2: box wid 2.0 * boxwid ht 2.5 *boxht "LLEORULE" "-1" "link" "next" "lhs"
+line dashed from 0.2 <RHS_END2.nw, RHS_END2.sw> to 0.2 <RHS_END2.ne,RHS_END2.se>
+line dashed from 0.4 <RHS_END2.nw, RHS_END2.sw> to 0.4 <RHS_END2.ne,RHS_END2.se>
+line dashed from 0.6 <RHS_END2.nw, RHS_END2.sw> to 0.6 <RHS_END2.ne,RHS_END2.se>
+line dashed from 0.8 <RHS_END2.nw, RHS_END2.sw> to 0.8 <RHS_END2.ne,RHS_END2.se>
+
+# Next pointers upper row
+.ps 30
+circle radius .01 at 0.75 <A.ne, A.se> - (dx, 0) 
+circle radius .01 at 0.3 <LHS_A.ne, LHS_A.se> - (dx, 0) 
+circle radius .01 at 0.7 <RHS_a1.ne, RHS_a1.se> - (dx, 0) 
+circle radius .01 at 0.7 <RHS_B.ne, RHS_B.se> - (dx, 0) 
+.ps 10 
+
+arrow from 0.75 <A.ne, A.se> - (dx, 0) to 0.3 <LHS_A.nw, LHS_A.sw>
+arrow from 0.3 <LHS_A.ne, LHS_A.se> - (dx, 0) to 0.3 <RHS_a1.nw,RHS_a1.sw>
+arrow from 0.7 <RHS_a1.ne, RHS_a1.se> - (dx, 0) to 0.7 <RHS_B.nw,RHS_B.sw>
+arrow from 0.7 <RHS_B.ne, RHS_B.se> - (dx, 0) to 0.7 <RHS_END1.nw, RHS_END1.sw>
+
+
+# Next pointers lower row
+.ps 30
+circle radius .01 at 0.75 <B.ne, B.se> - (dx, 0) 
+circle radius .01 at 0.3 <LHS_B.ne, LHS_B.se> - (dx, 0) 
+circle radius .01 at 0.7 <RHS_a2.ne, RHS_a2.se> - (dx, 0) 
+circle radius .01 at 0.7 <RHS_ALT.ne, RHS_ALT.se> - (dx, 0) 
+.ps 10
+
+arrow from 0.75 <B.ne, B.se> - (dx, 0) to 0.3 <LHS_B.nw, LHS_B.sw>
+arrow from 0.3 <LHS_B.ne, LHS_B.se> - (dx, 0) to 0.3 <RHS_a2.nw,RHS_a2.sw>
+arrow from 0.7 <RHS_a2.ne, RHS_a2.se> - (dx, 0) to 0.7 <RHS_ALT.nw,RHS_ALT.sw>
+arrow from 0.7 <RHS_ALT.ne, RHS_ALT.se> - (dx, 0) to 0.7 <RHS_END2.nw, RHS_END2.sw>
+
+
+# Link pointers
+.ps 30
+circle radius .01 at 0.5 <RHS_a1.ne, RHS_a1.se> - (2*dx, 0) 
+circle radius .01 at 0.5 <A_a.ne, A_a.se> - (dx, 0) 
+circle radius .01 at 0.25 <B.ne, B.se> - (dx, 0) 
+.ps 10
+
+arrow dashed from 0.5 <RHS_a1.ne, RHS_a1.se> - (2*dx, 0) to RHS_a2.ne - (2*dx,0)
+line dashed from 0.5 <A_a.ne, A_a.se> - (dx, 0) right 4.0 * boxwid then to RHS_a1.ne - (2*dx, 0) ->
+line dashed from 0.25 <B.ne, B.se> - (dx, 0) right then up .75 then right 7.0 * boxwid then to RHS_B.ne - (2*dx, 0) ->
+
+
+# LHS pointers upper row
+.ps 30
+circle radius .01 at 0.9 <RHS_a1.ne, RHS_a1.se> - (3*dx, 0) 
+circle radius .01 at 0.9 <RHS_B.ne, RHS_B.se> - (3*dx, 0) 
+circle radius .01 at 0.9 <RHS_END1.ne, RHS_END1.se> - (3*dx, 0) 
+.ps 10
+
+line from 0.9 <RHS_a1.ne, RHS_a1.se> - (3*dx, 0) down ->
+line from 0.9 <RHS_B.ne, RHS_B.se> - (3*dx, 0) down ->
+line from 0.9 <RHS_END1.ne, RHS_END1.se> - (3*dx, 0) down then left 8.0 * boxwid then to LHS_A.se -> 
+
+
+# LHS pointers lower row
+.ps 30
+circle radius .01 at 0.9 <RHS_a2.ne, RHS_a2.se> - (3*dx, 0)
+circle radius .01 at 0.9 <RHS_ALT.ne, RHS_ALT.se> - (3*dx, 0)
+circle radius .01 at 0.9 <RHS_END2.ne, RHS_END2.se> - (3*dx, 0)
+.ps 10
+
+line from 0.9 <RHS_a2.ne, RHS_a2.se> - (3*dx, 0) down ->
+line from 0.9 <RHS_ALT.ne, RHS_ALT.se> - (3*dx, 0) down ->
+line from 0.9 <RHS_END2.ne, RHS_END2.se> - (3*dx, 0) down then left 8.0 * boxwid then to LHS_B.se ->
+
+
+# Text above structs
+box invis ht boxht/2 "terminals" with .s at A_a.n
+box invis ht boxht/2 "nonterminals" with .s at A.n
+box invis ht boxht/2 "lhs" with .s at LHS_A.n
+box invis ht boxht/2 "lhs" with .s at LHS_B.n
+box invis ht boxht/2 "symbol" with .s at RHS_a1.n
+box invis ht boxht/2 "symbol" with .s at RHS_B.n
+box invis ht boxht/2 "symbol" with .s at RHS_END1.n
+box invis ht boxht/2 "symbol" with .s at RHS_a2.n
+box invis ht boxht/2 "symbol" with .s at RHS_ALT.n
+box invis ht boxht/2 "symbol" with .s at RHS_END2.n
+.PE
+
+.LP
+Note that the empty alternative for `B' is represented in the
+data structure by the `LLEORULE'-struct immediately following
+the `LLALT'-struct. When there are further alternatives,
+the `LLEORULE'-struct is replaced by an `LLALT'-struct followed
+by the other alternatives and an `LLEORULE'-struct.
+Finally, when the empty rule is the only rule for a
+nonterminal, the RHS consists only of an `LLEORULE'-struct.
+
+.SH
+A.2 Delayed deletes
+
+.LP
+We encountered a problem with deleting elements during the
+prediction phase. Imagine that we have a nonterminal `B' on top of
+the graph, and `B' has two alternatives. Now suppose that we
+apply the first alternative and we find out that this alternative leads
+to a `dead end', i.e. a head that does not match the input symbol, so we want
+to get rid of it. If we delete it immediately, the deletion algorithm
+will also deallocate `[B]' and possibly some elements below `[B]'.
+However, there was another alternative for `[B]' which was not yet
+developed, and this alternative may lead to a head that is legal.
+But `[B]' has already been deleted and thus cannot be used anymore. A similar
+situation can occur when we want to delete a joined element;
+the substitution of a nonterminal
+that only produces empty, and thus has no element above it in the graph,
+can also lead to such a situation. We therefore decided to put `dead ends'
+on a list, `cleanup_arr[]', and after the prediction phase has
+finished we delete all elements on this list, together with all of their
+descendants that become unreachable.
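+
+.LP
+In outline, the scheme looks as follows; `cleanup_arr' is the name used
+above, but the element type, the array size and the routine names are
+invented for this sketch and differ from the actual code.
+.br
+.nf
+
+	/* sketch: postpone deletions until after the prediction phase */
+#define MAXDEAD	512			/* invented bound */
+
+	struct graph_elem *cleanup_arr[MAXDEAD];
+	int n_dead_ends = 0;
+
+	mark_dead_end(elem)
+	struct graph_elem *elem;
+	{
+		/* do not free anything yet, just remember the dead end;
+		 * undeveloped alternatives may still need elements below it
+		 */
+		cleanup_arr[n_dead_ends++] = elem;
+	}
+
+	cleanup_dead_ends()
+	{
+		int i;
+
+		/* after the prediction phase it is safe to delete:
+		 * all alternatives have been developed by now
+		 */
+		for (i = 0; i < n_dead_ends; i++)
+			delete_elem(cleanup_arr[i]);	/* also removes descendants
+							 * that become unreachable */
+		n_dead_ends = 0;
+	}
+
+.fi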
+
+.SH
+A.3 Clearing flags
+
+.LP
+We implemented two different ways to clear the flags set by the prediction
+phase of the algorithm; the first recursively walks the whole graph,
+following the flags, while the second puts all elements visited by
+the prediction phase
+on a list, and after the prediction phase has finished the algorithm walks
+through this list, clearing the flags of all elements on it. We took measurements
+of both algorithms and found that with small programs the times
+did not differ much, but large programs were processed faster by the
+second algorithm. We therefore decided to use the second algorithm.
+
+.LP
+To speed up the algorithm even more, we do not deallocate the list
+after a prediction phase has finished. We just set the number of 
+elements on the list to 0. This saves considerably on the number
+of `Malloc'-calls.
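+
+.LP
+A sketch of the second method, including the reuse of the list, is given
+below; the names, the flag bit and the growing strategy are invented for
+the illustration and differ from the actual code.
+.br
+.nf
+
+	/* sketch: clear flags by walking a list of visited elements */
+	extern char *malloc(), *realloc();
+
+	static struct graph_elem **visited;	/* allocated once, then reused */
+	static int n_visited, max_visited;
+
+	note_visited(elem)
+	struct graph_elem *elem;
+	{
+		if (n_visited == max_visited) {
+			unsigned size;
+
+			/* grow the list; it is never deallocated again */
+			max_visited += 256;
+			size = max_visited * sizeof(elem);
+			if (visited == 0)
+				visited = (struct graph_elem **) malloc(size);
+			else
+				visited = (struct graph_elem **)
+						realloc((char *) visited, size);
+		}
+		elem->flags |= VISITED;
+		visited[n_visited++] = elem;
+	}
+
+	clear_flags()
+	{
+		while (n_visited > 0)
+			visited[--n_visited]->flags &= ~VISITED;
+		/* n_visited is now 0; the list itself is kept for the
+		 * next prediction phase, saving Malloc calls
+		 */
+	}
+
+.fi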
+
+.SH
+A.4 Implementation of %erroneous directive
+
+.LP
+As explained in chapter 3, the user can put a %erroneous directive
+in front of a terminal, making the non-correcting error recovery
+mechanism ignore that terminal. However, implementing this directive
+was not entirely straightforward; consider, for example, the rule
+.br
+.nf
+
+	A:	'a' | %erroneous 'b' | 'c';
+
+.fi
+.LP
+Just leaving out terminal 'b' will not do, because then nonterminal
+A produces empty all of a sudden, which it did not before. 
+The rule should become
+.br
+.nf
+
+	A:	'a' | 'c';
+
+.fi
+but this is hard to implement in LLgen. We took a different approach:
+we introduce a new terminal 'ERRONEOUS', and substitute it for all 
+terminals with an %erroneous directive in front of them. Thus, the
+example rule becomes
+.br
+.nf
+
+	A:	'a' | ERRONEOUS | 'c';
+
+.fi
+.LP
+Since the terminal ERRONEOUS will never be in the input to the parser,
+this has exactly the desired effect; when a prediction phase produces
+ERRONEOUS as the head of a prediction graph, this head will never match the
+input. In particular, it will not match the terminal that was
+originally there (in this case 'b'), so that terminal is no longer
+regarded as part of the input language at that point.
+.bp
+.SH
+Appendix B: Using the non-correcting error recovery
+
+.LP
+To use the new non-correcting error recovery mechanism, LLgen has to
+be called with the new flag -n. LLgen will then create an extra file
+called `Lncor.c' which contains the code for the non-correcting recovery 
+mechanism. This file has to be compiled and linked with the rest
+of the program, just like the file `Lpars.c'. 
+
+.LP
+The user-supplied error reporting routine `LLmessage' will have to be
+modified slightly; when it is called with a positive parameter, it
+should only set the attributes of the inserted token, but not report an 
+error. Note that the lexical analyzer still must return the same token
+as it did the last time it was called. When LLmessage is called with 
+parameter 0, it should report that the token in global variable LLsymb 
+is illegal; if the value of LLsymb is `EOFILE', the routine should
+report an unexpected End-of-file. When LLmessage is called with parameter
+-1, it should report that end-of-file was expected. To facilitate
+switching between correcting and non-correcting error recovery,
+the file Lpars.h contains a statement `#define LLNONCORR' 
+which indicates that the non-correcting
+mechanism is enabled.
+Here is a
+skeleton for the modified LLmessage routine:
+.nr PS 8
+.nr VS 10
+.LP
+.br
+.nf
+
+	#include "Lpars.h"
+	extern int LLsymb;
+
+	LLmessage(flag) 
+	int flag;
+	{
+		if (flag < 0)     
+		{
+			/* Error message "end-of-file expected" */;
+		}
+		else if (flag)    
+		{
+			/* flag equals the number of the inserted token */
+#ifndef LLNONCORR 
+
+			/* Error message "token inserted" */;	
+#endif
+
+			/* Code to set attributes for inserted token */
+			/* Code to make lexical analyzer return same token as before */
+		}
+		else
+		{
+			/* The number of the illegal or deleted token is in LLsymb */
+#ifndef LLNONCORR
+
+			/* Error message "token deleted" */;
+#else
+
+			if (LLsymb == EOFILE)
+			{
+				/* Error message "unexpected end of file" */
+			}
+			else
+			{
+				/* Error message "token illegal" */;
+			}
+#endif
+			
+		}
+
+	}
+
+.fi
+.nr PS 10
+.nr VS 12
+
+.LP
+For best results, one should check whether the parser calls other parsers
+in semantic actions; if this is the case, and the called parser
+processes the same input file as the calling parser, then a %substart
+should be put in front of the semantic action that starts a parser.
+If a semantic action calls parsers defined by start symbols, say
+A and B, then `%substart A, B;' should be put in front of the action.
+As an alternative, one can use the -s flag of LLgen; this has the
+same effect as putting `%substart X, Y, ....;' in front of all
+semantic actions, where X, Y, .... are the start symbols of the grammar.
+Clearly, it is preferable to analyze the grammar and put %substart
+directives only where appropriate.
+
+Finally, beware of syntactic errors being handled in semantic
+actions; e.g., one could have a rule like
+.nr PS 8
+.nr VS 10
+.LP
+.br
+.nf
+
+        Assignment_statement:   lvalue
+                                [
+                                        '='
+                                        {
+                                         error(":= expected");
+                                        }
+
+                                        |
+
+                                        ':='
+                                ]
+                                expression
+                                ;
+.fi
+
+.nr PS 10
+.nr VS 12
+.LP
+To ensure that the non-correcting mechanism will recognize the
+`=' as a syntactic error, a `%erroneous' directive should be
+put in front of it.  

+ 15 - 0
doc/LLgen/Makefile

@@ -0,0 +1,15 @@
+# $Id$
+
+GRAP=grap
+PIC=pic
+EQN=eqn
+REFER=refer
+TBL=tbl
+
+all:		../LLgen.doc ../LLgen_NCER.doc
+
+../LLgen.doc:	LLgen.n LLgen.refs
+		$(REFER) -sA+T -p LLgen.refs LLgen.n | $(EQN) | $(TBL) > $@
+
+../LLgen_NCER.doc:	LLgen_NCER.n
+		$(GRAP) LLgen_NCER.n | pic | eqn > $@

+ 20 - 0
doc/LLgen/proto.make

@@ -0,0 +1,20 @@
+# $Id$
+
+#PARAMS         do not remove this line!
+
+SRC_DIR = $(SRC_HOME)/doc/LLgen
+
+GRAP=grap
+PIC=pic
+EQN=eqn
+REFER=refer
+TBL=tbl
+
+all:	$(TARGET_HOME)/doc/LLgen.doc $(TARGET_HOME)/doc/LLgen_NCER.doc
+
+$(TARGET_HOME)/doc/LLgen.doc:	$(SRC_DIR)/LLgen.n $(SRC_DIR)/LLgen.refs
+	$(REFER) -sA+T -p $(SRC_DIR)/LLgen.refs $(SRC_DIR)/LLgen.n | $(EQN) | $(TBL) > $@
+
+$(TARGET_HOME)/doc/LLgen_NCER.doc:      $(SRC_DIR)/LLgen_NCER.n
+		$(GRAP) $(SRC_DIR)/LLgen_NCER.n | pic | eqn > $@
+

+ 82 - 0
doc/Makefile

@@ -0,0 +1,82 @@
+# $Id$
+
+# This Makefile is not supposed to be used in the doc source directory.
+# Instead, it is supposed to be copied to the target doc directory.
+
+SUF=dit
+PRINT=dis
+NROFF=troff
+MS=-ms
+OPR=dip
+
+RESFILES= \
+	toolkit.$(SUF) install.$(SUF) em.$(SUF) ack.$(SUF) v7bugs.$(SUF) \
+	peep.$(SUF) cg.$(SUF) ncg.$(SUF) regadd.$(SUF) LLgen.$(SUF) \
+	basic.$(SUF) crefman.$(SUF) pascal.$(SUF) pcref.$(SUF) val.$(SUF) \
+	ansi_C.$(SUF) \
+	6500.$(SUF) i80.$(SUF) z80.$(SUF) top.$(SUF) ego.$(SUF) \
+	m68020.$(SUF) occam.$(SUF) m2ref.$(SUF) ceg.$(SUF) nopt.$(SUF) \
+	sparc.$(SUF) int.$(SUF) lint.$(SUF)
+
+.SUFFIXES: .doc .$(SUF) .lpr .out
+
+.doc.$(SUF):
+		$(NROFF) $(MS) $< > $@
+
+# directly to the printer:
+.doc.lpr:
+		$(NROFF) $(MS) $< | $(OPR)
+
+# to standard output
+.doc.out:
+		@$(NROFF) $(MS) $<
+
+# Exceptions, to be run without -ms
+
+v7bugs.$(SUF):	v7bugs.doc
+		$(NROFF) v7bugs.doc >$@
+
+v7bugs.lpr:	v7bugs.doc
+		$(NROFF) v7bugs.doc | $(OPR)
+
+v7bugs.out:	v7bugs.doc
+		@$(NROFF) v7bugs.doc
+
+pcref.$(SUF):	pcref.doc
+		$(NROFF) pcref.doc >$@
+
+pcref.lpr:	pcref.doc
+		$(NROFF) pcref.doc | $(OPR)
+
+pcref.out:	pcref.doc
+		@$(NROFF) pcref.doc
+
+val.$(SUF):	val.doc
+		$(NROFF) val.doc >$@
+
+val.lpr:	val.doc
+		$(NROFF) val.doc | $(OPR)
+
+val.out:	val.doc
+		@$(NROFF) val.doc
+
+pr:
+		@make "SUF="$(SUF) "NROFF="$(NROFF) "MS="$(MS) \
+			$(RESFILES) >make.pr.out 2>&1
+		@$(PRINT) $(RESFILES)
+
+# The 'opr' entry creates a lot of paper ... but the user must be able
+# to write the doc directory. I hope that this limits the users of
+# this entry to persons that know what they are doing.
+opr:
+		@make "SUF="$(SUF) "NROFF="$(NROFF) "MS="$(MS) $(RESFILES) 
+		$(OPR) $(RESFILES)
+
+clean:
+		-rm -f $(RESFILES)
+
+# The distr entry is only used when making a distribution tree.
+# It makes a version of the installation manual, suitable for a simple
+# line printer.
+distr:		install.doc
+		tbl install.doc | nroff -Tlp $(MS) >install.pr

+ 8 - 0
doc/READ_ME

@@ -0,0 +1,8 @@
+Some of these documents use a font called CW.
+If this font is not available, references to it can be changed with
+a sed script like
+	s/\.ft CW/.ft yourfont/
+	s/\\f(CW/\\fyourfont/g
+	s/^.fp\(.*\)CW$/.fp\1yourfont/
+However, the font must be a constant-width font for the documents to look
+reasonable.

+ 444 - 0
doc/ack.doc

@@ -0,0 +1,444 @@
+.\" $Id$
+.nr PD 1v
+.tr ~
+.TL
+Ack Description File
+.br
+Reference Manual
+.AU
+Ed Keizer
+.AI
+Vakgroep Informatica
+Vrije Universiteit
+Amsterdam
+.NH
+Introduction
+.PP
+The program \fIack\fP(I) internally maintains a table of
+possible transformations and a table of string variables.
+The transformation table contains one entry for each possible
+transformation of a file.
+Which transformations are used depends on the suffix of the
+source file.
+Each transformation table entry tells which input suffixes are
+allowed and what suffix/name the output file has.
+When the output file does not already satisfy the request of the
+user (indicated with the flag \fB\-c.suffix\fP), the table is scanned,
+starting with the next transformation in the table, for another
+transformation whose input suffix is the output suffix of
+the previous transformation.
+A few special transformations are recognized; among them is the
+combiner, which is
+a program that combines several files into one.
+When no stop suffix is specified (flag \fB\-c.suffix\fP), \fIack\fP
+stops after executing the combiner with the \-
+possibly transformed \- input files and libraries as arguments.
+\fIAck\fP will only perform the transformations in the order in
+which they are presented in the table.
+.LP
+The string variables are used while creating the argument list
+and program call name for
+a particular transformation.
+.NH
+Which descriptions are used
+.PP
+\fIAck\fP always uses two description files: one to define the
+front-end transformations and one for the machine dependent
+back-end transformations.
+Each description has a name.
+First the way of determining
+the name of the descriptions needed is described.
+.PP
+When the shell environment variable ACKFE is set \fIack\fP uses
+that to determine the front-end table name, otherwise it uses
+\fBfe\fP.
+.PP
+The way the backend table name is determined is more
+convoluted.
+.br
+First, when the last filename in the program call name is not
+one of \fIack\fP or the front-end call-names,
+this filename is used as the backend description name.
+Second, when a \fB\-m\fP flag is present, the \fB\-m\fP is chopped off this
+flag and the rest is used as the backend description name.
+Third, when both fail, the shell environment variable ACKM is
+used.
+Last, when ACKM is not present either, the default backend is
+used, as determined by the definition of ACKM in h/local.h.
+The presence and value of the definition of ACKM is
+determined at compile time of \fIack\fP.
+.PP
+Now, we have the names, but that is only the first step.
+\fIAck\fP stores a few descriptions at compile time.
+These descriptions are simply files read in at compile time.
+At the moment of writing this document, the descriptions
+included are: pdp, fe, i86, m68k2, vax2 and int.
+The name of a description is first searched for internally,
+then in lib/descr/\fIname\fP, then in
+lib/\fIname\fP/descr, and finally in the current
+directory of the user.
+.NH
+Using the description file
+.PP
+Before starting on a narrative of the description file,
+the introduction of a few terms is necessary.
+All these terms are used to describe the scanning of zero
+terminated strings, thereby producing another string or
+sequence of strings.
+.IP Backslashing 5
+.br
+All characters preceded by \e are modified to prevent
+recognition at further scanning.
+This modification is undone before a string is passed to the
+outside world as argument or message.
+When reading the description files the
+sequences \e\e, \e# and \e<newline> have a special meaning.
+\e\e translates to a single \e, \e# translates to a single #
+that is not
+recognized as the start of comment, but can be used in
+recognition and finally, \e<newline> translates to nothing at
+all, thereby allowing continuation lines.
+.nr PD 0
+.IP "Variable replacement"
+.br
+The scan recognizes the sequences {{, {NAME} and {NAME?text},
+where NAME can be any combination of characters excluding ? and
+} and text may be anything excluding }.
+(~\e} is allowed of course~)
+The first sequence produces an unescaped single {.
+The second produces the contents of NAME; definitions are
+made by \fIack\fP and in description files.
+When the NAME is not defined an error message is produced on
+the diagnostic output.
+The last sequence produces the contents of NAME if it is
+defined and text otherwise.
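+As an illustration (these particular names are only an example): with
+the variable M defined as em22, the text lib/{M}/tail_cc expands to
+lib/em22/tail_cc, and {SIZE_FLAG?\-sm} expands to the contents of
+SIZE_FLAG when that variable is defined and to \-sm otherwise.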
+.PP
+.IP "Expression replacement"
+.br
+Syntax:  (\fIsuffix sequence\fP:\fIsuffix sequence\fP=\fItext\fP)
+.br
+Example: (.c.p.e:.e=tail_em)
+.br
+If the two suffix sequences have a common member \-~\&.e in this
+case~\- the text is produced.
+When no common member is present the empty string is produced.
+Thus the example given is a constant expression.
+Normally, one of the suffix sequences is produced by variable
+replacement.
+\fIAck\fP sets three variables while performing the diverse
+transformations: HEAD, TAIL and RTS.
+All three variables depend on the properties \fIrts\fP and
+\fIneed\fP from the transformations used.
+Whenever a transformation is used for the first time,
+the text following the \fIneed\fP is appended to both the HEAD and
+TAIL variable.
+The value of the variable RTS is determined by the first
+transformation used with a \fIrts\fP property.
+.IP
+Two runtime flags have effect on the value of one or more of
+these variables.
+The flag \fB\-.suffix\fP has the same effect on these three variables
+as if a file with that \fBsuffix\fP was included in the argument list
+and had to be translated.
+The flag \fB\-r.suffix\fP only has that effect on the TAIL
+variable.
+The program call names \fIacc\fP and \fIcc\fP have the effect
+of an automatic \fB\-.c\fP flag.
+\fIApc\fP and \fIpc\fP have the effect of an automatic \fB\-.p\fP flag.
+.IP "Line splitting"
+.br
+The string is transformed into a sequence of strings by replacing
+the blank space by string separators (nulls).
+.IP "IO replacement"
+.br
+The > in the string is replaced by the output file name.
+The < in the string is replaced by the input file name.
+When multiple input files are present the string is duplicated
+for each input file name.
+.nr PD 1v
+.LP
+Each description is a sequence of variable definitions followed
+by a sequence of transformation definitions.
+Variable definitions use a line each, transformations
+definitions consist of a sequence of lines.
+Empty lines are discarded, as are lines with nothing but
+comment.
+Comment is started by a # character, and continues to the end
+of the line.
+Three special two-character sequences exist: \e#, \e\e and
+\e<newline>.
+Their effect is described under 'backslashing' above.
+Each \- nonempty \- line starts with a keyword, possibly
+preceded by blank space.
+The keyword can be followed by a further specification.
+The two are separated by blank space.
+.PP
+Variable definitions use the keyword \fIvar\fP and look like this:
+.DS X
+   var NAME=text
+.DE
+The name can be any identifier, the text may contain any
+character.
+Blank space before the equal sign is not part of the NAME.
+Blank space after the equal is considered as part of the text.
+The text is scanned for variable replacement before it is
+associated with the variable name.
+.br
+.sp 2
+The start of a transformation definition is indicated by the
+keyword \fIname\fP.
+The last line of such a definition contains the keyword
+\fIend\fP.
+The lines in between associate properties to a transformation
+and may be presented in any order.
+The identifier after the \fIname\fP keyword determines the name
+of the transformation.
+This name is used for debugging and by the \fB\-R\fP flag.
+The keywords are used to specify which input suffices are
+recognized by that transformation,
+the program to run, the arguments to be handed to that program
+and the name or suffix of the resulting output file.
+Two keywords are used to indicate which run-time startoffs and
+libraries are needed.
+The possible keywords are:
+.IP \fIfrom\fP
+.br
+followed by a sequence of suffices.
+Each file with one of these suffices is allowed as input file.
+Preprocessor transformations do not need the \fIfrom\fP
+keyword. All other transformations do.
+.nr PD 0
+.IP \fIto\fP
+.br
+followed by the suffix of the output file name or in the case of a
+linker
+the output file name.
+.IP \fIprogram\fP
+.br
+followed by the name of the load file of the program; the pathname most likely
+starts with either a / or {EM}.
+This keyword must be
+present, the remainder of the line
+is subject to backslashing and variable replacement.
+.IP \fImapflag\fP
+.br
+The mapflags are used to grab flags given to \fIack\fP and
+pass them on to a specific transformation.
+This feature uses a few simple pattern matching and replacement
+facilities.
+Multiple occurrences of this keyword are allowed.
+The text following the keyword is
+subjected to backslashing.
+The keyword is followed by a match expression and a variable
+assignment separated by blank space.
+As soon as both description files are read, \fIack\fP looks
+at all transformations in these files to find a match for the
+flags given to \fIack\fP.
+The flags \fB\-m\fP, \fB\-o\fP,
+\fB\-O\fP, \fB\-r\fP, \fB\-v\fP, \fB\-g\fP, \-\fB\-c\fP, \fB\-t\fP,
+\fB\-k\fP, \fB\-R\fP and \-\fB\-.\fP are specific to \fIack\fP and
+not handed down to any transformation.
+The matching is performed in the order in which the entries
+appear in the definition.
+The scanning stops after the first match is found.
+When a match is found, the variable assignment is executed.
+A * in the match expression matches any sequence of characters,
+a * in the right hand part of the assignment is
+replaced by the characters matched by
+the * in the expression.
+The right hand part is also subject to variable replacement.
+The variable will probably be used in the program arguments.
+The \fB\-l\fP flags are special,
+the order in which they are presented to \fIack\fP must be
+preserved.
+The identifier LNAME is used in conjunction with the scanning of
+\fB\-l\fP flags.
+The value assigned to LNAME is used to replace the flag.
+The example further on shows the use of all this.
+.IP \fIargs\fP
+.br
+The keyword is followed by the program call arguments.
+It is subject to backslashing, variable replacement, expression
+replacement, line splitting and IO replacement.
+The variables assigned to by \fImapflags\fP will probably be
+used here.
+The flags not recognized by \fIack\fP or any of the transformations
+are passed to the linker and inserted before all other arguments.
+.IP \fIstdin\fP
+.br
+This keyword indicates that the transformation reads from standard input.
+.IP \fIstdout\fP
+.br
+This keyword indicates that the transformation writes on standard output.
+.IP \fIoptimizer\fP
+.br
+The presence of this keyword indicates that this transformation is an optimizer.
+It can be followed by a number, indicating the "level" of the
+optimizer (see description of the -O option in the ack(1ACK) manual page).
+.IP \fIpriority\fP
+.br
+This \-~optional~\- keyword is followed by a number. Positive priority means
+that the transformation is likely to be used, negative priority means that
+the transformation is unlikely to be used.
+Priorities can also be set with an ack(1ACK) command line option.
+Priorities come in handy when there are several implementations of a
+certain transformation. They can then be used to select a default one.
+.IP \fIlinker\fP
+.br
+This keyword indicates that this transformation is the linker.
+.IP \fIcombiner\fP
+.br
+This keyword indicates that this transformation is a combiner. A combiner
+is a program combining several files into one, but is not a linker.
+An example of a combiner is the global optimizer.
+.IP \fIprep\fP
+.br
+This \-~optional~\- keyword is followed by an option indicating its relation
+to the preprocessor.
+The possible options are:
+.DS X
+  always	the input files must be preprocessed
+  cond	the input files must be preprocessed when starting with #
+  is	this transformation is the preprocessor
+.DE
+.IP \fIrts\fP
+.br
+This \-~optional~\- keyword indicates that the rest of the line must be
+used to set the variable RTS, if it was not already set.
+Thus the variable RTS is set by the first transformation
+executed with such a property, or as a result of \fIack\fP's program
+call name (acc, cc, apc or pc), or by the \fB\-.suffix\fP flag.
+.IP \fIneed\fP
+.br
+This \-~optional~\- keyword indicates that the rest of the line must be
+concatenated to the HEAD and TAIL variables.
+This is done once for every transformation used or indicated
+by one of the program call names mentioned above or indicated
+by the \fB\-.suffix\fP flag.
+.br
+.nr PD 1v
+.NH
+Conventions used in description files
+.PP
+\fIAck\fP reads two description files.
+A few of the variables defined in the machine specific file
+are used by the descriptions of the front-ends.
+Other variables, set by \fIack\fP, are of use to all
+transformations.
+.PP
+\fIAck\fP sets the variable EM to the home directory of the
+Amsterdam Compiler Kit.
+The variable SOURCE is set to the name of the argument that is currently
+being massaged; this is useful for debugging.
+The variable SUFFIX is set to the suffix of the argument that is
+currently being massaged.
+.br
+The variable M indicates the
+directory in lib/{M}/tail_..... and NAME is the string to
+be defined by the preprocessor with \-D{NAME}.
+The definitions of {w}, {s}, {l}, {d}, {f} and {p} indicate
+EM_WSIZE, EM_SSIZE, EM_LSIZE, EM_DSIZE, EM_FSIZE and EM_PSIZE
+respectively.
+.br
+The variable INCLUDES is used as the last argument to \fIcpp\fP.
+It is used to add directories to
+the list of directories containing #include files.
+.PP
+The variables HEAD, TAIL and RTS are set by \fIack\fP and used
+to compose the arguments for the linker.
+.NH
+Example
+.PP
+Description for front-end
+.DS X
+.ta 4n 40n
+name cpp	# the C-preprocessor
+		# no from, it's governed by the P property
+	to .i	# result files have suffix i
+	program {EM}/lib/cpp	# pathname of loadfile
+	mapflag \-I* CPP_F={CPP_F?} \-I*	# grab \-I.. \-U.. and
+	mapflag \-U* CPP_F={CPP_F?} \-U*	# \-D.. to use as arguments
+	mapflag \-D* CPP_F={CPP_F?} \-D*	# in the variable CPP_F
+	args {CPP_F?} {INCLUDES?} \-D{NAME} \-DEM_WSIZE={w} \-DEM_PSIZE={p} \e
+	    \-DEM_SSIZE={s} \-DEM_LSIZE={l} \-DEM_FSIZE={f} \-DEM_DSIZE={d} <
+		# The arguments are: first the \-[IUD]...
+		#  then the include dir's for this machine
+		#  then the NAME and size values finally
+		#  followed by the input file name
+	stdout	# Output on stdout
+	prep is	# Is preprocessor
+end
+name cem	# the C-compiler proper
+	from .c	# used for files with suffix .c
+	to .k	# produces compact code files
+	program {EM}/lib/em_cem	# pathname of loadfile
+	mapflag \-p CEM_F={CEM_F?} \-Xp	# pass \-p as \-Xp to cem
+	mapflag \-L CEM_F={CEM_F?} \-l	# pass \-L as \-l to cem
+	args \-Vw{w}i{w}p{p}f{f}s{s}l{l}d{d} {CEM_F?}
+		# the arguments are the object sizes in
+		# the \-V... flag and possibly \-l and \-Xp
+	stdin	# input from stdin
+	stdout	# output on stdout
+	prep always	# use cpp
+	rts .c	# use the C run-time system
+	need .c	# use the C libraries
+end
+name decode	# make human readable files from compact code
+	from .k.m	# accept files with suffix .k or .m
+	to .e	# produce .e files
+	program {EM}/lib/em_decode	# pathname of loadfile
+	args <	# the input file name is the only argument
+	stdout	# the output comes on stdout
+end
+.DE
+
+.DS X
+.ta 4n 40n
+Example of a backend, in this case the EM assembler/loader.
+
+var w=2	# wordsize 2
+var p=2	# pointersize 2
+var s=2	# short size 2
+var l=4	# long size 4
+var f=4	# float size 4
+var d=8	# double size 8
+var M=em22
+var NAME=em22	# for cpp (NAME=em22 results in #define em22 1)
+var LIB=lib/{M}/tail_	# part of file name for libraries
+var RT=lib/{M}/head_	# part of file name for run-time startoff
+var SIZE_FLAG=\-sm	# default internal table size flag
+var INCLUDES=\-I{EM}/include	# use {EM}/include for #include files
+name asld	# Assembler/loader
+	from .k.m.a	# accepts compact code and archives
+	to e.out	# output file name
+	program {EM}/lib/em_ass	# load file pathname
+	mapflag \-l* LNAME={EM}/{LIB}*	# e.g. \-ly becomes
+		#	{EM}/mach/int/lib/tail_y
+	mapflag \-+* ASS_F={ASS_F?} \-+*  # recognize \-+ and \-\-
+	mapflag \-\-* ASS_F={ASS_F?} \-\-*
+	mapflag \-s* SIZE_FLAG=\-s*	# overwrite old value of SIZE_FLAG
+	args {SIZE_FLAG} \e
+	    ({RTS}:.c={EM}/{RT}cc) ({RTS}:.p={EM}/{RT}pc) \-o > < \e
+	    (.p:{TAIL}={EM}/{LIB}pc) \e
+	    (.c:{TAIL}={EM}/{LIB}cc.1s {EM}/{LIB}cc.2g) \e
+	    (.c.p:{TAIL}={EM}/{LIB}mon)
+		# \-s[sml] must be first argument
+		# the next line contains the choice for head_cc or head_pc
+		# and the specification of in- and output.
+		# the last three args lines choose libraries
+	linker
+end
+.DE
+
+The command \fIack \-mem22 \-v \-v \-I../h \-L \-ly prog.c\fP
+would result in the following
+calls (with exec(II)):
+.DS X
+.ta 4n
+1)	/lib/cpp \-I../h \-I/usr/em/include \-Dem22 \-DEM_WSIZE=2 \-DEM_PSIZE=2 \e
+	    \-DEM_SSIZE=2 \-DEM_LSIZE=4 \-DEM_FSIZE=4 \-DEM_DSIZE=8 prog.c
+2)	/usr/em/lib/em_cem \-Vw2i2p2f4s2l4d8 \-l
+3)	/usr/em/lib/em_ass \-sm /usr/em/lib/em22/head_cc \-o e.out prog.k
+	/usr/em/lib/em22/tail_y /usr/em/lib/em22/tail_cc.1s
+	/usr/em/lib/em22/tail_cc.2g /usr/em/lib/em22/tail_mon
+.DE

+ 365 - 0
doc/ansi_C.doc

@@ -0,0 +1,365 @@
+.de NS
+.sp
+.in 0
+\\fBANS \\$1:\\fP
+..
+.TL
+Amsterdam Compiler Kit-ANSI C compiler compliance statements
+.AU 
+Hans van Eck
+.AI
+Dept. of Mathematics and Computer Science
+Vrije Universiteit
+Amsterdam, The Netherlands
+.PP
+This document specifies the implementation-defined behaviour of the ANSI-C
+front end of the Amsterdam Compiler Kit as required by ANS X3.159-1989.  Since
+the implementation-defined behaviour sometimes depends on the machine
+being compiled on or for, some items will be left unspecified in this
+document\(dg.
+.FS
+\(dg when cross-compiling, run-time behaviour may be different from
+compile-time behaviour
+.FE
+The compiler assumes that it runs on a UNIX system.
+.NS A.6.3.1
+.IP -
+Diagnostics are placed on the standard error output.  They have the
+following specification:
+.br
+"<file>", line <nr>: [(<class>)] <diagnostic>
+.br
+There are three classes of diagnostics: "error", "strict" and "warning".
+When the class is "error", the <class> is absent.
+.br
+The class "strict" is used for violations of the standard which are
+not severe enough to stop compilation.  An example is the occurrence
+of non-white-space after a '#else' or '#endif' pre-processing
+directive.  The class "warning" is used for legal but dubious
+constructions.  An example is overflow of constant expressions.
+.NS A.6.3.2
+.IP -
+The function 'main' can have two arguments.  The first argument is an
+integer specifying the number of arguments on the command line.  The second
+argument is a pointer to an array of pointers to the arguments (as
+strings).
+.IP -
+Interactive devices are terminals.
+.NS A.6.3.3
+.IP -
+The number of significant characters is an option.  By default it is 64.
+There is a distinction between upper and lower case.
+.NS A.6.3.4
+.IP -
+The compiler assumes ASCII-characters in both the source and execution
+character set.
+.IP -
+There are no multi-byte characters.
+.IP -
+There are 8 bits in a character.
+.IP -
+Character constants with values that can not be represented in 8 bits
+are truncated.
+.IP -
+Character constants that are more than 1 character wide will have the
+first character specified in the least significant byte.
+.IP -
+The only supported locale is "C".
+.IP -
+A plain 'char' has the same range of values as 'signed char'.
+.NS A.6.3.5
+.IP -
+The compiler assumes that it works on and compiles for a
+2-complement binary-number system.  Shorts will use 2 bytes and longs
+will use 4 bytes.  The size of integers is machine dependent.
+.IP -
+Converting an integer to a shorter signed integer is implemented by
+ignoring the high-order byte(s) of the former.
+Converting an unsigned integer to a signed integer of the same type is
+only done in administration.  This means that the bit-pattern remains
+unchanged.
+.IP -
+The results of bitwise operations on signed integers are what can be
+expected on a 2-complement machine.
+.IP -
+If either operand is negative, whether the result of the / operator is the
+largest integer less than or equal to the algebraic quotient or the
+smallest integer greater than or equal to the algebraic quotient is machine
+dependent, as is the sign of the result of the % operator.
+.IP -
+The right-shift of a negative value is negative.
+.NS A.6.3.6
+.IP -
+The representation of floating-point values is machine-dependent.
+When native floating-point is not present an IEEE-emulation is used.
+The compiler uses high-precision floating-point for constant folding.
+.IP -
+Truncation is always to the nearest floating-point number that can
+be represented.
+.NS A.6.3.7
+.IP -
+The type returned by the sizeof-operator (also known as size_t)
+is 'unsigned int'.  This is done for backward compatibility reasons.
+.IP -
+Casting an integer to a pointer or vice versa has no effect in
+bit-pattern when the sizes are equal.  Otherwise the value will be
+truncated or zero-extended (depending on the direction of the
+conversion and the relative sizes).
+.IP -
+When a pointer is as large as an integer, the type of a 'ptrdiff_t' will
+be 'int'.  Otherwise the type will be 'long'.
+.NS A.6.3.8
+.IP -
+Since the front end has only limited control over the registers, it can
+only make it more likely that variables that are declared as
+registers also end up in registers.  The only things that can possibly be
+put into registers are : 'int', 'long', 'float', 'double', 'long double'
+and pointers.
+.NS A.6.3.9
+.IP -
+When a member of a union object is accessed using a member of a
+different type, the resulting value will usually be garbage.  The
+compiler makes no effort to catch these errors.
+.IP -
+The alignment of types is a compile-time option.  The alignment of
+a structure-member is the alignment of its type.  Usually, the
+alignment is passed on to the compiler by the 'ack' program.  When a
+user wants to do this manually, he/she should be prepared for trouble.
+.IP -
+A "plain" 'int' bit-field is taken as a 'signed int'.  This means that
+a field with a size of 1 bit can only store the values 0 and -1.
+.IP -
+The order of allocation of bit-fields is a compile-time option.  By
+default, high-order bits are allocated first.
+.IP -
+An enum has the same size as a "plain" 'int'.
+.NS A.6.3.10
+.IP -
+An access to a volatile declared variable is done by just mentioning
+the variable.  E.g. the statement "x;" where x is declared volatile,
+constitutes an access.
+.NS A.6.3.11
+.IP -
+There is no fixed limit on the number of declarators that may modify an
+arithmetic, structure or union type, although specifying too many may
+cause the compiler to run out of memory.
+.NS A.6.3.12
+.IP -
+The maximum number of cases in a switch-statement is in the order of
+1e9, although the compiler may run out of memory somewhat earlier.
+.NS A.6.3.13
+.IP -
+Since both the pre-processor and the compiler assume ASCII-characters, 
+a single character constant in a conditional-inclusion directive
+matches the same value in the execution character set.
+.IP -
+The pre-processor recognizes -I... command-line options.  The
+directories thus specified are searched first.  After that, depending on the
+command that the preprocessor is called with, machine/system-dependant
+directories are searched.  After that, ~em/include/_tail_ac and
+/usr/include are visited.
+.IP -
+Quoted names are first looked for in the directory in which the file
+which does the include resides.
+.IP -
+The characters in a h- or q- char-sequence are taken to be UNIX
+paths.
+.IP -
+Neither the compiler nor the preprocessor know any pragmas.
+.IP -
+Since the compiler runs on UNIX, __DATE__ and __TIME__ will always be
+defined.
+.NS A.6.3.14
+.IP -
+NULL is defined as ((void *)0).  This is done in order to flag dubious
+constructions like "int x = NULL;".
+.IP -
+The diagnostic printed by 'assert' is as follows:
+.ti +4n
+"Assertion "<expr>" failed, file "<file>", line <line>",
+.br
+where <expr> is the argument to the assert macro, printed as string.
+(the <file> and <line> should be clear)
+.KS
+.IP -
+The sets for character test macros.
+.TS
+l l.
+name:	set:
+isalnum()	0-9A-Za-z
+isalpha()	A-Za-z
+iscntrl()	\e000-\e037\e177
+islower()	a-z
+isupper()	A-Z
+isprint()	<space>-~ (== \e040-\e176)
+.TE
+.KE
+In addition, there is an isascii() macro, which tests whether a character
+is an ASCII character.  Characters in the range from \e000 to \e177 are ASCII
+characters.
+.KS
+.IP -
+The behaviour of mathematical functions on a domain error:
+.TS
+l c
+l n.
+name:	returns:
+asin()	0.0
+acos()	0.0
+atan2()	0.0
+fmod()	0.0
+log()	-HUGE_VAL
+log10()	-HUGE_VAL
+pow()	0.0
+sqrt()	0.0
+.TE
+.KE
+.IP -
+Underflow range errors do not cause errno to be set.
+.IP -
+The function fmod() returns 0.0 and sets errno to EDOM when the second
+argument is 0.0.
+.IP -
+The set of signals for the signal() function depends on the UNIX-system
+which the compiler is compiling for.  The default handling, semantics
+and behaviour of these signals are those specified by the operating
+system vendor.  The default handling is not reset when SIGILL is
+received.
+.IP -
+A text-stream need not end in a new-line character.
+.IP -
+White space characters before a new-line appear when read in.
+.IP -
+There may be any number of null characters appended to a binary
+stream.
+.IP -
+The file position indicator of an append mode stream is initially
+positioned at the beginning of the file.
+.IP -
+A write on a text stream does not cause the associated file to be
+truncated beyond that point.
+.IP -
+The buffering intended by the standard is fully supported.
+.IP -
+A zero-length file actually exists.
+.IP -
+A file name can consist of any character, except for the '\e0' and
+the '/'.
+.IP -
+A file can be open multiple times.
+.IP -
+When a remove() is done on an open file, reading and writing behave
+just as can be expected from a non-removed file.  When the associated
+stream is closed, all written data will be lost.
+.IP -
+When a file exists prior to a call to rename(), the behaviour is that
+of the underlying UNIX system.  Normally, the call would fail.
+.IP -
+The %p conversion in fprintf() has the same effect as %#x or %#lx,
+depending on the sizes of pointer and integer.
+.IP -
+The %p conversion in fscanf() has the same effect as %x or %lx,
+depending on the sizes of pointer and integer.
+.IP -
+A - character that is neither the first nor the last character in the
+scanlist for %[ conversion is taken to be a range indicator.  When the
+first character has a higher ASCII-value than the second, the - will
+just be put into the scanlist.
+.IP -
+The value of errno when fgetpos() or ftell() failed is that of lseek().
+This means:
+.RS
+.IP "EBADF \-" 10
+when the stream is not valid
+.IP "ESPIPE \-"
+when fildes is associated with a pipe (and on some systems: sockets)
+.IP "EINVAL \-"
+the resulting file pointer would be negative
+.RE
+.LP
+.IP -
+The messages generated by perror() depend on the value of errno.
+The mapping of errors to strings is done by strerror().
+.IP -
+When the requested size is zero, malloc(), calloc() and realloc()
+return a null-pointer.
+.IP -
+When abort() is called, output buffers will be flushed.  Temporary files
+(made with the tmpfile() function) will have disappeared when SIGABRT
+is not caught or ignored.
+.IP -
+The exit() function returns the low-order eight bits of its argument
+to the environment.
+.IP -
+The predefined environment names are controlled by the user.
+Setting environment variables is done through the putenv() function.
+This function accepts a pointer to char as its argument.
+To set, for instance, the environment variable TERM to a230 one writes
+.ti +4n
+putenv("TERM=a230");
+.br
+The argument to putenv() is stored in an internal table, so malloc'ed
+strings can not be freed until another call to putenv() (which sets the
+same environment variable) is made.  The function returns 1 if it fails,
+0 otherwise.
+.LP
+.IP -
+The argument to system is passed as argument to /bin/sh -c.
+.IP -
+The strings returned by strerror() depend on errno in the following
+way:
+.TS
+l l.
+errno	string
+0	"Error 0",
+EPERM	"Not owner",
+ENOENT	"No such file or directory",
+ESRCH	"No such process",
+EINTR	"Interrupted system call",
+EIO	"I/O error",
+ENXIO	"No such device or address",
+E2BIG	"Arg list too long",
+ENOEXEC	"Exec format error",
+EBADF	"Bad file number",
+ECHILD	"No children",
+EAGAIN	"No more processes",
+ENOMEM	"Not enough core",
+EACCES	"Permission denied",
+EFAULT	"Bad address",
+ENOTBLK	"Block device required",
+EBUSY	"Mount device busy",
+EEXIST	"File exists",
+EXDEV	"Cross-device link",
+ENODEV	"No such device",
+ENOTDIR	"Not a directory",
+EISDIR	"Is a directory",
+EINVAL	"Invalid argument",
+ENFILE	"File table overflow",
+EMFILE	"Too many open files",
+ENOTTY	"Not a typewriter",
+ETXTBSY	"Text file busy",
+EFBIG	"File too large",
+ENOSPC	"No space left on device",
+ESPIPE	"Illegal seek",
+EROFS	"Read-only file system",
+EMLINK	"Too many links",
+EPIPE	"Broken pipe",
+EDOM	"Math argument",
+ERANGE	"Result too large"
+.TE
+everything else causes strerror() to return "unknown error"
+.IP -
+The local time zone is by default MET (GMT + 1:00:00).  This can be
+changed through the TZ environment variable, or by some changes in the
+sources.
+.IP -
+The clock() function returns the number of ticks since process
+startup.
+.SH
+References
+.IP [1]
+ANS X3.159-1989
+.I
+American National Standard for Information Systems -
+Programming Language C
+.R

+ 949 - 0
doc/basic.doc

@@ -0,0 +1,949 @@
+.\" $Id$ 
+.TL 
+.de Sy
+.LP
+.IP \fBsyntax\fR 10
+..
+.de PU
+.IP \fBpurpose\fR 10
+..
+.de RM
+.IP \fBremarks\fR 10
+..
+The ABC compiler
+.AU
+Martin L. Kersten
+Gert-Jan Akkerman
+Marcel Worring
+Edo Westerhuis
+Frans Kunst
+Ronnie Lachniet
+.AI
+Department of Mathematics and Computer Science.
+.br
+Free University
+.br
+Amsterdam
+.AB
+This manual describes the 
+programming language BASIC and its compiler
+included in the Amsterdam Compiler Kit.
+.AE
+.SH
+INTRODUCTION.
+.LP
+The BASIC-EM compiler is an extensive implementation of the
+programming language BASIC.
+The language structure and semantics are modelled after the 
+BASIC interpreter/compiler of Microsoft (tr); a short comparison
+is provided in appendix A.
+.LP
+The compiler generates code for a virtual machine, the EM machine
+[[ACM, etc]].
+Using EM as an intermediate machine results in a highly portable
+compiler and BASIC code.
+.br
+The drawback of EM is that it does not directly reflect one particular
+hardware design, which means that many of the low level operations available 
+within BASIC are ill-defined or even inapplicable.
+To mention a few, the peek and poke instructions are likely
+to behave erroneously, while line printer and tape deck
+primitives are unknown.
+.LP
+This manual is divided into three chapters.
+.br
+Chapter 1 discusses the general language syntax and semantics.
+.br
+Chapter 2 describes the statements available in BASIC-EM.
+.br
+Chapter 3 describes the predefined functions, ordered alphabetically.
+.LP
+Appendix A discusses the differences with Microsoft BASIC. 
+.br
+Appendix B describes all reserved symbols.
+.LP
+.LP
+.SH
+SYNTAX NOTATION
+.LP
+The conventions for syntax presentation are as follows:
+.IP CAPS 10
+Items in capitals are reserved words and must be input as shown.
+.IP <> 10
+Items in lowercase letters enclosed in angular brackets
+are to be supplied by the user.
+.IP [] 10
+Items are optional.
+.IP \.\.\. 10
+Items may be repeated any number of times.
+.IP {} 10
+A choice between two or more alternatives. At least one of the entries
+must be chosen.
+.IP | 10
+Vertical bars separate the choices within braces.
+.LP
+All punctuation must be included where shown.
+.bp
+.NH 1
+GENERAL INFORMATION
+.LP
+The BASIC-EM compiler is designed for a UNIX based environment.
+It accepts a text file with a BASIC program (suffix .b) and generates
+an executable file, called a.out.
+.NH 2
+LINE FORMAT
+.LP
+A BASIC program consists of a series of lines, starting with a 
+positive line number in the range 0 to 32767.
+A line may consist of more than one physical line on a terminal, but
+is limited to 1024 characters.
+Multiple BASIC statements may be placed on a single line, provided
+they are separated by a colon (:).
+.NH 2
+CONSTANTS
+.LP
+The BASIC compiler character set comprises alphabetic
+characters, numeric characters, and the special characters shown below.
+.DS
+= + - * / ^ ( ) % # $ \\ _
+! [ ] , . ; : & ' ? > <  \\ (blank)
+.DE
+.LP
+BASIC uses two different types of constants during processing:
+numeric and string constants.
+.br
+A string constant is a sequence of characters taken from the ASCII
+character set enclosed by double quotation marks.
+.br
+Numeric constants are positive or negative numbers, grouped into
+five different classes.
+.IP "a) integer constants" 25
+.br
+Whole numbers in the range -32768 to 32767. Integer constants do
+not contain decimal points.
+.IP "b) fixed point constants" 25
+.br
+Positive or negative real numbers, i.e. numbers with a decimal point.
+.IP "c) floating point constants" 25
+.br
+Real numbers in scientific notation. A floating point constant
+consists of an optional signed integer or fixed point number
+followed by the letter E (or D) and an optional signed integer
+(the exponent).
+The allowable range of floating point constants is 10^-38 to 10^+38.
+.IP "d) Hex constants" 25
+.br
+Hexadecimal numbers, denoted by the prefix &H.
+.IP "e) Octal constants" 25
+.br
+Octal numbers, denoted by the prefix &O.
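+.LP
+As an illustration (these particular values do not occur elsewhere in
+this manual), the following are all valid numeric constants:
+.DS
+123       integer
+-3.5      fixed point
+1.5E-3    floating point
+&H1F      hexadecimal
+&O17      octal
+.DE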
+.NH 2
+VARIABLES
+.LP
+Variables are names used to represent values in a BASIC program.
+A variable is assigned a value by an assignment specified in the program.
+Before a variable is assigned, its value is assumed to be zero.
+.br
+Variable names are composed of letters, digits or the decimal point,
+starting with a letter. Up to 40 characters are significant.
+A variable name can be followed by any of the following  type 
+declaration characters:
+.IP % 5
+Defines an integer variable
+.IP ! 5
+Defines a single precision variable (see below)
+.IP # 5
+Defines a double precision variable
+.IP $ 5
+Defines a string variable.
+.LP
+Besides single-valued variables, values may be grouped into tables or arrays.
+Each element in an array is referenced by the array name and an index;
+such a variable is called a subscripted variable.
+An array has as many subscripts as there are dimensions in the array,
+the maximum of which is 11.
+.br
+If a variable starts with FN it is assumed to be a call to a user defined
+function. 
+.br
+A variable name may not be a reserved word nor the name 
+of a predefined function.
+A list of all reserved identifiers is included as Appendix B.
+.LP
+NOTES: 
+.br
+Two variables with the same name but different types are
+considered illegal.
+.br
+The type of a variable without a type declaration character is set,
+at its first occurrence in the program,
+to the default type, which is (in this implementation) double precision.
+.br
+Multi-dimensional arrays must be declared before use (see
+DIM-statement).
+.br
+BASIC-EM differs from Microsoft BASIC in supporting floats in one precision
+only (due to EM), i.e. doubles and floats have the same precision.
+.NH 2
+EXPRESSIONS
+.LP
+When necessary the compiler will convert a numeric value from
+one type to another.
+A value is always converted to the precision of the variable it is assigned
+to.
+When a floating point value is converted to an integer the fractional
+portion is rounded.
+In an expression all values are converted to the same degree of precision,
+i.e. that of the most precise operand.
+.br
+Division by zero results in the message "Division by zero".
+If overflow (or underflow) occurs, the "Overflow (underflow)" message is
+displayed and  execution is terminated (contrary to Microsoft).
+.SH
+Arithmetic
+.LP
+The arithmetic operators, in order of precedence, are:
+.DS L
+^		Exponentiation
+-		Negation
+*,/,\\\\\\\\,MOD	 Multiplication, Division, Remainder
++,-		Addition, Subtraction
+.DE
+The operator \\\\ denotes integer division, its operands are rounded to
+integers before the operator is applied.
+Modulus arithmetic is denoted by the operator MOD, which yields the
+integer value that is the remainder of an integer division.
+.br
+The order in which operators are performed can be changed with parentheses.
+.SH
+Relational
+.LP
+The relational operators in order of precedence, are:
+.DS
+=	Equality
+<>	Inequality
+<	Less than
+>	Greater than
+<=	Less than or equal to
+>=	Greater than or equal to
+.DE
+The relational operators are used to compare two values and return
+either "true" (-1) or "false" (0) (See IF statement).
+The precedence of the relational operators is lower
+than that of the arithmetic operators.
+.SH
+Logical
+.LP
+The logical operators perform tests on multiple relations, bit manipulations,
+or boolean operations.
+The logical operators return a bitwise result ("true" or "false").
+In an expression, logical operators are performed after the relational and
+arithmetic operators.
+The logical operators work by converting their operands to signed
+two's-complement integers in the range -32768 to 32767.
+.DS
+NOT		Bitwise negation
+AND		Bitwise and
+OR		Bitwise or
+XOR		Bitwise exclusive or
+EQV		Bitwise equivalence
+IMP		Bitwise implies
+.DE
+.SH
+Functional
+.LP
+A function is used in  an expression to call a system or user defined
+function.
+A list of predefined functions is presented in chapter 3.
+.SH
+String operations
+.LP
+Strings can be concatenated by using +. Strings can be compared with
+the relational operators. String comparison is performed in lexicographic
+order.
+.NH 2
+ERROR MESSAGES
+.LP
+The occurrence of an error results in termination of the program
+unless an ON....ERROR statement has been encountered.
+.bp
+.NH 1
+B-EM STATEMENTS
+.LP
+This chapter describes the statements available within the BASIC-EM
+compiler. Each description is formatted as follows:
+.Sy
+Shows the correct syntax for the statement. See introduction of
+syntax notation above.
+.PU
+Describes the purpose and details of the instructions.
+.RM
+Describes special cases, deviation from Microsoft BASIC etc.
+.LP
+.NH 2 
+CALL
+.Sy
+CALL <variable name>[(<argument list>)]
+.PU
+The CALL statement provides the means to execute procedures
+and functions written in another language included in the
+Amsterdam Compiler Kit.
+The argument list consists of (subscripted) variables.
+The BASIC compiler pushes the address of the arguments on the stack in order
+of encounter.
+.RM
+Not yet available.
+.NH 2
+CLOSE
+.Sy
+CLOSE [[#]<file number>[,[#]<file number...>]]
+.PU
+To terminate I/O on a disk file.
+<file number> is the number associated with the file 
+when it was OPENed (See OPEN-statement). Omission of parameters results in closing
+all files.
+.sp
+The END statement and STOP statement always issue a CLOSE of
+all files.
+.NH 2
+DATA
+.Sy
+DATA <list of constants>
+.PU
+DATA statements are used to construct a data bank of values that are
+accessed by the program's READ statement.
+DATA statements are non-executable;
+the data items are assembled in a data file by the BASIC compiler.
+This file can be replaced, provided the layout remains
+the same (otherwise the RESTORE won't function properly).
+.sp
+The list of data items consists of numeric and string constants
+as discussed in section 1.
+Moreover, string constants starting with a letter and not
+containing blanks, newlines, commas or colons need not be enclosed in
+string quotes.
+.sp
+DATA statements can be reread using the RESTORE statement.
+.NH 2
+DEF FN
+.Sy
+DEF FN<name> [(<parameterlist>)]=<expression>
+.PU
+To define and name a function that is written by the user.
+<name> must be an identifier and should be preceded by FN,
+which is considered integral part of the function name. 
+<expression> defines the expression to be evaluated upon function call.
+.sp
+The parameter list consists of a comma separated
+list of variable names, used within the function definition,
+that are to be replaced by values upon function call.
+The variable names defined in the parameterlist, called formal
+parameters, do not affect the definition and use of variables
+defined with the same name in the rest of the BASIC program.
+.sp
+A type declaration character may be suffixed to the function name to
+designate the data type of the function result.
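+.sp
+As an illustration (not taken from the original text):
+.DS
+100 DEF FNAVG(X,Y)=(X+Y)/2
+110 PRINT FNAVG(3,5)
+.DE
+defines a function FNAVG and prints the value 4.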
+.NH 2
+DEFINT/SNG/DBL/STR
+.Sy
+DEF<type> <range of letters>
+.PU
+Any undefined variable starting with the letter included in the range of
+letters is declared of type <type> unless a type declaration character
+is appended.
+The range of letters is a comma separated list of characters and
+character ranges (<letter>-<letter>).
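+.sp
+As an illustration, the statement
+.DS
+10 DEFINT I-N
+.DE
+declares every otherwise undeclared variable whose name starts with one of
+the letters I through N to be of type integer.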
+.NH 2
+DIM
+.Sy
+DIM <list of subscripted variable>
+.PU
+The DIM statement allocates storage for subscripted variables.
+If an undefined subscripted variable is used 
+the maximum value of the array subscript is assumed to be 10.
+A subscript out of range is signalled by the program (when ACK works)
+The minimum subscript value is 0, unless the OPTION BASE statement has been
+encountered.
+.sp
+All variables in a subscripted variable are initially zero.
+.sp
+BUGS. Multi-dimensional arrays MUST be defined. Subscript out of range is
+left unnotified.
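+.sp
+As an illustration:
+.DS
+10 DIM A(100), T(10,10)
+.DE
+allocates storage for a one-dimensional array A and a two-dimensional
+table T; the lower bound of each subscript is 0 unless an OPTION BASE
+statement has been given.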
+.NH 2
+END
+.Sy
+END
+.PU
+END terminates a BASIC program and returns to the UNIX shell.
+An END statement at the end of the BASIC program is optional.
+.NH 2
+ERR and ERL
+.Sy
+<identifier name>= ERR
+.br
+<identifier name>= ERL
+.PU
+Whenever an error occurs the variable ERR contains the
+error number and ERL the BASIC line where the error occurred.
+The variables are usually used in error handling routines
+provided by the user.
+.NH 2
+ERROR
+.Sy
+ERROR <integer expression>
+.PU
+To simulate the occurrence of a BASIC error.
+To define a private error code a value must be used that is not already in
+use by the BASIC runtime system.
+The list of error messages currently in use can be found in appendix B.
+.NH 2
+FIELD
+.PU
+To be implemented.
+.NH 2
+FOR...NEXT
+.Sy
+FOR <variable>= <low>TO<high>[STEP<size>]
+.br
+ ......
+.br
+NEXT [<variable>][,<variable>...]
+.PU
+The FOR statement allows a series of statements to be performed
+repeatedly. <variable> is used as a counter. During the first
+execution pass it is assigned the value <low>,
+an arithmetic expression. After each pass the counter
+is incremented (decremented) with the step size <size>, an expression.
+Omission of the step size is interpreted as an increment of 1.
+.br
+Execution of the program lines specified between the FOR and the NEXT
+statement is terminated as soon as the counter is greater (less) than <high>.
+.sp
+The NEXT statement is labeled with the name(s) of the counter to be
+incremented.
+.sp
+The variables mentioned in the NEXT statement may be omitted, in which case
+the counter of the most recent FOR statement is incremented.
+If a NEXT statement is encountered before its corresponding FOR statement,
+the error message "NEXT without FOR" is generated.
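+.sp
+As an illustration, the loop
+.DS
+10 FOR I=1 TO 10 STEP 2
+20 PRINT I
+30 NEXT I
+.DE
+prints the values 1, 3, 5, 7 and 9.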
+.NH 2
+GET
+.Sy
+GET [#]<file number>[, <record number>]
+.PU
+To be implemented.
+.NH 2
+GOSUB...RETURN
+.Sy
+GOSUB <line number>
+  ...
+.br
+RETURN
+.PU
+The GOSUB statement branches to the first statement of a subroutine.
+The RETURN statement causes a branch back to the statement following the
+most recent GOSUB statement.
+A subroutine may contain more than one RETURN statement.
+.sp
+Subroutines may be called recursively. 
+Nesting of subroutine calls is limited; upon exceeding the maximum depth
+the error message "XXXXX" is displayed.
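+.sp
+As an illustration:
+.DS
+10 GOSUB 100
+20 PRINT "back"
+30 END
+100 PRINT "in subroutine"
+110 RETURN
+.DE
+first prints "in subroutine" and then "back".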
+.NH 2
+GOTO
+.Sy
+GOTO <line number>
+.PU
+To branch unconditionally to a specified line in the program.
+If <line number> does not exist, the compilation error message
+"Line not defined" is displayed.
+.RM
+Microsoft BASIC continues at the first line
+equal to or greater than the line specified.
+.NH 2
+IF...THEN
+.Sy
+.br
+IF <expression> THEN {<statements>|<line number>}
+[ELSE {<statements>|<line number>}]
+.br
+.Sy
+IF <expression> GOTO <line number>
+[ELSE {<statements>|<line number>}]
+.PU
+The IF statement is used
+to make a decision regarding the program flow based on the
+result of the expression.
+If the expression is not zero, the THEN or GOTO clause is
+executed. If the result of <expression> is zero, the THEN or
+GOTO clause is ignored and the ELSE clause, if present, is
+executed.
+.br
+IF..THEN..ELSE statements may be nested.
+Nesting is limited by the length of the line.
+The ELSE clause matches the closest unmatched THEN.
+.sp
+When using IF to test equality for a value that is the
+result of a floating point expression, remember that the
+internal representation of the value may not be exact.
+Therefore, the test should be against a range to
+handle the relative error.
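+.sp
+As an illustration:
+.DS
+10 INPUT A
+20 IF A<0 THEN PRINT "negative" ELSE PRINT "not negative"
+.DE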
+.RM
+Microsoft BASIC allows a comma before THEN.
+.NH 2
+INPUT
+.Sy
+INPUT [;][<"prompt string">;]<list of variables>
+.PU
+An INPUT statement can be used to obtain values from the user at the
+terminal.
+When an INPUT statement is encountered a question mark is printed
+to indicate the program is awaiting data.
+If <"prompt string"> is included, the string is printed before the
+question mark. The question mark is suppressed when the prompt
+string is followed by a comma, rather than a semicolon.
+.sp
+For each variable in the variable list a value should be supplied.
+Data items presented should be separated by a comma.
+.sp
+The type of the variable in the variable list must agree with the
+type of the data item entered. Responding with too few or too many
+data items causes the message "?Redo". No assignment of input values
+is made until an acceptable response is given.
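+.sp
+As an illustration:
+.DS
+10 INPUT "YOUR AGE";A%
+.DE
+prints the prompt YOUR AGE followed by a question mark and reads an
+integer value into A%.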
+.RM
+The option to discard the carriage return with the semicolon after the
+input symbol is not yet implemented.
+.NH 2
+INPUT [#]
+.Sy
+INPUT #<file number>,<list of variables>
+.PU
+The purpose of the INPUT# statement is to read data items from a sequential
+file and assign them to program variables.
+<file number> is the number used to open the file for input.
+The variables mentioned are (subscripted) variables.
+The type of the data items read should agree with the type of the variables.
+A type mismatch results in the error message "XXXXX".
+.sp
+The data items on the sequential file are separated by commas and newlines.
+In scanning the file, leading spaces, new lines, tabs, and
+carriage returns are ignored. The first character encountered
+is assumed to be the start of a new item.
+String items need not be enclosed in double quotes, provided
+they do not contain spaces, tabs, newlines or commas.
+.RM
+Microsoft BASIC won't assign values until the end of the input statement.
+This means that the user has to supply all the information.
+.NH 2
+LET
+.Sy
+[LET]<variable>=<expression>
+.PU
+To assign the value of an expression to a (subscripted) variable.
+The type conversions as dictated in chapter 1 apply.
+.NH 2
+LINE INPUT
+.Sy
+LINE INPUT [;][<"prompt string">;]<string variable>
+.PU
+An entire line of input is assigned to the string variable.
+See INPUT for the meaning of the <"prompt string"> option.
+.NH 2
+LINE INPUT [#]
+.Sy
+LINE INPUT #<file number>,<string variable>
+.PU
+Read an entire line of text from a sequential file <file number>
+and assign it to a string variable.
+.NH 2
+LSET and RSET
+.PU
+To be implemented
+.NH 2
+MID$
+.Sy
+MID$(<string expr1>,n[,m])=<string expr2>
+.PU
+To replace a portion of a string with another string value.
+The characters of <string expr2> replaces characters in <string expr1>
+starting at position n. If m is present, at most m characters are copied,
+otherwise all characters are copied.
+However, the string obtained never exceeds the length of string expr1.
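+.sp
+As an illustration: after
+.DS
+10 A$="ABCDEF"
+20 MID$(A$,3,2)="xyz"
+.DE
+the variable A$ contains "ABxyEF".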
+.NH 2
+ON ERROR GOTO
+.Sy
+ON ERROR GOTO <line number>
+.PU
+To enable error handling within the BASIC program.
+An error may result from arithmetic errors, disk problems, interrupts, or
+as a result of the ERROR statement.
+After printing an error message the program is continued at the
+statements associated with <line number>.
+.sp
+Error handling is disabled using ON ERROR GOTO 0.
+Subsequent errors result in an error message and program termination.
+.NH 2
+ON...GOSUB and ON ...GOTO
+.Sy
+ON <expression> GOSUB <list of line numbers>
+.br
+ON <expression> GOTO <list of line numbers>
+.PU
+To branch to one of several specified line numbers or subroutines, based
+on the result of the <expression>. The list of line numbers are considered
+the first, second, etc alternative. Branching to the first occurs when
+the expression evaluates to one, to the second alternative on two, etc.
+If the value of the expression is zero or greater than the number of alternatives, processing continues at the first statement following the ON..GOTO 
+(ON GOSUB) statement.
+.sp
+When the expression results in a negative number
+an "Illegal function call" error occurs.
+.sp
+BUG If the value of the expression is zero or greater than the number of 
+alternatives, processing does NOT continue at the first statement 
+following the ON..GOTO (ON GOSUB) statement.
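+.sp
+As an illustration:
+.DS
+10 ON N GOTO 100,200,300
+.DE
+branches to line 100 when N is 1, to line 200 when N is 2 and to
+line 300 when N is 3.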
+.NH 2
+OPEN
+.Sy
+OPEN {"i" | "o" | "r" } , [#]<file number> , <file-name>
+.PU
+To open <file-name> (filename should be quoted) for input/reading or output.
+If the file is not opened for output it has to exist, otherwise a
+"file not found" error will occur.
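+.sp
+As an illustration:
+.DS
+10 OPEN "o", #1, "results"
+20 WRITE #1, 1, 2, 3
+30 CLOSE #1
+.DE
+creates the sequential file results, writes the three values to it and
+closes it again.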
+.NH 2
+OPTION BASE
+.Sy
+OPTION BASE n
+.PU
+To declare the lower bound of subsequent array subscripts as either
+0 or 1. The default lower bound is zero.
+.NH 2
+POKE
+.Sy
+POKE <expr1>,<expr2>
+.PU
+To poke around in memory. The use of this statement is not recommended,
+because it requires full understanding of both
+the implementation of the Amsterdam
+Compiler Kit and the hardware characteristics.
+.NH 2
+PRINT 
+.Sy
+PRINT <list of variables and/or constants>
+.PU
+To print constants or the contents of variables on the terminal device.
+If the variables or constants are separated by commas the values will
+be printed separated by tabs.
+If the variables or constants are separated by semicolons the values
+will be printed without spaces in between.
+The new-line generated at the end of the print statement can be suppressed by
+a semicolon at the end of the list of variables or constants.
+.NH 2
+PRINT USING
+.PU
+To be implemented
+.NH 2
+PUT
+.PU
+To be implemented
+.NH 2
+RANDOMIZE
+.Sy
+RANDOMIZE [<expression>]
+.PU
+To reset the random seed. When the expression is omitted, the system
+will ask for a value between -32768 and 32767.
+The random number generator returns the same sequence of values provided
+the same seed is used.
+.NH 2
+READ
+.Sy
+READ <list of variables>
+.PU
+To read values from the DATA statements and assign them to variables.
+The type of the variables should match the type of the items being read,
+otherwise a "Syntax error" occurs. If all data is read the message "Out of
+data" will be displayed.
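+.sp
+As an illustration:
+.DS
+10 DATA 1, 2, 3
+20 READ A, B, C
+.DE
+assigns 1 to A, 2 to B and 3 to C.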
+.NH 2
+REM
+.Sy
+REM <remark>
+.PU
+To include explanatory information in a program.
+The REM statements are not executed.
+A single quote has the same effect as  : REM, which
+allows for the inclusion of comment at the end of the line.
+.RM
+Microsoft BASIC does not allow REM statements as part of
+DATA lines.
+.NH 2
+RESTORE
+.Sy
+RESTORE  [<line number>]
+.PU
+To allow DATA statements to be re-read from a specific line.
+After a RESTORE statement is executed, the next READ accesses
+the first item of the DATA statements.
+If <line number> is specified, the next READ accesses the first
+item in the specified line.
+.sp
+Note that the DATA statements result in a sequential data file generated
+by the compiler, which is read by the READ statements.
+This data file may be replaced using the operating system functions
+with a modified version, provided the same layout of items
+(same number of lines and items per line) is used.
+.NH 2
+STOP
+.Sy
+STOP
+.PU
+To terminate the execution of a program and return to the operating system
+command interpreter. A STOP statement results in the message "Break in line
+???"
+.NH 2
+SWAP
+.Sy
+SWAP <variable>,<variable>
+.PU
+To exchange the values of two variables.
+.sp
+BUG. Strings cannot be swapped !
+.NH 2
+TRON/TROFF
+.Sy
+TRON
+.Sy
+TROFF
+.PU
+As an aid in debugging, the TRON statement causes the program to
+list each line as it is executed. TROFF disables generation of
+this trace code.
+.NH 2
+WHILE...WEND
+.Sy
+WHILE <expression>
+  .....
+WEND
+.PU
+To execute a series of BASIC statements as long as a conditional expression
+is true. WHILE...WEND loops may be nested.
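+.sp
+As an illustration, the loop
+.DS
+10 I=1
+20 WHILE I<=5
+30 PRINT I
+40 I=I+1
+50 WEND
+.DE
+prints the numbers 1 through 5.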
+.NH 2
+WRITE 
+.Sy
+WRITE [<list of expressions>]
+.PU
+To write data at the terminal in DATA statement layout conventions.
+The expressions should be separated by commas.
+.NH 2
+WRITE #
+.Sy
+WRITE #<file number> ,<list of expressions>
+.PU
+To write to a sequential data file that has been opened in "o" mode.
+The values are written using the DATA statement layout conventions.
+.bp
+.NH
+FUNCTIONS
+.LP
+.IP ABS(X) 25
+Returns the absolute value of expression X
+.IP ASC(X$) 25
+Returns the numeric value of the first character of the string.
+If X$ is not initialized an "Illegal function call" error
+is returned.
+.IP ATN(X) 25
+Returns the arctangent of X in radians. Result is in the range
+of -pi/2 to pi/2.
+.IP CDBL(X) 25
+Converts X to a double precision number.
+.IP CHR$(X) 25
+Converts the integer value X to its ASCII character. 
+X must be in the range 0 to 255.
+It is used for cursor addressing and generating bel signals.
+.IP CINT(X) 25
+Converts X to an integer by rounding the fractional portion.
+If X is not in the range -32768 to 32767 an "Overflow"
+error occurs.
+.IP COS(X) 25
+Returns the cosine of X in radians.
+.IP CSNG(X) 25
+Converts X to a single precision number.
+.IP CVI(<2-bytes>) 25
+Convert two byte string value to integer number.
+.IP CVS(<4-bytes>) 25
+Convert four byte string value to single precision number.
+.IP CVD(<8-bytes>) 25
+Convert eight byte string value to double precision number.
+.IP EOF[(<file-number>)] 25
+Returns -1 (true) if the end of a sequential file has been reached.
+.IP EXP(X) 25
+Returns e(base of natural logarithm) to the power of X.
+X should be less than 10000.0.
+.IP FIX(X) 25
+Returns the truncated integer part of X. FIX(X) is
+equivalent to SGN(X)*INT(ABS(X)).
+The major difference between FIX and INT is that FIX does not
+return the next lower number for negative X.
+.IP HEX$(X) 25
+Returns the string which represents the hexadecimal value of
+the decimal argument. X is rounded to an integer using CINT
+before HEX$ is evaluated.
+.IP INT(X) 25
+Returns the largest integer <= X.
+.IP INP$(X[,[#]Y]) 25
+Returns the string of X characters read from the terminal or
+the designated file.
+.IP LEN(X$) 25
+Returns the number of characters in the string X$.
+Non-printable characters and blanks are counted too.
+.IP LOC(<file\ number>) 25
+For sequential files LOC returns 
+the position of the read/write head, counted in bytes.
+For random files the function returns the record number just
+read or written from a GET or PUT statement.
+If nothing was read or written 0 is returned.
+.IP LOG(X) 25
+Returns the natural logarithm of X. X must be greater than zero.
+.IP MID$(X,I,[J]) 25
+Returns J characters from string X starting at position I in X.
+If J is omitted, all characters from position I onwards in X are returned.
+.IP MKI$(X) 25
+Converts an integer expression to a two-byte string.
+.IP MKS$(X) 25
+Converts a single precision expression to a four-byte string.
+.IP MKD$(X) 25
+Converts a double precision expression to an eight-byte string.
+.IP OCT$(X) 25
+Returns the string which represents the octal value of the decimal
+argument. X is rounded to an integer using CINT before OCT$ is evaluated.
+.IP PEEK(I) 25
+Returns the byte read from the indicated memory. (Of limited use
+in the context of ACK)
+.IP POS(I) 25
+Returns the current cursor position. To be implemented.
+.IP RIGHT$(X$,I) 25
+Returns the right most I characters of string X$.
+If I=0 then the empty string is returned.
+.IP RND(X) 25
+Returns a random number between 0 and 1. X is a dummy argument.
+.IP SGN(X) 25
+If X>0 , SGN(X) returns 1.
+.br
+if X=0, SGN(X) returns 0.
+.br
+if X<0, SGN(X) returns -1.
+.IP SIN(X) 25
+Returns the sine of X in radians.
+.IP SPACE$(X) 25
+Returns a string of X spaces. The expression
+X is rounded to an integer using CINT.
+.IP STR$(X) 25
+Returns the string representation value of X.
+.IP STRING$(I,J) 25
+Returns the string of length I whose characters all
+have ASCII code J (or the code of the first character when J is a string).
+.IP TAB(I) 25
+Spaces to position I on the terminal. If the current
+print position is already beyond position I, TAB
+goes to that position on the next line.
+Position 1 is the leftmost position, and the rightmost position
+is the width minus 1. To be used within PRINT statements only.
+.IP TAN(X) 25
+Returns the tangent of X in radians. If TAN overflows
+the "Overflow" message is displayed.
+.IP VAL(X$) 25
+Returns the numerical value of string X$.
+The VAL function strips leading blanks and tabs from the
+argument string.
+.bp
+.SH
+APPENDIX A DIFFERENCES WITH MICROSOFT BASIC
+.LP
+The following Microsoft commands and statements are
+not recognized by the compiler.
+.DS
+SPC
+USR
+VARPTR
+AUTO
+CHAIN
+CLEAR	
+CLOAD
+COMMON
+CONT
+CSAVE
+DELETE
+EDIT
+ERASE
+FRE
+KILL
+LIST
+LLIST
+LOAD
+LPRINT
+MERGE
+NAME
+NEW
+NULL
+RENUM
+RESUME
+RUN
+SAVE
+WAIT
+WIDTH LPRINT
+.DE
+Some statements are not available in the current implementation,
+but will be soon. These include:
+.DS
+CALL
+DEFUSR
+FIELD
+GET
+INKEY
+INPUT$
+INSTR$
+LEFT$
+LSET
+RSET
+PUT
+.DE
+.bp
+.SH
+APPENDIX B RESERVED WORDS IN BASIC-EM
+.LP
+The following words/symbols/names/identifiers are reserved, which
+means that they cannot be used as variable names.
+.DS
+ABS		AND		ASC		AS
+ATN		AUTO		BASE		CALL
+CDBL		CHAIN		CHR		CINT
+CLEAR		CLOAD	CLOSE		COMMON
+CONT		COS		CSNG		CSAVE
+CVI		CVS		CVD		DATA
+DEFINT	DEFSNG	DEFDBL	DEFSTR
+DEF		DELETE	DIM		EDIT
+ELSE		END		EOF		ERASE
+ERROR		ERR		ERL		ELSE
+EQV		EXP		FIELD		FIX
+FOR		FRE		GET		GOSUB
+GOTO		HEX		IF		IMP
+INKEY		INPUT		INP		INSTR
+INT		KILL		LEFT		LEN		
+LET		LINE		LIST		LLIST
+LOAD		LOC		LOG		LPOS
+LPRINT	LSET		MERGE	MID
+MKI		MKS		MKD		MOD
+NAME		NEW		NEXT		NOT		
+NULL		ON		OCT		OPEN
+OPTION	OR		OUT		PEEK
+POKE		PRINT		POS		PUT
+RANDOMIZE	READ		REM		RENUM
+REN		RESTORE	RESUME	RETURN
+RIGHT		RND		RUN		SAVE
+STEP		SGN		SIN		SPACE
+SPC		SQR		STOP		STRING
+STR		SWAP		TAB		TAN
+THEN		TO		TRON		TROFF
+USING		USR		VAL		VARPTR
+WAIT		WHILE		WEND		WIDTH
+WRITE		XOR
+.DE

+ 3 - 0
doc/ceg/.distr

@@ -0,0 +1,3 @@
+proto.make
+ceg.ref
+ceg.tr

+ 6 - 0
doc/ceg/Makefile

@@ -0,0 +1,6 @@
+PIC=pic
+TBL=tbl
+REFER=refer
+
+../ceg.doc:	ceg.tr ceg.ref
+	$(PIC) ceg.tr | $(REFER) -e -p ceg.ref | $(TBL) > $@

+ 42 - 0
doc/ceg/ceg.ref

@@ -0,0 +1,42 @@
+%T A Practical Toolkit For Making Compilers
+%A A.S. Tanenbaum
+%A H. v. Staveren
+%A E.G. Keizer
+%A J.W. Stevenson
+%J Communications of the ACM
+%V 26
+%N 9
+%D September 1983
+
+%T Description of a Machine Architecture for Use with Block Structured Languages
+%A A.S. Tanenbaum
+%A H. v. Staveren
+%A E.G. Keizer
+%A J.W. Stevenson
+%R IR-81
+%I Dept. Mathematics and Computer Science, Vrije Universiteit
+%C Amsterdam
+%D August 1983
+
+%T EM_CODE(3ACK)
+%A ACK Documentation
+%I Dept. Mathematics and Computer Science, Vrije Universiteit
+%C Amsterdam
+
+%T ACK.OUT(5ACK)
+%A ACK Documentation
+%I Dept. Mathematics and Computer Science, Vrije Universiteit
+%C Amsterdam
+%K aout
+
+%T PRINT(3ACK)
+%A ACK Documentation
+%I Dept. Mathematics and Computer Science, Vrije Universiteit
+%C Amsterdam
+
+%T The C Programming Language
+%A B.W. Kernighan
+%A D.M. Ritchie
+%I Prentice-Hall Inc.
+%C Englewood Cliffs, New Jersey
+%D 1978

+ 1587 - 0
doc/ceg/ceg.tr

@@ -0,0 +1,1587 @@
+.nr PS 12
+.nr VS 14
+.nr LL 6i
+.tr ~
+.TL
+The Code Expander Generator
+.AU
+Frans Kaashoek
+Koen Langendoen
+.AI
+Dept. of Mathematics and Computer Science
+Vrije Universiteit
+Amsterdam, The Netherlands
+.NH
+Introduction
+.PP
+A \fBcode expander\fR (\fBce\fR for short) is a part of the 
+Amsterdam Compiler Kit
+.[
+toolkit
+.]
+(\fBACK\fR) and provides the user with
+high-speed generation of medium-quality code. Although conceptually
+equivalent to the more usual \fBcode generator\fR, it differs in some
+aspects.
+.PP
+Normally, a program to be compiled with \fBACK\fR
+is first fed to the preprocessor. The output of the preprocessor goes 
+into the appropriate front end, which produces EM
+.[
+block
+.]
+(a
+machine independent low level intermediate code). The generated EM code is fed
+into the peephole optimizer, which scans it with a window of a few instructions,
+replacing certain inefficient code sequences by better ones. After the
+peephole optimizer a back end follows, which produces high-quality assembly code.
+The assembly code goes via the target optimizer into the assembler and the
+object code then goes into the
+linker/loader, the final component in the pipeline. 
+.PP
+For various applications 
+this scheme is too slow. When debugging, for example, 
+compile time is more important than execution time of a program.
+For this purpose a new scheme is introduced:
+.IP \ \ 1:
+The code generator and assembler are
+replaced by a library, the \fBcode expander\fR, consisting of a set of 
+routines, one for every EM-instruction. Each routine expands its EM-instruction
+into relocatable object code. In contrast, the usual ACK code generator uses
+expensive pattern matching on sequences of EM-instructions.
+The peephole and target optimizers are not used.
+.IP \ \ 2:
+These routines replace the usual EM-generating routines in the front end; this
+eliminates the overhead of intermediate files.
+.LP
+This results in a fast compiler producing object files, ready to be
+linked and loaded, at the cost of unoptimized object code.
+.PP
+Because of the
+simple nature of the code expander, it is much easier to build, to debug, and to
+test. Experience has demonstrated that a code expander can be constructed,
+debugged, and tested in less than two weeks.
+.PP
+This document describes the tools for automatically generating a
+\fBce\fR (a library of C files) from two tables and 
+a few machine-dependent functions. 
+A thorough knowledge of EM is necessary to understand this document.
+.NH
+The code expander generator
+.PP
+The code expander generator (\fBceg\fR) generates a code expander from 
+two tables and a few machine-dependent functions. This section explains how 
+\fBceg\fR works. The first half describes the transformations that are done on
+the two tables. The 
+second half tells how these transformations are done by the \fBceg\fR.
+.PP
+A code expander consists of a set of routines that convert EM-instructions
+directly to relocatable object code. These routines are called by a front 
+end through the EM_CODE(3ACK)
+.[
+EM_CODE
+.]
+interface. To free the table writer of the burden of building
+an object file, we supply a set of routines that build an object file
+in the ACK.OUT(5ACK)
+.[
+aout
+.]
+format (see appendix B). This set of routines is called
+the
+\fBback\fR-primitives (see appendix A). In short, a code expander consists of a
+set of routines that map the EM_CODE interface on the 
+\fBback\fR-primitives interface.
+.PP
+To avoid repetition of the same sequences of
+\fBback\fR-primitives in different
+EM-instructions
+and to improve readability, the EM-to-object information must be supplied in
+two
+tables. The EM_table maps EM to an assembly language, and the as_table
+maps
+assembly code to \fBback\fR-primitives. The assembly language is chosen by the
+table writer. It can either be an actual assembly language or his ad-hoc 
+designed language.
+.LP
+The following picture shows the dependencies between the different components:
+.sp
+.PS
+linewid = 0.5i
+A: line down 2i
+B: line down 2i with .start at A.start + (1.5i, 0)
+C: line down 2i with .start at B.start + (1.5i, 0)
+D: arrow right with .start at A.center - (0.25i, 0)
+E: arrow right with .start at B.center - (0.25i, 0)
+F: arrow right with .start at C.center - (0.25i, 0)
+"EM_CODE(3ACK)" at A.start above
+"EM_table" at B.start above
+"as_table" at C.start above
+"source language  " at D.start rjust
+"EM" at 0.5 of the way between D.end and E.start
+G: "assembly" at 0.5 of the way between E.end and F.start
+H: "  back primitives" at F.end ljust
+"(user defined)" at G - (0, 0.2i)
+"   (ACK.OUT)" at H - (0, 0.2i) ljust
+.PE
+.PP
+The picture suggests that, during compilation, the EM instructions are
+first transformed into assembly instructions and then the assembly instructions
+are transformed into object-generating calls. This
+is not what happens in practice, although the user is free to think it does.
+Actually, however, the EM_table and the as_table are combined at code
+expander generation time, yielding an imaginary compound table that results in
+routines from the EM_CODE interface that generate object code directly.
+.PP
+As already indicated, the compound table does not exist either. Instead, each
+assembly instruction in the as_table is converted to a C
+.[
+Kernighan
+.]
+routine that generates C code to call the \fBback\fR-primitives. The EM_table is
+converted into a program that for each EM instruction generates a routine,
+using the routines generated from the as_table. Execution of the latter program
+will then generate the code expander.
+.PP
+This scheme allows great flexibility 
+in the table writing, while still
+resulting in a very efficient code expander. One implication is that the
+as_table is interpreted twice and the EM_table only once. This has consequences
+for their structure.
+.PP
+To illustrate what happens, we give an example. The example is an entry in
+the tables for the VAX-machine. The assembly language chosen is a subset of the 
+VAX assembly language.
+.PP
+One of the most fundamental operations in EM is ``loc c'', which loads the
+value of c onto the stack. To expand this instruction the
+tables contain the following information:
+.DS
+EM_table   :
+.ft CW
+   C_loc   ==>   "pushl $$$1".
+     /* $1 refers to the first argument of C_loc. 
+      * $$ is a quoted $. */
+
+
+\fRas_table   :
+.ft CW
+   pushl  src : CONST   ==> 
+                         @text1( 0xd0);
+                         @text1( 0xef);
+                         @text4( %$( src->num)).
+\fR
+.DE
+.LP
+The as_table is transformed into the following routine:
+.DS
+.ft CW
+pushl_instr(src)
+t_operand *src;    
+/* ``t_operand'' is a struct defined by the 
+ * table writer. */
+{
+   printf("swtxt();");
+   printf("text1( 0xd0 );");
+   printf("text1( 0xef );");
+   printf("text4(%s);", substitute_dollar( src->num));
+}
+\fR
+.DE
+Using ``pushl_instr()'', the following routine is generated from the EM_table:
+.DS
+.ft CW
+C_loc( c)
+arith c;
+/* text1() and text4() are library routines that fill the
+ * text segment. */
+{
+    swtxt();
+    text1( 0xd0);    
+    text1( 0xef);   
+    text4( c);
+}
+\fR
+.DE
+.LP
+A compiler call to ``C_loc()'' will cause the 1-byte numbers ``0xd0'' 
+and ``0xef''
+and the 4-byte value of the variable ``c'' to be stored in the text segment.
+.PP
+The transformations on the tables are done automatically by the code expander
+generator.
+The code expander generator is made up of two tools:
+\fBemg\fR and \fBasg\fR. \fBAsg\fR 
+transforms 
+each assembly instruction into a C routine. These C routines generate calls
+to the \fBback\fR-primitives. The generated C routines are used
+by \fBemg\fR to generate the actual code expander from the EM_table.
+.PP
+The link between \fBemg\fR and \fBasg\fR is an assembly language.
+We did not enforce a specific syntax for the assembly language;
+instead we have given the table writer the freedom
+to make an ad-hoc assembly language or to use an actual assembly language 
+suitable for his purpose. Apart from a greater flexibility this
+has another advantage; if the table writer adopts the assembly language that
+runs on the machine at hand, he can test the EM_table independently of the
+as_table. Of course there is a price to pay: the table writer has to
+do the decoding of the operands himself. See section 4 for more details.
+.PP
+Before we describe the structure of the tables in detail, we will give 
+an overview of the four main phases.
+.IP "phase 1:"
+.br
+The as_table is transformed by \fBasg\fR. This results in a set of C routines. 
+Each assembly-opcode generates one C routine. Note that a call to such a
+routine does not generate the corresponding object code; it generates C code,
+which, when executed, generates the desired object code.
+.IP "phase 2:"
+.br
+The C routines generated by \fBasg\fR are used by emg to expand the EM_table. 
+This
+results in a set of C routines, the code expander, which conform to the 
+procedural interface EM_CODE(3ACK). A call to such a routine does indeed
+generate the desired object code.
+.IP "phase 3:"
+.br
+The front end that uses the procedural interface is linked/loaded with the
+code expander generated in phase 2 and the \fBback\fR-primitives (a supplied
+library). This results in a compiler.
+.IP "phase 4:"
+.br
+The compiler runs. The routines in the code expander are
+executed and produce object code.
+.RE
+.NH
+Description of the EM_table
+.PP
+This section describes the EM_table. It contains four subsections.
+The first three subsections describe the syntax of the EM_table,
+the
+semantics of the EM_table, and the functions and
+constants that must be present in the EM_table, in the file ``mach.c'' or in
+the file ``mach.h''. The last section explains how a table writer can generate
+assembly code instead of object code. The section on
+semantics contains many examples.
+.NH 2
+Grammar
+.PP
+The following grammar describes the syntax of the EM_table.
+.VS +4
+.TS
+center tab(%);
+l c l.
+TABLE%::=%( RULE)*
+RULE%::=%C_instr   ( COND_SEQUENCE | SIMPLE)
+COND_SEQUENCE%::=%( condition   SIMPLE)*   ``default''   SIMPLE
+SIMPLE%::=% ``==>'' ACTION_LIST
+ACTION_LIST%::=%[ ACTION   ( ``;'' ACTION)* ]   ``.''
+ACTION%::=%AS_INSTR
+%|%function-call
+AS_INSTR%::=%``"'' [ label ``:'']   [ INSTR] ``"''
+INSTR%::=%mnemonic   [ operand   ( ``,''   operand)* ]
+.TE
+.VS -4
+.PP
+The ``('' ``)'' brackets are used for grouping, ``['' ... ``]'' 
+means ... 0 or 1 time,
+a ``*'' means zero or more times, and 
+a ``|'' means 
+a choice between left or right. A \fBC_instr\fR is 
+a name in the EM_CODE(3ACK) interface. \fBcondition\fR is a C expression. 
+\fBfunction-call\fR is a call of a C function. \fBlabel\fR, \fBmnemonic\fR,
+and \fBoperand\fR are arbitrary strings. If an \fBoperand\fR 
+contains brackets, the
+brackets must match. There is an upper bound on the number of
+operands; the maximum number is defined by the constant MAX_OPERANDS in the
+file ``const.h'' in the directory assemble.c. Comments in the table should be
+placed between ``/*'' and ``*/''. 
+The table is processed by the C preprocessor, before being parsed by
+\fBemg\fR.
+.NH 2
+Semantics
+.PP
+The EM_table is processed by \fBemg\fR. \fBEmg\fR generates a C function
+for every instruction in the EM_CODE(3ACK) interface.
+For every EM-instruction not mentioned in the EM_table, a
+C function that prints an error message is generated.
+It is possible to divide the EM_CODE(3ACK)-interface into four parts:
+.IP \0\01: 
+text instructions      (e.g., C_loc, C_adi, ..)
+.IP \0\02: 
+pseudo instructions    (e.g., C_open, C_df_ilb, ..)
+.IP \0\03: 
+storage instructions   (e.g., C_rom_icon,  ..)
+.IP \0\04: 
+message instructions   (e.g., C_mes_begin, ..)
+.LP
+This section starts by giving the semantics of the grammar. The examples
+are text instructions. The section ends with remarks on the pseudo
+instructions and the storage instructions. Since message instructions are not
+useful for a code expander, they are ignored. 
+.PP
+.NH 3
+Actions
+.PP
+The EM_table is made up of rules describing how to expand a \fBC_instr\fR
+defined by the EM_CODE(3ACK)-interface (corresponding 
+to an EM instruction) into actions. 
+There are two kinds of actions: assembly instructions and C function calls. 
+An assembly instruction is defined as a mnemonic followed by zero or more
+operands separated by commas. The semantics of an assembly instruction is
+defined by the table writer. When the assembly language is not expressive 
+enough, then, as an escape route, function calls can be made. However, this
+reduces
+the speed of the actual code expander. Finally, actions can be grouped into
+a list of actions; actions are separated by a semicolon and terminated 
+by a ``.''.
+.DS
+.ft CW
+C_nop   ==> .            
+       /* Empty action list : no operation. */
+
+C_inc   ==> "incl (sp)". 
+       /* Assembler instruction, which is evaluated 
+        * during expansion of the EM_table */
+
+C_slu   ==> C_sli( $1).  
+       /* Function call, which is evaluated during
+        *  execution of the compiler. */
+\fR
+.DE
+.NH 3
+Labels
+.PP
+Since an assembly language without instruction labels is a rather weak 
+language, labels inside a contiguous block of assembly instructions are 
+allowed. When using labels two rules must be observed:
+.IP \0\01:
+The name of a label should be unique inside an action list.
+.IP \0\02:
+The labels used in an assembler instruction should be defined in the same
+action list.
+.LP
+The following example illustrates the usage of labels.
+.DS
+.ft CW
+   /* Compare the two top elements on the stack. */
+C_cmp      ==>     "pop bx";           
+                   "pop cx";          
+                   "xor ax, ax";
+                   "cmp cx, bx";
+                /* Forward jump to local label */
+                   "je 2f";  
+                   "jb 1f";
+                   "inc ax";
+                   "jmp 2f";
+                   "1: dec ax";
+                   "2: push ax".
+\fR
+.DE
+We will come back to labels in the section on the as_table.
+.NH 3
+Arguments of an EM instruction
+.PP
+In most cases the translation of a \fBC_instr\fR depends on its arguments.
+The arguments of a \fBC_instr\fR are numbered from 1 to \fIn\fR, where \fIn\fR
+is the
+total number of arguments of the current \fBC_instr\fR (there are a few
+exceptions, see Implicit arguments). The table writer may
+refer to an argument as $\fIi\fR. If a plain $-sign is needed in an
+assembly instruction, it must be preceded by an extra $-sign.
+.PP
+There are two groups of \fBC_instr\fRs whose arguments are handled specially:
+.RS
+.IP "1: Instructions dealing with local offsets"
+.br
+The value of the $\fIi\fR argument referring to a parameter ($\fIi\fR >= 0)
+is increased by ``EM_BSIZE''. ``EM_BSIZE'' is the size of the return status block
+and must be defined in the file ``mach.h'' (see section 3.3). For example :
+.DS
+.ft CW
+C_lol   ==>     "push $1(bp)". 
+       /* automatic conversion of $1 */
+\fR
+.DE
+.IP "2: Instructions using global names or instruction labels"
+.br
+All the arguments referring to global names or instruction labels will be
+transformed into unique assembly names. To prevent name clashes with library
+names, the table writer has to provide the
+conversions in the file ``mach.h''. For example :
+.DS
+.ft CW
+C_bra   ==>     "jmp $1". 
+        /* automatic conversion of $1 */
+        /* type arith is converted to string */
+\fR
+.DE
+.RE
+.NH 3
+Conditionals
+.PP
+The rules in the EM_table can be divided into two groups: simple rules and 
+conditional rules. The simple rules are made up of a \fBC_instr\fR followed by 
+a list of actions, as described above. The conditional rules (COND_SEQUENCE)
+allow the table writer to select an action list depending on the value of 
+a condition. 
+.PP
+A COND_SEQUENCE is a list of boolean expressions, each with a corresponding
+simple rule. If
+an expression evaluates to true, the corresponding simple rule is carried
+out. If more than one condition evaluates to true, the first one is chosen.
+The last case of a COND_SEQUENCE of a \fBC_instr\fR must handle 
+the default case.
+The boolean expressions in a COND_SEQUENCE must be C expressions. Besides the
+ordinary C operators and constants, $\fIi\fR references can be used 
+in an expression. 
+.DS
+.ft CW
+    /* Load address of LB $1 levels back. */
+C_lxl                                 
+    $1 == 0    ==>    "pushl fp".
+    $1 == 1    ==>    "pushl 4(ap)".
+    default    ==>    "movl $$$1, r0";
+                      "jsb .lxl";
+                      "pushl r0".
+\fR
+.DE
+.NH 3
+Abbreviations
+.PP
+EM instructions with an external as an argument come in three variants in
+the EM_CODE(3ACK) interface. In most cases it will be possible to take 
+these variants together. For this purpose the ``..'' notation is introduced. 
+For the code expander there is no difference between the 
+following instructions. 
+.DS
+.ft CW
+C_loe_dlb    ==>    "pushl $1 + $2".
+C_loe_dnam   ==>    "pushl $1 + $2".
+C_loe        ==>    "pushl $1 + $2".
+\fR
+.DE
+So it can be written in the following way.
+.DS
+.ft CW
+C_loe..      ==>    "pushl $1 + $2".
+\fR
+.DE
+.NH 3
+Implicit arguments
+.PP
+In the last example ``C_loe'' has two arguments, but in the EM_CODE interface 
+it has one argument. This argument depends on the current ``hol''
+block; in the EM_table this is made explicit. Every \fBC_instr\fR whose
+argument depends on a ``hol'' block has one extra argument; argument 1 refers
+to the ``hol'' block.
+.NH 3
+Pseudo instructions
+.PP
+Most pseudo instructions are machine independent and are provided
+by \fBceg\fR. The table writer has only to supply the following functions,
+which are used to build a stackframe:
+.DS
+.ft CW
+C_prolog()
+/* Performs the prolog, for example save 
+ * return address */
+
+C_locals( n) 
+arith n;
+/* Allocate n bytes for locals on the stack */
+
+C_jump( label)
+char *label;
+/* Generates code for a jump to ``label'' */
+\fR
+.DE
+.LP
+These functions can be defined in ``mach.c'' or in the EM_table (see 
+section 3.3).
+.NH 3
+Storage instructions
+.PP
+The storage instructions ``C_bss_\fIcstp()\fR'', ``C_hol_\fIcstp()\fR'',
+``C_con_\fIcstp()\fR'', and ``C_rom_\fIcstp()\fR'', except for the instructions
+dealing with constants of type string (C_..._icon, C_..._ucon, C_..._fcon), are
+generated automatically. No information is needed in the table.
+To generate the C_..._icon, C_..._ucon, C_..._fcon instructions 
+\fBceg\fR only has to know how to convert a number of type string to bytes;
+this can be defined with the constants ONE_BYTE, TWO_BYTES, and FOUR_BYTES.
+C_rom_icon, C_con_icon, C_bss_icon, C_hol_icon can be abbreviated by ..icon.
+This also holds for ..ucon and ..fcon.
+For example :
+.DS
+.ft CW
+\\.\\.icon
+    $2 == 1   ==>  gen1( (ONE_BYTE) atoi( $1)).
+    $2 == 2   ==>  gen2( (TWO_BYTES) atoi( $1)).
+    $2 == 4   ==>  gen4( (FOUR_BYTES) atol( $1)).
+    default   ==>   arg_error( "..icon", $2).
+\fR
+.DE
+Gen1(), gen2() and gen4() are \fBback\fR-primitives (see appendix A), and
+generate one, two, or four byte constants. Atoi() is a C library function that
+converts strings to integers.
+The constants ``ONE_BYTE'', ``TWO_BYTES'', and ``FOUR_BYTES'' must be defined in
+the file ``mach.h''.
+.NH 2
+User supplied definitions and functions
+.PP
+If the table writer uses all the default functions he has only to supply
+the following constants and functions:
+.TS
+tab(#);
+l c lw(10c).
+C_prolog()#:#T{
+Do prolog
+T}
+C_jump( l)#:#T{
+Perform a jump to label l
+T}
+C_locals( n)#:#T{
+Allocate n bytes on the stack
+T}
+#
+NAME_FMT#:#T{
+Print format describing name to a unique name conversion. The format must
+contain %s.
+T}
+DNAM_FMT#:#T{
+Print format describing data-label to a unique name conversion. The  format
+must contain %s.
+T}
+DLB_FMT#:#T{
+Print format describing numerical-data-label to a unique name conversion.
+The format must contain a %ld.
+T}
+ILB_FMT#:#T{
+Print format describing instruction-label to a unique name conversion.
+The format must contain %d followed by %ld.
+T}
+HOL_FMT#:#T{
+Print format describing hol-block-number to a unique name conversion.
+The format must contain %d.
+T}
+#
+EM_WSIZE#:#T{
+Size of a word in bytes on the target machine
+T}
+EM_PSIZE#:#T{
+Size of a pointer in bytes on the target machine
+T}
+EM_BSIZE#:#T{
+Size of base block in bytes on the target machine
+T}
+#
+ONE_BYTE#:#T{
+\\C suitable type that can hold one byte on the machine where the \fBce\fR runs
+T}
+TWO_BYTES#:#T{
+\\C suitable type that can hold two bytes on the machine where the \fBce\fR runs
+T}
+FOUR_BYTES#:#T{
+\\C suitable type that can hold four bytes on the machine where the \fBce\fR runs
+T}
+#
+BSS_INIT#:#T{
+The default value that the loader puts in the bss segment
+T}
+#
+BYTES_REVERSED#:#T{
+Must be defined if the byte order must be reversed.
+By default the least significant byte is outputted first.\fR\(dg
+.FS 
+\fR\(dg When both byte orders are used, for 
+example NS 16032, the table writer has to
+supply his own set of routines.
+.FE
+T}
+WORDS_REVERSED#:#T{
+Must be defined if the word order must be reversed.
+By default the least significant word is outputted first.
+T}
+.TE
+.LP
+An example of the file ``mach.h'' for the vax4.
+.TS
+tab(:);
+l l l.
+#define : ONE_BYTE : int
+#define : TWO_BYTES : int
+#define : FOUR_BYTES : long
+:
+#define : EM_WSIZE : 4
+#define : EM_PSIZE : 4
+#define : EM_BSIZE : 0
+:
+#define : BSS_INIT : 0
+:
+#define : NAME_FMT : "_%s"
+#define : DNAM_FMT : "_%s"
+#define : DLB_FMT  : "_%ld"
+#define : ILB_FMT  : "I%03d%ld"
+#define : HOL_FMT  : "hol%d"
+.TE
+Notice that EM_BSIZE is zero. The vax ``call'' instruction automatically takes
+care of the base block.
+.PP
+There are three primitives that have to be defined by the table writer, either
+as functions in the file ``mach.c'' or as rules in the EM_table.
+For example, for the 8086 they look like this:
+.DS
+.ft CW
+C_jump       ==>       "jmp $1".
+
+C_prolog     ==>       "push bp";
+                     "mov bp, sp".
+
+C_locals     
+  $1  == 0   ==>     .
+  $1  == 2   ==>     "push ax".
+  $1  == 4   ==>     "push ax";
+                     "push ax".
+  default    ==>     "sub sp, $1".
+\fR
+.DE
+.NH 2
+Generating assembly code 
+.PP
+When the code expander generator is used for generating assembly instead of
+object code (see section 5), additional print formats have to be defined 
+in ``mach.h''. The following table lists these formats.
+.TS
+tab(#);
+l c lw(10c).
+BYTE_FMT#:#T{
+Print format to allocate and initialize one byte. The format must 
+contain %ld.
+T}
+WORD_FMT#:#T{
+Print format to allocate and initialize one word. The format must 
+contain %ld.
+T}
+LONG_FMT#:#T{
+Print format to allocate and initialize one long. The format must 
+contain %ld.
+T}
+BSS_FMT#:#T{
+Print format to allocate space in the bss segment. The format must 
+contain %ld (number of bytes).
+T}
+COMM_FMT#:#T{
+Print format to declare a "common". The format must contain a %s (name to be declared
+common), followed by a %ld (number of bytes).
+T}
+
+SEGTXT_FMT#:#T{
+Print format to switch to the text segment.
+T}
+SEGDAT_FMT#:#T{
+Print format to switch to the data segment.
+T}
+SEGBSS_FMT#:#T{
+Print format to switch to the bss segment.
+T}
+
+SYMBOL_DEF_FMT#:#T{
+Print format to define a label. The format must contain %s.
+T}
+GLOBAL_FMT#:#T{
+Print format to declare a global name. The format must contain %s.
+T}
+LOCAL_FMT#:#T{
+Print format to declare a local name. The format must contain %s.
+T}
+
+RELOC1_FMT#:#T{
+Print format to initialize a byte with an address expression. The format must
+contain %s (name) and %ld (offset).
+T}
+RELOC2_FMT#:#T{
+Print format to initialize a word with an address expression. The format must
+contain %s (name) and %ld (offset).
+T}
+RELOC4_FMT#:#T{
+Print format to initialize a long with an address expression. The format must
+contain %s (name) and %ld (offset).
+T}
+
+ALIGN_FMT#:#T{
+Print format to align a segment.
+T}
+.TE
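+.LP
+As an illustration only (this fragment is not taken from an existing table,
+and the directive names are assumptions of the sketch), a ``mach.h'' for a
+Unix-style assembler might define some of these formats as follows:
+.DS
+.ft CW
+#define BYTE_FMT        ".byte %ld\en"
+#define WORD_FMT        ".word %ld\en"
+#define LONG_FMT        ".long %ld\en"
+#define BSS_FMT         ".space %ld\en"
+#define COMM_FMT        ".comm %s, %ld\en"
+#define SEGTXT_FMT      ".text\en"
+#define SEGDAT_FMT      ".data\en"
+#define SEGBSS_FMT      ".bss\en"
+#define SYMBOL_DEF_FMT  "%s:\en"
+#define GLOBAL_FMT      ".globl %s\en"
+\fR
+.DE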
+.NH 1
+Description of the as_table
+.PP
+This section describes the as_table. Like the previous section, it is divided 
+into
+four parts: the first two parts describe the grammar and the semantics of the 
+as_table; the third part gives an overview
+of the functions and the constants that must be present in the as_table (in 
+the file ``as.h'' or in the file ``as.c''); the last part describes the case when
+assembly is generated instead of object code.
+The part on semantics contains examples that appear in the as_table for the
+VAX or for the 8086. 
+.NH 2
+Grammar
+.PP
+The form of the as_table is given by the following grammar :
+.VS +4
+.TS
+center tab(#);
+l c l.
+TABLE#::=#( RULE)*
+RULE#::=#( mnemonic | ``...'')   DECL_LIST   ``==>''   ACTION_LIST
+DECL_LIST#::=#DECLARATION   ( ``,''   DECLARATION)*
+DECLARATION#::=#operand   [ ``:''   type]
+ACTION_LIST#::=#ACTION   ( ``;''   ACTION)*   ``.''
+ACTION#::=#IF_STATEMENT
+#|#function-call
+#|#``@''function-call
+IF_STATEMENT#::=#``@if''   ``('' condition ``)''   ACTION_LIST
+##( ``@elsif''   ``('' condition ``)''   ACTION_LIST)*
+##[ ``@else''   ACTION_LIST]
+##``@fi''
+function-call#::=#function-identifier ``('' [arg (,arg)*] ``)''
+arg#::=#argument
+#|#reference
+.TE
+.VS -4
+.LP
+\fBmnemonic\fR, \fBoperand\fR, and \fBtype\fR are all C identifiers;
+\fBcondition\fR is a normal C expression;
+\fBfunction-call\fR must be a C function call. A function can be called with
+standard C arguments or with a reference (see section 4.2.4).
+Since the as_table is
+interpreted during code expander generation as well as during code
+expander execution, two levels of calls are present in it. A ``function-call''
+is done during code expander generation, a ``@function-call'' during code
+expander execution.
+.NH 2
+Semantics
+.PP
+The as_table is made up of rules that map assembly instructions onto
+\fBback\fR-primitives, a set of functions that construct an object file. 
+The table is processed by \fBasg\fR, which generates a C function
+for each assembler mnemonic. The names of
+these functions are the assembler mnemonics postfixed 
+with ``_instr'' (e.g., ``add'' becomes ``add_instr()''). These functions 
+will be used by the function 
+assemble() during the expansion of the EM_table. 
+After explaining the semantics of the as_table the function
+assemble() will be described.
+.NH 3
+Rules
+.PP
+A rule in the as_table is made up of a left and a right hand side; 
+the left hand side describes an assembler 
+instruction (mnemonic and operands); the
+right hand side gives the corresponding actions as \fBback\fR-primitives or as
+functions defined by the table writer, which call \fBback-primitives\fR.
+Two simple examples from the VAX as_table and the 8086 as_table, resp.:
+.DS
+.ft CW
+movl src, dst  ==> @text1( 0xd0);
+                   gen_operand( src); 
+                   gen_operand( dst). 
+    /* ``gen_operand'' is a function that encodes 
+     * operands by calling back-primitives. */
+
+rep ens:MOVS   ==>  @text1( 0xf3);
+                    @text1( 0xa5).  
+
+\fR
+.DE
+.NH 3
+Declaration of types.
+.PP
+In general, a machine instruction is encoded as an opcode followed by zero or
+more
+operands. There are two methods for mapping assembler mnemonics
+onto opcodes: the mnemonic determines the opcode, or mnemonic and operands 
+together determine the opcode. Both cases can be 
+easily expressed in the as_table.
+The first case is obvious. 
+The second case is handled by introducing type fields for the operands.
+.PP
+When mnemonic and operands together determine the opcode, the table writer has
+to give several rules for such a mnemonic, one for each combination of operand
+types. The rules differ in the type fields of the operands.
+The table writer has to supply functions that check the type
+of the operand. The name of such a function is the name of the type; it
+has one argument: a pointer to a struct of type \fIt_operand\fR; it returns
+non-zero when the operand is of this type, otherwise it returns 0.
+.PP
+This will usually lead to a list of rules per mnemonic. To reduce the amount of
+work an abbreviation is supplied. Once the mnemonic is specified it can be
+referred to in the following rules by ``...''.
+One has to make sure
+that each mnemonic is mentioned only once in the as_table, otherwise 
+\fBasg\fR will generate more than one function with the same name.
+.PP
+The following example shows the usage of type fields.
+.DS 
+.ft CW
+ mov dst:REG, src:EADDR  ==>  
+          @text1( 0x8b);                /* opcode */
+          mod_RM( %d(dst->reg), src). /* operands */
+
+ ... dst:EADDR, src:REG  ==>  
+          @text1( 0x89);                /* opcode */
+          mod_RM( %d(src->reg), dst). /* operands */
+\fR
+.DE
+The table-writer must supply the restriction functions, 
+.ft CW
+REG\fR and
+.ft CW
+EADDR\fR in the previous example, in ``as.c'' or ``as.h''.
+.NH 3 
+The function of the @-sign and the if-statement.
+.PP
+The right hand side of a rule is made up of function calls. 
+Since the as_table is
+interpreted on two levels, during code expander generation and during code
+expander execution, two levels of calls are present in it. A function-call
+without an ``@''-sign
+is called during code expander generation (e.g., the
+.ft CW
+gen_operand()\fR in the
+first example). 
+A function call with an ``@''-sign is called during code 
+expander execution (e.g.,
+the \fBback\fR-primitives). So the last group will be part of the compiler.
+.PP
+The need for the ``@''-sign construction arises, for example, when
+implementing push/pop optimization (e.g., ``push x'' followed by ``pop y'' 
+can be replaced by ``move x, y'').
+In this case flags need to be set, unset, and tested during the execution of
+the compiler:
+.DS L
+.ft CW
+PUSH src  ==>   /* save in ax */
+                mov_instr( AX_oper, src);  
+                /* set flag */
+                @assign( push_waiting, TRUE).         
+\fR
+.DE
+.DS
+.ft CW
+POP dst   ==>   @if ( push_waiting)
+                       /* ``mov_instr'' is asg-generated */
+                       mov_instr( dst, AX_oper);      
+                       @assign( push_waiting, FALSE).
+                @else
+                       /* ``pop_instr'' is asg-generated */
+                       pop_instr( dst).               
+                @fi.
+\fR
+.DE
+.LP
+Although the @-sign is followed syntactically by a
+function name, this function can very well be the name of a macro defined in C.
+This is in fact the case with ``@assign()'' in the above example.
+.PP
+The case may arise when information is needed that is not known 
+until execution of
+the compiler.  For example one needs to know if a ``$\fIi\fR'' argument fits in
+one byte.
+In this case one can use a special if-statement provided 
+by \fBasg\fR: @if, @elsif, @else, @fi. This means that the conditions 
+will be evaluated at
+run time of the \fBce\fR. In such a condition one may of course refer 
+to the ``$\fIi\fR'' arguments. For example, constants can be 
+packed into one or two byte arguments as follows:
+.DS 
+.ft CW
+mov dst:ACCU, src:DATA ==> 
+                       @if ( fits_byte( %$(dst->expr)))
+                            @text1( 0xc0);
+                            @text1( %$(dst->expr)).
+                       @else
+                            @text1( 0xc8);
+                            @text2( %$(dst->expr)).
+                       @fi.
+.DE
+.NH 3
+References to operands
+.PP
+As noted before, the operands of an assembler instruction may be used as
+pointers to the struct \fIt_operand\fR in the right hand side of the table.
+Because of the free format assembler, the types of the fields in the struct
+\fIt_operand\fR are unknown to \fBasg\fR. As these fields can appear in calls
+to functions, \fBasg\fR must know 
+these types. This section explains how these types must be specified.
+.PP
+References to operands come in three forms: ordinary operands, operands that
+contain ``$\fIi\fR'' references, and operands that refer to names of local labels.
+The ``$\fIi\fR'' in operands represent names or numbers of a \fBC_instr\fR and must
+be given as arguments to the \fBback\fR-primitives. Labels in operands
+must be converted to a number that tells the distance, the number of bytes, 
+between the label and the current position in the text-segment. 
+.LP
+All three cases are treated in a uniform way. When the table writer
+makes a reference to an operand of an assembly instruction, he must describe
+the type of the operand in the following way.
+.VS +4
+.TS
+center tab(#);
+l c l.
+reference#::=#``%'' conversion
+##``('' operand-name ``\->'' field-name ``)''
+conversion#::=# printformat
+#|#``$''
+#|#``dist''
+printformat#::=#see PRINT(3ACK)
+.[
+PRINT
+.]
+.TE
+.VS -4
+.LP
+The three cases differ only in the conversion field. The printformat conversion
+applies to ordinary operands. The ``%$'' applies to operands that contain
+a ``$\fIi\fR''. The expression between parentheses must result in a pointer to
+a char. The
+result of ``%$'' is of the type of ``$\fIi\fR''. The ``%dist''
+applies to operands that refer to a local label. The expression between
+the brackets must result in a pointer to a char. The result of ``%dist'' is 
+of type arith.
+.PP
+The following example illustrates the usage of ``%$''. (For an
+example that illustrates the usage of ordinary fields see
+the section on ``User supplied definitions and functions'').
+.DS
+.ft CW
+jmp dst ==> 
+    @text1( 0xe9);
+    @reloc2( %$(dst->lab), %$(dst->off), PC_REL).
+\fR
+.DE
+.PP
+A useful function concerning $\fIi\fRs is arg_type(), which takes as input a
+string starting with $\fIi\fR and returns the type of the \fIi\fRth argument
+of the current EM-instruction, which can be STRING, ARITH or INT. One may need
+this function while decoding operands if the context of the $\fIi\fR does not
+give enough information.
+If the function arg_type() is used, the file
+arg_type.h must contain the definition of STRING, ARITH and INT.
+.PP
+%dist is only guaranteed to work when called as a parameter of text1(), text2() or text4().
+The goal of the %dist conversion is to reduce the number of reloc1(), reloc2()
+and reloc4()
+calls, saving space and time (no relocation at compiler run time). 
+The following example illustrates the usage of ``%dist''.
+.DS 
+.ft CW
+ jmp dst:ILB    ==> /* label in an instruction list */
+     @text1( 0xeb);          
+     @text1( %dist( dst->lab)).
+
+ ... dst:LABEL  ==> /* global label */
+     @text1( 0xe9);       
+     @reloc2( %$(dst->lab), %$(dst->off), PC_REL).
+\fR
+.DE
+.NH 3
+The functions assemble() and block_assemble()
+.PP
+The functions assemble() and block_assemble() are provided by \fBceg\fR.
+If, however, the table writer is not satisfied with the way they work 
+he can
+supply his own assemble() or block_assemble().
+The default function assemble() splits an assembly string into a 
+label, mnemonic,
+and operands and performs the following actions on them:
+.IP \0\01:
+It processes the local label; it records the name and current position. Thereafter it calls the function process_label() with one argument of type string,
+the label. The table writer has to define this function.
+.IP \0\02:
+Thereafter it calls the function process_mnemonic() with one argument of
+type string, the mnemonic. The table writer has to define this function.
+.IP \0\03:
+It calls process_operand() for each operand. Process_operand() must be
+written by the table-writer since no fixed representation for operands
+is enforced. It has two arguments: a string (the operand to decode) 
+and a pointer to the struct \fIt_operand\fR. The declaration of the struct 
+\fIt_operand\fR must be given in the
+file ``as.h'', and the table-writer can put all the information needed for
+encoding the operand in machine format in it.
+.IP \0\04:
+It examines the mnemonic and calls the associated function, generated by
+\fBasg\fR, with pointers to the decoded operands as arguments. This makes it
+possible to use the decoded operands in the right hand side of a rule (see
+below).
+.LP
+If the default assemble() does not work the way the table writer wants, he
+can supply his own version of it. Assemble() has the following arguments:
+.DS
+.ft CW
+assemble( instruction )
+    char *instruction;
+\fR
+.DE
+\fIinstruction\fR points to a null-terminated string.
+.PP
+The default function block_assemble() is called with a sequence of assembly
+instructions that belong to one action list. It calls assemble() for 
+every assembly instruction in
+this block. But if a special action is
+required on a block of assembly instructions, the table writer only has to
+rewrite this function to get a new \fBceg\fR that obliges to his wishes.
+The function block_assemble has the following arguments:
+.DS
+.ft CW
+block_assemble( instructions, nr, first, last)
+      char   **instructions;
+      int      nr, first, last;
+\fR
+.DE
+\fIInstructions\fR points to an array of pointers to strings representing
+assembly instructions. \fINr\fR is
+the number of instructions that must be assembled. \fIFirst\fR 
+and \fIlast\fR have no function in the default block_assemble(), but are 
+useful when optimizations are done in block_assemble().
+.PP
+Four things have to be specified in ``as.h'' and ``as.c''. First the user must
+give the declaration of struct \fIt_operand\fR in ``as.h'', and the functions
+process_operand(), process_mnemonic(), and process_label() must be given 
+in ``as.c''. If the right hand side of the as_table
+contains function calls other than the \fBback\fR-primitives, these functions
+must also be present in ``as.c''. Note that both the ``@''-sign (see 4.2.3) 
+and ``references'' (see 4.2.4) also work in the functions defined in ``as.c''. 
+.PP
+The following example shows the representative and essential parts of the 
+8086 ``as.h'' and ``as.c'' files. 
+.nr PS 10
+.nr VS 12
+.LP
+.DS L
+.ft CW
+/* Constants and type definitions in as.h */
+
+#define        UNKNOWN                0
+#define        IS_REG                 0x1
+#define        IS_ACCU                0x2
+#define        IS_DATA                0x4
+#define        IS_LABEL               0x8
+#define        IS_MEM                 0x10
+#define        IS_ADDR                0x20
+#define        IS_ILB                 0x40
+
+#define AX                0
+#define BX                3
+#define CL                1
+#define SP                4
+#define BP                5
+#define SI                6
+#define DI                7
+
+#define REG( op)         ( op->type & IS_REG)
+#define ACCU( op)        ( op->type & IS_REG  &&  op->reg == AX)
+#define REG_CL( op)      ( op->type & IS_REG  &&  op->reg == CL)
+#define DATA( op)        ( op->type & IS_DATA)
+#define LABEL( op)       ( op->type & IS_LABEL)
+#define ILB( op)         ( op->type & IS_ILB)
+#define MEM( op)         ( op->type & IS_MEM)
+#define ADDR( op)        ( op->type & IS_ADDR)
+#define EADDR( op)       ( op->type & ( IS_ADDR | IS_MEM | IS_REG))
+#define CONST1( op)      ( op->type & IS_DATA  && strcmp( "1", op->expr) == 0)
+#define MOVS( op)        ( op->type & IS_LABEL&&strcmp("\"movs\"", op->lab) == 0)
+#define IMMEDIATE( op)   ( op->type & ( IS_DATA | IS_LABEL))
+
+struct t_operand {
+        unsigned type;
+        int reg;
+        char *expr, *lab, *off;
+       };
+
+extern struct t_operand saved_op, *AX_oper;
+\fR
+.DE
+.nr PS 12
+.nr VS 14
+.LP
+.nr PS 10
+.nr VS 12
+.DS L
+.ft CW
+
+/* Some functions in as.c. */
+
+#include "arg_type.h"
+#include "as.h"
+
+#define last( s)     ( s + strlen( s) - 1)
+#define LEFT         '('
+#define RIGHT        ')'
+#define DOLLAR       '$'
+
+process_operand( str, op)
+char *str;
+struct t_operand *op;
+
+/*        expr            ->        IS_DATA and IS_LABEL
+ *        reg             ->        IS_REG and IS_ACCU
+ *        (expr)          ->        IS_ADDR
+ *        expr(reg)       ->        IS_MEM
+ */
+{
+        char *ptr, *index();
+
+        op->type = UNKNOWN;
+        if ( *last( str) == RIGHT) {
+                ptr = index( str, LEFT);
+                *last( str) = '\0';
+                *ptr = '\0';
+                if ( is_reg( ptr+1, op)) {
+                        op->type = IS_MEM;
+                        op->expr = ( *str == '\0' ? "0" : str);
+                }
+                else {
+                        set_label( ptr+1, op);
+                        op->type = IS_ADDR;
+                }
+        }
+        else
+                if ( is_reg( str, op))
+                        op->type = IS_REG;
+                else {
+                        if ( contains_label( str))
+                                set_label( str, op);
+                        else {
+                                op->type = IS_DATA;
+                                op->expr = str;
+                        }
+                }
+}
+
+/*********************************************************************/
+
+mod_RM( reg, op)
+int reg;
+struct t_operand *op;
+
+/* This function helps to decode operands in machine format.
+ * Note the $-operators
+ */
+{
+      if ( REG( op))
+              R233( 0x3, reg, op->reg);
+      else if ( ADDR( op)) {
+              R233( 0x0, reg, 0x6);
+              @reloc2( %$(op->lab), %$(op->off), ABSOLUTE);
+      }
+      else if ( strcmp( op->expr, "0") == 0)
+              switch( op->reg) {
+                case SI : R233( 0x0, reg, 0x4);
+                          break;
+
+                case DI : R233( 0x0, reg, 0x5);
+                          break;
+
+                case BP : R233( 0x1, reg, 0x6);        /* exception! */
+                          @text1( 0);
+                          break;
+
+                case BX : R233( 0x0, reg, 0x7);
+                          break;
+
+                default : fprint( STDERR, "Wrong index register %d\en",
+                                  op->reg);
+              }
+      else {
+              @if ( fit_byte( %$(op->expr)))
+                      switch( op->reg) {
+                          case SI : R233( 0x1, reg, 0x4);
+                                  break;
+      
+                        case DI : R233( 0x1, reg, 0x5);
+                                  break;
+      
+                        case BP : R233( 0x1, reg, 0x6);
+                                  break;
+      
+                        case BX : R233( 0x1, reg, 0x7);
+                                  break;
+      
+                        default : fprint( STDERR, "Wrong index register %d\en",
+                                          op->reg);
+                      }
+                      @text1( %$(op->expr));
+              @else
+                      switch( op->reg) {
+                        case SI : R233( 0x2, reg, 0x4);
+                                  break;
+      
+                        case DI : R233( 0x2, reg, 0x5);
+                                  break;
+      
+                        case BP : R233( 0x2, reg, 0x6);
+                                  break;
+      
+                        case BX : R233( 0x2, reg, 0x7);
+                                  break;
+      
+                        default : fprint( STDERR, "Wrong index register %d\en",
+                                          op->reg);
+                      }
+                      @text2( %$(op->expr));
+              @fi
+      }
+}
+\fR
+.DE
+.nr PS 12
+.nr VS 14
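+.PP
+The example above shows process_operand() and the helper mod_RM(), but not
+process_label() and process_mnemonic(). As an illustration only (not taken
+from an actual table), a minimal sketch of these two hooks, assuming the
+machine needs no special treatment of labels or mnemonics, could look
+like this:
+.DS L
+.ft CW
+/* Minimal versions of the hooks called by the default assemble().
+ * The variable name is an assumption of this sketch.
+ */
+static char *cur_mnemonic;
+
+process_label( l)
+char *l;
+{
+        /* assemble() itself already records the label name and the
+         * current position, so nothing more is needed here.
+         */
+}
+
+process_mnemonic( m)
+char *m;
+{
+        cur_mnemonic = m;    /* remember it, e.g. for error messages */
+}
+\fR
+.DE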
+.NH 2
+Generating assembly code
+.PP
+It is possible to generate assembly instead of object files (see section 5), in
+which case there is no need to supply ``as_table'', ``as.h'', and ``as.c''. 
+This option is useful for debugging the EM_table.
+.NH 1
+Building a code expander
+.PP
+This section describes how to generate a code expander in two phases.
+In phase one, the EM_table is
+written and assembly code is generated. If the assembly code is an actual
+assembly language, the EM_table can be tested by assembling and running the generated
+code. 
+If an ad-hoc assembly language is used by the table writer, it is not possible
+to test the EM_table, but the code generated is at least in readable form.
+In the second phase, the as_table is written and object code is generated.
+After the generated object code is fed into the loader, it can be tested.
+.NH 2
+Phase one
+.PP
+The following is a list of instructions to make a
+code expander that generates assembly instructions.
+.IP \0\01:
+Create a new directory.
+.IP \0\02:
+Create the ``EM_table'', ``mach.h'', and ``mach.c'' files; there is no need 
+for ``as_table'', ``as.h'', and ``as.c'' at this moment.
+.IP \0\03:
+type
+.br
+.ft CW
+install_ceg -as
+\fR
+.br
+install_ceg will create a Makefile and three directories: ceg, ce, and back.
+Ceg will contain the program ceg; this program will be
+used to turn ``EM_table'' into a set of C source files (in the ce directory),
+one for each
+EM-instruction. All these files will be compiled and put in a library called
+\fBce.a\fR.
+.br
+The option 
+.ft CW
+-as\fR means that a \fBback\fR-library will be 
+generated (in the directory ``back'') that
+supports the generation of assembly language. The library is named ``back.a''.
+.IP \0\04:
+Link a front end, ``ce.a'', and ``back.a'' together resulting in a compiler
+that generates assembly code.
+.LP
+If the table writer has chosen an actual assembly language, the EM_table can be
+tested (e.g., by running the compiler on the EM test set). If an error occurs,
+change the EM_table and type
+.IP
+.br
+.ft CW
+update_ceg\fR \fBC_instr
+\fR
+.br
+.LP
+where \fBC_instr\fR stands for the name of the erroneous EM-instruction.
+If the table writer has chosen an ad-hoc assembly language, he can at least
+read the generated code and look for possible errors. If an error is found,
+the same procedure as described above can be followed.
+.NH 2
+Phase two
+.PP
+The next phase is to generate a \fBce\fR that produces relocatable object
+code.
+.IP \0\01:
+Remove the ``ce'', ``ceg'', and ``back'' directories.
+.IP \0\02:
+Write the ``as_table'', ``as.h'', and ``as.c'' files.
+.IP \0\03:
+type
+.sp
+.ft CW
+install_ceg -obj \fR
+.sp
+The option 
+.ft CW
+-obj\fR means that ``back.a'' will contain a library 
+for generating
+ACK.OUT(5ACK) object files, see appendix B. 
+If the writer does not want to use the default ``back.a'',
+the 
+.ft CW
+-obj\fR flag must be omitted and a ``back.a'' should be supplied that
+generates object code in the desired format.
+.IP \0\04:
+Link a front end, ``ce.a'', and ``back.a'' together resulting in a compiler
+that generates object code.
+.LP
+The as_table is ready to be tested. If an error occurs, adapt the table.
+Then there are two ways to proceed: 
+.IP \0\01:
+recompile the whole EM_table,
+.sp
+.ft CW
+update_ceg ALL \fR
+.sp
+.IP \0\02:
+recompile just the few EM-instructions that contained the error,
+.sp
+.ft CW
+update_ceg \fBC_instr\fR
+.sp
+where \fBC_instr\fR is an erroneous EM-instruction.
+This has to be done for every EM-instruction that contained the erroneous
+assembly instruction.
+.NH
+Acknowledgements
+.PP
+We want to thank Henri Bal, Dick Grune, and Ceriel Jacobs for their 
+valuable suggestions and the critical reading of this paper.
+.NH
+References
+.LP
+.[
+$LIST$
+.]
+.bp
+.SH 
+Appendix A, \fRthe \fBback\fR-primitives
+.PP
+This appendix describes the routines available to generate relocatable
+object code. If the default back.a is used, the object code is in 
+ACK.OUT(5ACK) format.
+In the default back.a, the names defined here are remapped to more hidden names,
+to avoid name conflicts with, for instance, names used in the front end. This
+remapping is done in an include-file, "back.h".
+A user-implemented back.a should do the same thing.
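+.LP
+For instance, such a remapping could look like the following fragment
+(the hidden names shown here are made up for the example; the actual
+names used in the default back.a may differ):
+.DS
+.ft CW
+/* hypothetical fragment of back.h */
+#define text1            __ce_text1
+#define text2            __ce_text2
+#define text4            __ce_text4
+#define reloc2           __ce_reloc2
+#define switch_segment   __ce_switch_segment
+\fR
+.DE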
+.nr PS 10
+.nr VS 12
+.PP
+.IP A1.
+Text and data generation; with ONE_BYTE b; TWO_BYTES w; FOUR_BYTES l; arith n;
+.VS +4
+.TS
+tab(#);
+l c lw(10c).
+text1( b)#:#T{
+Put one byte in text-segment.
+T}
+text2( w)#:#T{
+Put word (two bytes) in text-segment, byte-order is defined by
+BYTES_REVERSED in mach.h.
+T}
+text4( l)#:#T{
+Put long ( two words) in text-segment, word-order is defined by
+WORDS_REVERSED in mach.h.
+T}
+#
+con1( b)#:#T{
+Same for CON-segment.
+T}
+con2( w)#:
+con4( l)#:
+#
+rom1( b)#:#T{
+Same for ROM-segment.
+T}
+rom2( w)#:
+rom4( l)#:
+#
+gen1( b)#:#T{
+Same for the current segment, only to be used in the ``..icon'', ``..ucon'', etc.
+pseudo EM-instructions.
+T}
+gen2( w)#:
+gen4( l)#:
+#
+bss( n)#:#T{
+Put n bytes in bss-segment, value is BSS_INIT.
+T}
+common( n)#:#T{
+If there is a saved label, generate a "common" for it, of size
+n. Otherwise, it is equivalent to bss(n).
+(see also the save_label routine).
+T}
+.TE
+.VS -4
+.IP A2.
+Relocation; with char *s; arith o; int r;
+.VS +4
+.TS
+tab(#);
+l c lw(10c).
+reloc1( s, o, r)#:#T{
+Generates relocation-information for 1 byte in the current segment.
+T}
+##s\0:\0the string which must be relocated
+##o\0:\0the offset in bytes from the string. 
+##T{
+r\0:\0relocation type. It can have the values ABSOLUTE or PC_REL. These
+two constants are defined in the file ``back.h''
+T}
+reloc2( s, o, r)#:#T{
+Generates relocation-information for 1 word in the
+current segment. Byte-order according to BYTES_REVERSED in mach.h.
+T}
+reloc4( s, o, r)#:#T{
+Generates relocation-information for 1 long in the
+current segment. Word-order according to WORDS_REVERSED in mach.h.
+T}
+.TE
+.VS -4
+.IP A3.
+Symbol table interaction; with int seg; char *s;
+.VS +4
+.TS
+tab(#);
+l c lw(10c).
+switch_segment( seg)#:#T{
+sets current segment to ``seg'', and does alignment if necessary. ``seg'' 
+can be one of the four constants defined in ``back.h'': SEGTXT, SEGROM,
+SEGCON, SEGBSS.
+T}
+#
+symbol_definition( s)#:#T{
+Define s in symbol-table.
+T}
+set_local_visible( s)#:#T{
+Record scope-information in symbol table.
+T}
+set_global_visible( s)#:#T{
+Record scope-information in symbol table.
+T}
+.TE
+.VS -4
+.IP A4.
+Start/end actions; with char *f;
+.VS +4
+.TS
+tab(#);
+l c lw(10c).
+open_back( f)#:#T{
+Directs output to file ``f''; if f is the null pointer, output must be given on
+standard output.
+T}
+close_back()#:#T{
+close output stream.
+T}
+init_back()#:#T{
+Only used with user-written back-library, gives the opportunity to initialize.
+T}
+end_back()#:#T{
+Only used with user-written back-library.
+T}
+.TE
+.VS -4
+.IP A5.
+Label generation routines; with int n; arith g; char *l; These routines all
+return a "char *" to a static area, which is overwritten at each call.
+.VS +4
+.TS
+tab(#);
+l c lw(10c).
+extnd_pro( n)#:#T{
+Label set at the end of procedure \fIn\fP, to generate space for locals.
+T}
+extnd_start( n)#:#T{
+Label set at the beginning of procedure \fIn\fP, to jump back to after generating
+space for locals.
+T}
+extnd_name( l)#:#T{
+Create a name for a procedure named \fIl\fP.
+T}
+extnd_dnam( l)#:#T{
+Create a name for an external variable named \fIl\fP.
+T}
+extnd_dlb( g)#:#T{
+Create a name for numeric data label \fIg\fP.
+T}
+extnd_ilb( l, n)#:#T{
+Create a name for instruction label \fIl\fP in procedure \fIn\fP.
+T}
+extnd_hol( n)#:#T{
+Create a name for HOL block number \fIn\fP.
+T}
+extnd_part( n)#:#T{
+Create a unique label for the C_insertpart mechanism.
+T}
+extnd_cont( n)#:#T{
+Create another unique label for the C_insertpart mechanism.
+T}
+extnd_main( n)#:#T{
+Create yet another unique label for the C_insertpart mechanism.
+T}
+.TE
+.VS -4
+.IP A6.
+Some miscellaneous routines, with char *l; 
+.VS +4
+.TS
+tab(#);
+l c lw(10c).
+save_label( l)#:#T{
+Save label \fIl\fP. Unfortunately, in EM, when a label is encountered,
+it is not yet
+known in which segment it will end up. The save_label/dump_label mechanism
+is there to solve this problem.
+T}
+dump_label()#:#T{
+If there is a label saved, force definition for it now.
+T}
+align_word()#:#T{
+Align to a word boundary, if the current segment is not a text segment.
+T}
+.TE
+.VS -4
+.nr PS 12
+.nr VS 14
+.bp
+.SH 
+Appendix B, description of ACK-a.out library
+.PP 
+The object file produced by \fBce\fR is by default in ACK.OUT(5ACK)
+format. The object file is made up of one header, followed by
+four segment headers, followed by text, data, relocation information, 
+symbol table, and the string area. The object file is tuned for the ACK-LED,
+so there are some special things done just before the object file is dumped.
+First, four relocation records are added which contain the names of the four
+segments. Second, all the local relocation is resolved. This is done by the 
+function do_relo(). If there is a record belonging to a local
+name, this address is relocated in the segment to which the record belongs.
+Besides doing the local relocation, do_relo() changes the ``nami''-field
+of the local relocation records. This field receives the index of one of the
+four
+relocation records belonging to a segment. After the local
+relocation has been resolved the routine output_back() dumps the 
+ACK object file.
+.LP
+If a different a.out format is wanted, one can choose between three strategies:
+.IP \ \1:
+The simplest one is to use a conversion program, which converts the ACK
+a.out format to the wanted a.out format. This program exists for almost
+all machines on which ACK runs. However,
+not all conversion programs can generate relocation information.
+The disadvantage is that the compiler will become slower.
+.IP \ \2: 
+A better solution is to change the functions output_back(), do_relo(),
+open_back(), and close_back() in such a way
+that they produce the wanted a.out format. This strategy saves a lot of I/O.
+.IP \ \3:
+If this still is not satisfactory, the
+\fBback\fR-primitives can be adapted to produce the wanted a.out format.

+ 284 - 0
doc/ceg/proposal.tr

@@ -0,0 +1,284 @@
+.TL
+
+Code Expander
+.br
+(proposal)
+
+.SH
+Introduction
+.LP
+The \fBcode expander\fR, \fBce\fR, is a program that translates EM-code to
+object code. The main goal is to translate very fast. \fBce\fR is an instance
+of the EM_CODE(3L)-interface. During execution, \fBce\fR will build
+in core a machine independent object file (NEW A.OUT(5L)). With \fBcv\fR or
+with routines supplied by the user the machine independent object code will
+be converted to a machine dependent object code. \fBce\fR needs
+information about the target machine (e.g. the opcodes). We divide the
+information into two parts:
+.IP
+- The description of EM-code instructions in terms of assembly instructions.
+.IP
+- The description of assembly instructions in terms of object code.
+.LP
+With these two tables we can make a \fBcode expander generator\fR which
+generates a \fBce\fR. It is possible to put the information in one table,
+but that would probably introduce more bugs in the table. So we
+divide and conquer. With this approach it is also possible to generate
+assembly code (rather than object code), which is useful for debugging.
+There is of course a link between the two tables: the link
+consists of a restriction on the assembly format. Every assembly
+instruction must have the following format:
+.sp
+	INSTR ::= LABEL : MNEMONIC  [ OPERAND ( "," OPERAND)* ]
+.sp
+.LP
+\fBCeg\fR uses the following algorithm:
+.IP \0\0a)
+The assembly table will be converted to a (C-)routine assemble().
+assemble() gets a string, the assembly instruction, as its argument
+and uses the MNEMONIC to execute the corresponding action in the
+assembly table.
+.IP \0\0b)
+The routine assemble() can now be used to convert the EM-code table to
+a set of C-routines, which together form an instance of the
+EM_CODE(3L) interface.
+.SH
+The EM-instruction table
+.LP
+We use the following grammar:
+.sp
+.TS
+center box ;
+l.
+TABLE ::= (ROW)*
+ROW   ::= C_instr ( SPECIAL | SIMPLE)
+SPECIAL ::= ( CONDITION SIMPLE)+  'default'  SIMPLE
+SIMPLE ::= '==>' ACTIONLIST | '::=' ACTIONLIST
+ACTIONLIST ::= [ ACTION ( ';' ACTION)* ] '.'
+ACTION ::= function-call | assembly-instruction
+.TE
+.LP
+An example for the 8086:
+.LP
+.DS
+C_lxl
+	$arg1 == 0  ==>  "push bp".
+	$arg1 == 1  ==>  "push EM_BSIZE(bp)".
+        default     ==>  "mov cx, $arg1";
+		         "mov si, bp";
+		         "1: mov si, EM_BSIZE(si)";
+		         "loop 1b";
+		         "push si".
+.DE
+.sp
+Some remarks:
+.sp
+* The C_instr is a function identifier in the EM_CODE(3L) interface.
+.LP
+* CONDITION is a "boolean" C-expression. 
+.LP
+* The arguments of an EM-instruction can be used in CONDITION and in assembly
+instructions. They are referred to as $arg\fIi\fR. \fBceg\fR modifies the
+arguments as follows:
+.IP \0\0-
+For local variables at positive offsets it increases the offset by EM_BSIZE.
+.IP \0\0-
+It makes names and labels unique. The user must supply the formats (see mach.h).
+.LP
+* function-call is allowed to implement e.g. push/pop optimization.
+For example:
+.LP
+.DS
+C_adi   
+	$arg1 == 2   ==> combine( "pop ax");
+		 	 combine( "pop bx");
+		 	 "add ax, bx";
+                         save( "push ax").
+        default      ==> arg_error( "C_adi", $arg1).
+.DE
+.LP
+* The C-functions called in the EM-instruction table have to use the routines
+assemble()/gen?(). "assembler-instr" is in fact assemble( "assembler-instr").
+.LP
+* \fBceg\fR takes care not only of the conversion of arguments but also
+of
+changes between segments. There are situations in which one does not want
+conversion of arguments. This can be done by using ::= instead of ==>.
+This is useful when two C_instr's are equivalent. For example:
+.IP
+C_slu  ::=  C_sli( $arg1)
+.LP
+* There are EM_CODE instructions which are machine independent (e.g. C_open()).
+For these EM_CODE instructions \fBceg\fR will generate \fIdefault\fR
+instructions. There is one exception: in the case of C_pro() the table writer
+has to supply a function prolog().
+.LP
+* Also the EM pseudo-instructions C_bss_\fIcstp\fR(), C_hol_\fIcstp\fR(),
+C_con_\fIcstp\fR() and C_rom_\fIcstp\fR() can be translated automatically.
+\fBceg\fR only has to know how to interpret string constants:
+.DS
+\&..icon  $arg2 == 1  ==>  gen1( (char) atoi( $arg1))
+          $arg2 == 2  ==>  gen2( atoi( $arg1))
+          $arg2 == 4  ==>  gen4( atol( $arg1))
+\&..ucon  $arg2 == 1  ==>  gen1( (char) atoi( $arg1))
+	  $arg2 == 2  ==>  gen2( atoi( $arg1))
+    	  $arg2 == 4  ==>  gen4( atol( $arg1))
+\&..fcon  ::=  not_implemented( "..fcon")
+.DE
+.LP
+* Still, life can be made easier for the table writer: for the routines which
+he/she did not implement, \fBceg\fR will generate a default instruction which
+produces an error message. In effect \fBceg\fR generates:
+.IP
+C_xxx  ::=  not_implemented( "C_xxx")
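+.LP
+To make the link between the EM-instruction table and the generated code more
+concrete, the following sketch shows roughly what \fBceg\fR could emit for
+the C_lxl row given earlier. It is illustrative only: the exact names, the
+argument type and the way $arg1 is substituted into the strings are glossed
+over here.
+.DS
+/* Hypothetical output of ceg for the C_lxl row above. */
+C_lxl(arg1)
+	arith arg1;		/* the EM_CODE integer type */
+{
+	if (arg1 == 0)
+		assemble("push bp");
+	else if (arg1 == 1)
+		assemble("push EM_BSIZE(bp)");
+	else {
+		/* $arg1 is substituted into the string at run time */
+		assemble("mov cx, $arg1");
+		assemble("mov si, bp");
+		assemble("1: mov si, EM_BSIZE(si)");
+		assemble("loop 1b");
+		assemble("push si");
+	}
+}
+.DE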
+.SH
+The assembly table
+.LP
+How to map assembly onto object code.
+.LP
+Each row in the table consists of two fields, one field for the assembly
+instruction, the other field for the corresponding object code. The table writer
+can use the following primitives to generate code for the machine
+instructions:
+.IP "\0\0gen1( b)\0\0:" 17
+generates one byte in the machine independent object file.
+.IP "\0\0gen2( w)\0\0:" 17
+generates one word ( = two bytes), the table writer can change the byte
+order by setting the flag BYTES_REVERSED.
+.IP "\0\0gen4( l)\0\0:" 17
+generates two words ( = four bytes), the table writer can change the word
+order by setting the flag WORDS_REVERSED.
+.IP "\0\0reloc( n, o, r)\0\0:" 17
+generates relocation information for a label ( = name + offset +
+relocation type).
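+.LP
+As an illustration of how these primitives relate to each other, gen2() and
+gen4() could be built on top of gen1() roughly as sketched below. This is not
+the actual library source; it merely shows the intended effect of the
+BYTES_REVERSED and WORDS_REVERSED flags (a compile-time test is assumed here).
+.DS
+/* Illustrative sketch only. */
+gen2(w)
+	int w;
+{
+#ifdef BYTES_REVERSED		/* high byte first */
+	gen1((w >> 8) & 0377); gen1(w & 0377);
+#else				/* low byte first */
+	gen1(w & 0377); gen1((w >> 8) & 0377);
+#endif
+}
+
+gen4(l)
+	long l;
+{
+#ifdef WORDS_REVERSED		/* high word first */
+	gen2((int)(l >> 16)); gen2((int)l);
+#else				/* low word first */
+	gen2((int)l); gen2((int)(l >> 16));
+#endif
+}
+.DE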
+.LP
+Besides these primitives the table writer may use his own
+C-functions. This allows the table writer, for example, to write functions
+that set bit fields within a byte.
+.LP
+There are more or less two methods to encode the assembly instructions:
+.IP \0\0a)
+MNEMONIC and OPERAND('s) are encoded independently of each other. This can be
+done when the target machine has an orthogonal instruction set (e.g. pdp-11).
+.IP \0\0b)
+MNEMONIC and OPERAND('s) together determine the opcode. In this case the
+assembler often uses overloading: one MNEMONIC is used for several
+different machine instructions. For example (8086):
+.br
+	mov ax, bx
+.br
+	mov ax, variable
+.br
+These instructions have different opcodes.
+.LP
+As the transformation MNEMONIC-OPCODE is not one to
+one the table writer must be allowed to put restrictions on the operands.
+This can be done with type declarations. For example:
+.LP
+.DS
+	mov  dst:REG, src:MEM  ==>
+		gen1( 0x8b);
+		modRM( op2.reg, op1);
+.DE
+.DS
+	mov  dst:REG, src:REG  ==>
+		gen1( 0x89);
+		modRM( op2.reg, op1);
+.DE
+.LP
+modRM() is a function written by the table writer and is used to encode
+the operands. This frees the table writer from endless typing.
+.LP
+The table writer has to do the "typechecking" by himself. But typechecking
+is almost the same as operand decoding. So it's more efficient to do this
+in one function. We now have all the tools to describe the function
+assemble(). 
+.IP
+assemble() first calls the function
+decode_operand() (written by the table writer), with two arguments: a
+string (the operand) and a
+pointer to a struct. The struct is declared by the table writer and must
+contain at least a field called type. (The other fields in the struct can
+be used to remember information about the decoded operand.) Now assemble()
+fires the row which is selected by matching the MNEMONIC and the types of the
+operands.
+.br
+In the second field of a row there may be references to other
+fields in the struct (e.g. op2.reg in the example above).
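+.LP
+A minimal sketch of such a struct, for an 8086-like machine, could look as
+follows. Only the \fItype\fR field is required; the other field names are
+purely illustrative.
+.DS
+/* Hypothetical operand struct declared by the table writer. */
+struct operand {
+	int	type;	/* REG, MEM, DATA, ...; set by decode_operand() */
+	int	reg;	/* register number, when type is REG             */
+	char	*off;	/* offset or immediate text, when applicable     */
+};
+
+struct operand op1, op2;	/* the decoded operands used in the rows */
+.DE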
+.LP
+We ignored one problem: when the operands are encoded, not everything
+may be known yet. For example, $arg\fIi\fR arguments in the
+EM-instruction table only get their value at run time. This problem is solved by
+introducing a function eval(). eval() takes a string as argument and returns
+an arith. The string consists of constants and/or $arg\fIi\fR's, and the value
+returned by eval() is the value of the string. To encode the $arg\fIi\fR's
+in as few bytes as possible the table writer can use the statements %if,
+%else and %endif. They are used in the same manner as #if, #else and
+#endif in C and result in a run-time test. An example:
+.LP
+.DS
+ -- Some rows of the assembly table
+ 
+ mov dst:REG, src:DATA  ==>
+        %if  sfit( eval( src), 8)   /* does the immediate-data fit in 1 byte? */
+ 		R53( 0x16 , op1.reg);
+ 	     	gen1( eval( src));
+        %else
+ 	      	R53( 0x17 , op1.reg);
+ 	      	gen2( eval( src));
+        %endif
+.LD
+ 
+ mov dst:REG, src:REG  ==>
+        gen1( 0x8b);
+        modRM( op1.reg, op2);
+ 
+.DE 
+.DS
+ -- The corresponding part in the function assemble() :
+ 
+ case MNEM_mov : 
+ 		decode_operand( arg1, &op1);
+ 		decode_operand( arg2, &op2);
+ 		if ( REG( op1.type) && DATA( op2.type)) {
+ 			printf( "if ( sfit( %s, 8)) {\\\\n", eval( src));
+ 			R53( 0x16 , op1.reg);
+ 			printf( "gen1( %s)\\\\n", eval( arg2));
+ 			printf( "}\\\\nelse {\\\\n");
+ 			R53( 0x17 , op1.reg);
+ 			printf( "gen2( %s)\\\\n", eval( arg2));
+ 			printf( "}\\\\n");
+ 		}
+ 		else if ( REG( op1.type) && REG( op2.type)) {
+ 			gen1( 0x8b);
+ 			modRM( op1.reg, op2);
+ 		}
+ 
+ 
+.DE
+.DS
+ -- Some rows of the right part of the EM-instruction table are translated
+ -- into the following C-functions.
+
+ "mov ax, $arg1" ==>
+ 	if ( sfit( w, 8)) {	/* w is the actual argument of C_xxx( w) */
+ 		gen1( 176);	/* R53() */
+ 		gen1( w);
+ 	}
+ 	else {
+ 		gen1( 184);
+ 		gen2( w);
+ 	}
+.LD 
+
+ "mov ax, bx"    ==> 
+ 	gen1( 138);
+  	gen1( 99);		/* modRM() */
+.DE
+.SH
+Restrictions
+.LP
+.IP \0\01)
+The EM-instruction C_exc() is not implemented.
+.IP \0\02)
+All messages are ignored.

+ 12 - 0
doc/ceg/proto.make

@@ -0,0 +1,12 @@
+# $Id$
+
+#PARAMS         do not remove this line!
+
+SRC_DIR = $(SRC_HOME)/doc/ceg
+
+PIC=pic
+TBL=tbl
+REFER=refer
+
+$(TARGET_HOME)/doc/ceg.doc:	$(SRC_DIR)/ceg.tr $(SRC_DIR)/ceg.ref
+	$(PIC) $(SRC_DIR)/ceg.tr | $(REFER) -e -p $(SRC_DIR)/ceg.ref | $(TBL) > $@

+ 276 - 0
doc/ceg/prototype.tr

@@ -0,0 +1,276 @@
+.TL 
+A prototype Code expander
+.NH
+Introduction
+.PP
+A program to be compiled with ACK is first fed into the preprocessor.
+The output of the preprocessor goes into the appropriate front end,
+whose job it is to produce EM. The EM code generated is
+fed into the peephole optimizer, which scans it with a window of a few
+instructions, replacing certain inefficient code sequences by better
+ones. The peephole optimizer is followed by a back end which produces
+good assembly code. The assembly code goes into the assembler, and the object code
+then goes into the loader/linker, the final component in the pipeline.
+.PP
+For various applications this scheme is too slow, for example for testing
+programs. In that case the program has to be translated fast, and the
+run time of the object code may be slower. A solution is to build a code
+expander (\fBce\fR) which translates EM code to object code. Of course this
+has to
+be done automatically by a code expander generator, but to get some feeling
+for the problem we started out by building prototypes.
+We built two types of ce's: one which translated EM to assembly, and one
+which translated EM to object code.
+.NH
+EM to assembly
+.PP
+We made one for the 8086 and one for the vax4. These ce's are instances of the
+EM_CODE(3L) interface and produce, for a single EM instruction, a set
+of assembly instructions which is semantically equivalent.
+In the 8086 ce we implemented push/pop optimization.
+.NH
+EM to objectcode
+.PP
+Instead of producing assembly code we tried to produce vax4 object code.
+During execution, ce builds in core a machine independent
+object file (NEW A.OUT(5L)) and just before dumping the tables this
+object file is converted to a Berkeley 4.2BSD a.out file. We built two versions:
+one with static memory allocation and one with dynamic memory allocation.
+If the first one runs out of memory it will give an error message and stop;
+the second one will allocate more memory and proceed with producing
+object code.
+.PP
+The C front end calls the EM_CODE interface, so after linking the front end
+and the ce we have the whole pipeline in one program, saving a lot of I/O.
+It is interesting to compare this C compiler (called fcemcom) with "cc -c".
+fcemcom1 (the dynamic variant of fcemcom) is tuned in such a way that
+alloc() won't be called.
+.NH 2
+Compile time
+.PP
+fac.c is a small program that computes n! (see below); foo.c is a small program
+that loops a lot.
+.TS
+center, box, tab(:);
+c | c | c | c | c | c
+c | c | n | n | n | n.
+compiler : program : real : user : sys : object size
+=
+fcemcom : sort.c : 31.0 : 17.5 : 1.8 : 23824
+fcemcom1 : : 59.0 : 21.2 : 3.3 : 
+cc -c : : 50.0 : 38.0 : 3.5 : 6788
+_
+fcemcom : ed.c : 37.0 : 23.6 : 2.3 : 41744
+fcemcom1 : : 1.16.0 : 28.3 : 4.6 : 
+cc -c : : 1.19.0 : 54.8 : 4.3 : 11108
+_
+fcemcom : cp.c :  4.0 : 2.4 : 0.8 : 4652
+fcemcom1 : : 9.0 : 3.0 : 1.0 : 
+cc -c : :  8.0 : 5.2 : 1.6 : 1048
+_
+fcemcom : uniq.c : 5.0 : 2.5 : 0.8 : 5568
+fcemcom1 : : 9.0 : 2.9 : 0.8 : 
+cc -c : : 13.0 : 5.4 : 2.0 : 3008
+_
+fcemcom : btlgrep.c : 24.0 : 7.2 : 1.4 : 12968
+fcemcom1 : : 23.0 : 8.1 : 1.2 : 
+cc -c : : 1.20.0 : 15.3 : 3.8 : 2392
+_
+fcemcom : fac.c : 1.0 : 0.1 : 0.5 : 216
+fcemcom1 : : 2.0 : 0.2 : 0.5 : 
+cc -c : : 3.0 : 0.7 : 1.3 : 92
+_
+fcemcom : foo.c : 4.0 : 0.2 : 0.5 : 272
+fcemcom1 : : 11.0 : 0.3 : 0.5 : 
+cc -c : : 7.0 : 0.8 : 1.6 : 108
+.TE
+.NH 2
+Run time
+.LP
+Is the run time very bad?
+.TS
+tab(:), box, center;
+c | c | c | c | c
+c | c | n | n | n.
+compiler : program : real : user : system
+=
+fcem : sort.c : 22.0 : 17.5 : 1.5
+cc : : 5.0 : 2.4 : 1.1
+_
+fcem : btlgrep.c : 1.58.0 : 27.2 : 4.2
+cc : : 12.0 : 3.6 : 1.1
+_
+fcem : foo.c : 1.0 : 0.7 : 0.1
+cc : : 1.0 : 0.4 : 0.1
+_
+fcem : uniq.c : 2.0 : 0.5 : 0.3
+cc : : 1.0 : 0.1 : 0.2
+.TE
+.NH 2
+Quality of the object code
+.LP
+The run time is very bad, so it is interesting to have a look at the code which is
+produced by fcemcom and by cc -c. I took a program which recursively computes
+n!.
+.DS
+long fac();
+
+main()
+{
+	int n;
+
+	scanf( "%D", &n); 
+	printf( "fac is %D\\\\n", fac( n));
+}
+
+long fac( n)
+int n;
+{
+	if ( n == 0)
+		return( 1);
+	else
+		return( n * fac( n-1));
+}
+.DE
+.br
+.br
+.br
+.br
+.LP
+"cc -c fac.c" produces :
+.DS 
+fac:	tstl 4(ap)
+	bnequ 7f
+	movl $1, r0
+	ret
+7f:	subl3 $1, 4(ap), r0
+	pushl r0
+	call $1, fac
+	movl r0, -4(fp)
+	mull3 -4(fp), 4(ap), r0
+	ret
+.DE
+.br
+.br
+.LP
+"fcem fac.c fac.o" produces :
+.DS 
+_fac:		0
+42:		jmp	be
+48:		pushl	4(ap)
+4e:		pushl	$0
+54:		subl2	(sp)+,(sp)
+57:		tstl	(sp)+
+59:		bnequ	61
+5b:		jmp	67
+61:		jmp	79
+67:		pushl	$1
+6d:		jmp	ba
+73:		jmp	b9
+79:		pushl	4(ap)
+7f:		pushl	$1
+85:		subl2	(sp)+,(sp)
+88:		calls	$0,_fac
+8f:		addl2	$4,sp
+96:		pushl	r0
+98:		pushl	4(ap)
+9e:		pushl	$4
+a4:		pushl	$4
+aa:		jsb	.cii
+b0:		mull2	(sp)+,(sp)
+b3:		jmp	ba
+b9:		ret
+ba:		movl	(sp)+,r0
+bd:		ret
+be:		jmp	48
+.DE
+.NH 1
+Conclusions
+.PP
+Comparing "cc -c" with "fcemcom":
+.LP
+.TS
+center, box, tab(:);
+c | c  s | c | c  s
+^ | c  s | ^ | c  s
+^ | c | c | ^ | c | c
+l | n | n | n | n | n.
+program : compile time : object size : runtime
+:_::_
+: user : sys :: user : sys
+=
+sort.c : 0.47 : 0.5 : 3.5 : 7.3 : 1.4
+_
+ed.c : 0.46 : 0.5 : 3.8 : : :
+_
+cp.c : 0.46 : 0.5 : 4.4 : : :
+_
+uniq.c : 0.46 : 0.4 : 1.8 : : :
+_
+btlgrep.c : 0.47 : 0.3 : 5.4 : 7.5 : 3.8
+_
+fac.c : 0.14 : 0.4 : 2.3 : 1.8 : 1.0
+_
+foo.c : 0.25 : 0.3 : 2.5 : 5.0 : 1.5
+.TE
+.PP
+The results for fcemcom1 are almost identical; the only thing that changes
+is that fcemcom1 is 1.2 times slower than fcemcom (in compile time). This is due
+to a different data structure. In the static version we use huge arrays for
+the text and
+data segments, the relocation information, the symbol table and the string area.
+In the dynamic version we use linked lists, which makes it expensive to get
+and to put a byte at an arbitrary memory location. So it is probably better
+to use realloc(), because in most cases there will be enough memory.
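+.LP
+A sketch of that suggested set-up is given below. The names and the initial
+size are made up for the purpose of illustration; this is not code from the
+prototype itself.
+.DS
+/* One growable array per segment; grow it with realloc() on demand. */
+char		*text;		/* segment contents	*/
+unsigned	text_size;	/* bytes allocated	*/
+unsigned	text_len;	/* bytes actually used	*/
+
+put_text_byte(b)
+	int b;
+{
+	if (text_len == text_size) {
+		text_size = text_size ? 2 * text_size : 1024;
+		text = text ? realloc(text, text_size) : malloc(text_size);
+		if (text == 0)
+			fatal("out of memory");
+	}
+	text[text_len++] = b;
+}
+.DE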
+.PP
+The quality of the object code is very bad. The reason is that the front end
+generates bad code and expects the peephole optimizer to improve the code.
+This is also one of the main reasons that the run time is very bad
+(e.g. the expensive "cii" with arguments 4 and 4 could be deleted).
+So it seems a good
+idea to put a new peephole optimizer between the front end and the ce.
+.PP
+Using the peephole optimizer the ce would produce :
+.DS
+_fac:	0
+	pushl	4(ap)
+	tstl	(sp)+
+	beqlu	1f
+	jmp	3f
+ 1 :	pushl	$1
+	jmp	2f
+ 3 :	pushl	4(ap)
+	decl	(sp)
+	calls	$0,_fac
+	addl2	$4,sp
+	pushl	r0
+	pushl	4(ap)
+	mull2	(sp)+,(sp)
+	movl	(sp)+,r0
+  2 :   ret
+.DE
+.PP
+Bruce McKenzy has already implemented this and made some improvements in the
+source code of the ce. The compile time is two to two-and-a-half times better
+and the
+size of the object code is two to three times bigger (compared with "cc -c").
+Still we could do better.
+.PP
+Using peephole- and push/pop-optimization ce could produce :
+.DS 
+_fac:		0
+	tstl	4(ap)
+	beqlu	1f
+	jmp	2f
+  1 :	pushl	$1
+	jmp	3f
+  2 :	decl	4(ap)
+	calls	$0,_fac
+	addl2	$4,sp
+	mull3	4(ap), r0, -(sp)
+	movl 	(sp)+, r0
+  3 : 	ret
+.DE
+.PP
+prof doesn't cooperate, so no profile information.
+.PP

+ 1864 - 0
doc/cg.doc

@@ -0,0 +1,1864 @@
+.\" $Id$
+.RP
+.ND Nov 1984
+.TL
+The table driven code generator from 
+.br
+the Amsterdam Compiler Kit
+.AU
+Hans van Staveren
+.AI
+Dept. of Mathematics and Computer Science
+Vrije Universiteit
+Amsterdam, The Netherlands
+.AB
+It is possible to automate the process of compiler building
+to a great extent using collections of tools.
+The Amsterdam Compiler Kit is such a collection of tools.
+This document provides a description of the internal workings
+of the table driven code generator in the Amsterdam Compiler Kit,
+and a description of syntax and semantics of the driving table.
+.PP
+>>>  NOTE  <<<
+.br
+This document pertains to the \fBold\fP code generator.  Refer to the
+"Second Revised Edition" for the new code generator.
+.AE
+.NH 1
+Introduction
+.PP
+Part of the Amsterdam Compiler Kit is a code generator system consisting
+of a code generator generator (\fIcgg\fP for short) and some machine
+independent C code.
+.I Cgg
+reads a machine description table and creates two files,
+tables.h and tables.c.
+These are then used together with other C code to produce
+a code generator for the machine at hand.
+.PP
+This in turn reads compact EM code and produces
+assembly code.
+The remainder of this document will first broadly describe
+the working of the code generator,
+then a description of the machine table follows after which
+the internal workings of the code generator will be explained.
+.PP
+The reader is assumed to have at least a vague notion about the
+semantics of the intermediary EM code.
+Someone wishing to write a table for a new machine
+should be thoroughly acquainted with EM code
+and the assembly code of the machine at hand.
+.NH 1
+Global overview of the workings of the code generator.
+.PP
+The code generator or
+.I cg
+tries to generate good code by simulating the runtime stack
+of the program compiled and delaying emission of code as long
+as possible.
+It also keeps track of register contents, which enables it to
+eliminate redundant moves, and tries to eliminate redundant tests
+by keeping information about condition code status,
+if applicable for the machine.
+.PP
+.I Cg
+maintains a `fakestack' containing `tokens' that are built
+by executing the pseudo code contained in the code rules given
+by the table writer.
+One can think of the fakestack as a logical extension of the real
+stack the program compiled will have when run.
+During code generation tokens will be kept on the fakestack as long
+as possible but when they are moved to the real stack,
+by generating code for the push,
+all tokens above\u*\d
+.FS
+* in the rest of this document the stack is assumed to grow downwards,
+although the top of the stack will mean the first element that will
+be popped.
+.FE
+the tokens pushed will be pushed also,
+so that the fakestack will not contain holes.
+.PP
+The main loop of
+.I cg
+is this:
+.IP 1)
+Find a pattern of EM instructions starting at the current one to
+generate code for.
+This pattern will usually be of length one but longer patterns can be used.
+.IP 2)
+Select one of the possibly many stack patterns that go with this
+EM pattern on the basis of heuristics and/or lookahead.
+.IP 3)
+Force the current fakestack contents to match the pattern.
+This may involve
+copying tokens to registers, making dummy transformations, e.g. to
+transform a "local" into an "register offsetted" or might even
+cause to have the complete fakestack contents put to the real stack
+and then back into registers if no suitable transformations
+were provided by the table writer.
+.IP 4)
+Execute the pseudocode associated with the code rule just selected,
+this may cause registers to be allocated,
+code to be emitted etc..
+.IP 5)
+Put tokens onto the fakestack to reflect the result of the operation.
+.IP 6)
+Insert some EM instructions into the stream,
+this is possible but not common.
+.IP 7)
+Account for the cost.
+The cost is kept in a (space, time) vector and lookahead decisions
+are based on a linear combination of these.
+.PP
+The table that drives
+.I cg
+is not read in every time,
+but instead is used at compiletime
+of
+.I cg
+to set parameters and to load pseudocode tables.
+A program called
+.I cgg
+reads the table and produces large lists of numbers that are
+compiled together with machine independent code to produce
+a code generator for the machine at hand.
+.NH 1
+Description of the machine table
+.PP
+The machine description table consists of the following sections:
+.IP 1)
+Constant definitions
+.IP 2)
+Register definitions
+.IP 3)
+Token definitions
+.IP 4)
+Token expression definitions
+.IP 5)
+Code rules
+.IP 6)
+Move definitions
+.IP 7)
+Test definitions
+.IP 8)
+Stacking definitions
+.PP
+Input is in free format, white space and newlines may be used
+at will to improve legibility.
+Identifiers used in the table have the same syntax as C identifiers,
+upper and lower case considered different, all characters significant.
+There is however one exception:
+identifiers must be more than one character long for parsing reasons.
+C style comments are accepted
+.DS
+	/* this is a comment */
+.DE
+and #define macros may be used if the need arises.
+.NH 2
+Some constants
+.PP
+Before anything else three constants must be defined,
+all with the syntax NAME=value, value being an integer.
+These constants are:
+.IP EM_WSIZE 10
+Number of bytes in a machine word.
+This is the number of bytes
+a simple \fBloc\fP instruction will put on the stack.
+.IP EM_PSIZE
+Number of bytes in a pointer.
+This is the number of bytes
+a \fBlal\fP instruction will put on the stack.
+.IP EM_BSIZE
+Number of bytes in the hole between AB and LB.
+If the calling sequence just saves PC and LB this
+size will be twice the pointersize.
+.PP
+EM_WSIZE and EM_PSIZE are checked when a program is compiled
+with the resulting code generator.
+EM_BSIZE is used by
+.I cg
+to add to the offset of instructions dealing with locals
+having positive offsets,
+i.e. parameters.
+.PP
+Optionally one can give here the factors with which the size and time
+parts of the cost function have to be multiplied to ensure they have the
+same order of magnitude.
+This can be done as
+.DS
+TIMEFACTOR = C\d1\u/C\d2\u
+SIZEFACTOR = C\d3\u/C\d4\u
+.DE
+Above numbers must be read as rational numbers.
+Defaults are 1/1 for both of them.
+These constants set the default size/time tradeoff in the code generator,
+so if TIMEFACTOR and SIZEFACTOR are both 1 the code generator will choose
+at random between two codesequences where one has
+cost (10,4) and the other has cost (8,6).
+See also the description of the cost field below.
+.PP
+Also optional is the definition of a printformat for integers in the codefile.
+This is given as
+.DS
+FORMAT = string
+.DE
+The default for string is "%ld".
+For example on the PDP 11 one can use
+.DS
+FORMAT= "0%lo"
+.DE
+to satisfy the old UNIX assembler that reads octal unless followed by
+a period, and the ACK assembler that follows C conventions.
+.NH 2
+Register definition
+.PP
+The next part of the tables describes the various registers of the
+machine and defines identifiers
+to be used in later parts of the tables.
+Example for the PDP-11:
+.DS L
+REGISTERS:
+R0 = ( "r0",2), REG.
+R1 = ( "r1",2), REG, ODDREG.
+R2 = ( "r2",2), REG.
+R3 = ( "r3",2), REG, ODDREG.
+R4 = ( "r4",2), REG.
+LB = ( "r5",2), LOCALBASE.
+R01= ( "r0",4,R0,R1), REGPAIR.
+R23= ( "r2",4,R2,R3), REGPAIR.
+FR0= ( "r0",4), FREG.
+FR1= ( "r1",4), FREG.
+FR2= ( "r2",4), FREG.
+FR3= ( "r3",4), FREG.
+DR0= ( "r0",8,FR0), DREG.
+DR1= ( "r1",8,FR1), DREG.
+DR2= ( "r2",8,FR2), DREG.
+DR3= ( "r3",8,FR3), DREG.
+.DE
+.PP
+The identifier before the '=' sign is the name of the register
+as used further on in the table.
+The string is the name of the register as far as the assembler is concerned.
+The number is the size of the register in bytes.
+Identifiers following the number but within the parentheses are previously
+defined registernames that are contained in the register being defined.
+The identifiers following the closing parenthesis are properties
+of the register.
+So for example R23 is a register with assembler name r2, 4 bytes long,
+contains the registers R2 and R3 and has the property REGPAIR.
+.PP
+It might seem wise to list each and every property of a register,
+so one might give R0 the extra property MFPTREG named after the not
+too well known MFPT instruction on newer PDP-11 types,
+but this is not a good idea.
+Every extra property means the registerset is more unorthogonal
+and 
+.I cg
+execution time is influenced by that,
+because it has to take into account a larger set of registers
+that are not equivalent.
+.PP
+There is a predefined property SCRATCH that is dynamic,
+i.e. a register can have the property SCRATCH one time,
+and loose it the next.
+A register has the property SCRATCH when it has a reference count of one.
+One needs to be able to discriminate between SCRATCH registers
+and others,
+because it is only allowed to do arithmetic on
+SCRATCH registers.
+.NH 2
+Stack token definition
+.PP
+The next part describes all possible tokens that can reside on
+the fakestack during code generation.
+Attributes of a token are described in the form of a C struct declaration,
+this is followed by the size in bytes of the token,
+optionally followed by the cost of the token when used as an addressing mode
+and the format
+to be used on output.
+.PP
+Tokens should usually be declared for every addressing mode
+of the machine at hand and for every size directly usable in
+a machine instruction.
+Example for the PDP-11 (incomplete):
+.DS L
+TOKENS:
+IREG2 =		{ REGISTER reg; } 2 "*%[reg]" /* indirect register */
+REGCONST =	{ REGISTER reg; STRING off; } 2 /* not really addressable */
+REGOFF2 =	{ REGISTER reg; STRING off; } 2 "%[off](%[reg])"
+IREGOFF2 =	{ REGISTER reg; STRING off; } 2 "*%[off](%[reg])"
+CONST =		{ INT off; } 2 cost=(2,850) "$%[off]."
+EXTERN2 =	{ STRING off; } 2 "%[off]"
+IEXTERN2 =	{ STRING off; } 2 "*%[off]"
+PAIRSIGNED =	{ REGISTER regeven,regodd; } 2 "%[regeven]"
+.DE
+.PP
+Types allowed in the struct are REGISTER, INT and STRING.
+Tokens without a printformat should never be output.
+.PP
+Notice that tokens need not correspond to addressing modes,
+the REGCONST token listed above,
+meaning the sum of the contents of the register and the constant,
+has no corresponding addressing mode on the PDP-11,
+but is included so that a sequence of add constant, load indirect,
+can be handled efficiently.
+This REGCONST token is needed as part of the path
+.DS
+REGISTER -> REGCONST -> REGOFF
+.DE
+of which the first and the last "exist" and the middle is needed
+only as an intermediate step.
+.NH 2
+Token expressions
+.PP
+Usually machines have certain collections of addressing modes that
+can be used with certain instructions.
+The stack patterns in the table are lists of these collections
+and since it is cumbersome to write out these long lists
+every time, there is a section here to give names to these
+collections.
+Please note that it is not forbidden to write out a token expression
+in the remainder of the table,
+but for clarity it is usually better not to.
+Example for the PDP-11 (incomplete):
+.DS L
+TOKENEXPRESSIONS:
+SOURCE2 = REG + IREG2 + REGOFF2 + IREGOFF2 + CONST + EXTERN2 +
+	  IEXTERN2
+SREG    = REG * SCRATCH
+.DE
+Permissible in the expressions are all PASCAL set operators, i.e.
+.IP +
+set union
+.IP -
+set difference
+.IP *
+set intersection
+.PP
+Every tokenidentifier is also a token expression identifier
+denoting the singleton collection of tokens containing
+just itself.
+Every register property as defined above is also a token expression
+matching all registers with that property when on the fakestack.
+The standard token expression identifier ALL denotes the collection of 
+all tokens.
+.NH 2
+Expressions
+.PP
+Throughout the rest of the table expressions can be used in some
+places.
+This section will give the syntax and semantics of expressions.
+There are four types of expressions: integer, string, register and undefined.
+Type checking is performed by
+.I cgg .
+An operator with at least one undefined operand returns undefined except
+for the defined() function mentioned below.
+An undefined expression is interpreted as FALSE when it is needed
+as a truth value.
+Basic terms in an expression are
+.IP number 16
+A number is a constant of type integer.
+.IP "string"
+A string within double quotes is a constant of type string.
+All the normal C style escapes may be used within the string.
+.IP REGIDENT
+The name of a register is a constant of type register.
+.IP $\fIi\fP
+A dollarsign followed by a number is the representation of the argument
+of EM instruction \fIi\fP.
+The type of the operand is dependent on the instruction,
+sometimes it is integer,
+sometimes it is string.
+It is undefined when the instruction has no operand.
+.br
+Although an exhaustive list could be given describing all the types
+the following rule of thumb will suffice.
+If it is unimaginable for the operand of the instruction ever to be
+something different from a plain integer, the type is integer,
+otherwise it is string.
+.br
+.I Cg
+makes all necessary conversions,
+like adding EM_BSIZE to positive arguments of instructions
+dealing with locals,
+prepending underlines to global names,
+converting codelabels into a unique representation etc.
+Details about this can be found in the section about
+machine dependent C code.
+.IP %[1]
+This in general means the token mentioned first in the
+stack pattern.
+When used inside an expression the token must be a simple register.
+Type of this is register.
+.IP %[1.off]
+This means field "off" of the first stack pattern token.
+Type is the same as that of field "off".
+To use this expression implies a check that all tokens
+in the token expression used have the same attributes.
+.IP %[1.1]
+This is the first subregister of the first token.
+Previous comments apply.
+.IP %[b]
+The second allocated register.
+.IP %[a.2]
+The second subregister of the first allocated register.
+.PP
+All normal C operators apply to integers,
+the + operator serves for string concatenation
+and register expressions can only be compared to each other.
+Furthermore there are some special "functions":
+.IP tostring(e) 16
+Converts an integer expression e to a string.
+.IP defined(e)
+Returns 1 if expression e is defined, 0 otherwise.
+.IP samesign(e1,e2)
+Returns 1 if integer expression e1 and e2 have the same sign.
+.IP sfit(e1,e2)
+Returns 1 if integer expression e1 fits as a signed integer
+into a field of e2 bits, 0 otherwise.
+.IP ufit(e1,e2)
+Same as above but now for unsigned e1 (a sketch of both is given after this list).
+.IP rom(a,n)
+Integer expression giving the n'th argument from the \fBrom\fP descriptor
+pointed at by the a'th EM instruction.
+Undefined if that descriptor does not exist.
+.IP loww(a)
+Returns the lower half of the argument of the a'th EM instruction.
+This is used to split the arguments of a \fBldc\fP instruction.
+.IP highw(a)
+Same for upper half.
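+.PP
+As an illustration of the intended meaning of sfit() and ufit(), they behave
+roughly as in the following C sketch. This is not the code generator's own
+source, and it ignores fields as wide as a long.
+.DS L
+sfit(val, nbits) long val; int nbits; {
+	/* does val fit in nbits bits as a signed integer? */
+	long lim = 1L << (nbits - 1);
+
+	return val >= -lim && val < lim;
+}
+
+ufit(val, nbits) long val; int nbits; {
+	/* does val fit in nbits bits as an unsigned integer? */
+	return val >= 0 && val < (1L << nbits);
+}
+.DE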
+.NH 2
+Code rules
+.PP
+The largest section of the tables consists of the code generation rules.
+They specify EM patterns, stack patterns, code to be generated etc.
+Syntax is
+.DS L
+code rule : EM pattern '|' stack pattern '|' code '|' 
+	   stack replacement '|' EM replacement '|' cost ;
+.DE
+All parts are optional, however there must be at least one pattern present.
+If the empattern is missing the rule becomes a rewriting rule or
+.I coercion
+to be used when code generation cannot continue 
+because of an invalid stack pattern.
+The code rules are preceded by the word
+.DS
+CODE:
+.DE
+The next paragraphs describe the various parts in detail.
+.NH 3
+The EM pattern
+.PP
+The EM pattern consists of a list of EM mnemonics followed
+by a boolean expression.
+Examples:
+.DS
+\fBloe\fP
+.DE
+will match a single \fBloe\fP instruction,
+.DS
+\fBloc\fP \fBloc\fP \fBcif\fP $1==2 && $2==8
+.DE
+is a pattern that will match
+.DS
+\fBloc\fP 2
+\fBloc\fP 8
+\fBcif\fP
+.DE
+and
+.DS
+\fBlol\fP \fBinc\fP \fBstl\fP $1==$3
+.DE
+will match for example
+.DS
+.ta 10m 20m 30m 40m 50m 60m
+\fBlol\fP 6	\fBlol\fP -2		\fBlol\fP 4
+\fBinc\fP	\fBinc\fP	but \fInot\fP	\fBinc\fP
+\fBstl\fP 6	\fBstl\fP -2		\fBstl\fP -4
+.DE
+A missing boolean expression evaluates to TRUE.
+.PP
+When the EM pattern is the same as in the previous code rule the pattern
+should be given as `...'.
+The code generator will match the longest EM pattern on every occasion,
+if two patterns of the same length match the first in the table will be chosen,
+while all patterns of length greater than or equal to three are considered
+to be of the same length.
+.NH 3
+The stack pattern
+.PP
+The stack pattern is a list of token expressions,
+usually token expression identifiers for clarity.
+No boolean expression is allowed here.
+The first expression is the one that matches the top of the stack.
+.PP
+The pattern can be followed by the word STACK
+in which case the pattern only matches if there is nothing
+else on the fakestack.
+The code generator will stack everything not matched at the start
+of the rule.
+.PP
+The pattern can be preceded with the word
+.DS
+nocoercions:
+.DE
+which tells the code generator not to try to coerce to the pattern
+but only to use it when it is already there.
+There are two reasons for this construction,
+correctness and speed.
+It is needed for correctness when the pattern contains a register
+that is not transparent when data is moved through it.
+.PP
+Example: on the PDP-11 the shortest code for
+.DS
+\fBlae\fP a
+\fBloi\fP 8
+\fBlae\fP b
+\fBsti\fP 8
+.DE
+is
+.DS
+movf _a,fr0
+movf fr0,_b
+.DE
+assuming that the floating point processor is in double
+precision mode and fr0 is free.
+Unfortunately this is not correct since a trap can occur on certain
+kinds of data.
+This could happen if there was a pattern for \fBsti\fP\ 8 that allowed
+one to move a floating point register not preceded by nocoercions: .
+The code generator would then find that moving the 8-byte global _a
+to a floating point register and then storing it to _b was the cheapest,
+assuming that the space/time knob was turned far enough to space.
+It is unfortunate that the type information is no longer present,
+since if _a really is a floating point number the move could be
+made without error.
+.PP
+The second reason for the nocoercions: construct is speed.
+When the code generator has a long list of possible stack patterns
+for one EM pattern it can waste a lot of time trying to find coercions
+to all of them, while the mere presence of such a long list
+indicates that the table writer has given a lot of special cases.
+In this case prepending all the special cases by nocoercions:
+will stop the code generator from trying to find things there aren't.
+.NH 3
+The code part
+.PP
+The code part consists of three parts, stack cleanup, register allocation
+and code to generate.
+All of these may be omitted.
+.NH 4
+Stack cleanup
+.PP
+The stack cleanup part describes certain stacktokens that should neither remain on
+the fakestack, nor remembered as contents of registers.
+This is usually only required with store operations.
+The entire fakestack, except for the part matched in the stack pattern,
+is searched for tokens matching the expression and they are copied
+to the real stack.
+Every register that contains the stacktoken is marked as empty.
+.PP
+Syntax is
+.DS
+remove(token expression) \fIor\fP
+remove(token expression, boolean expression)
+.DE
+Example:
+.DS
+remove(REGOFF2,%[reg] != LB || %[off] == $1)
+.DE
+is part of a remove() call for use in the \fBstl\fP code rule.
+It removes all register offsetted tokens where the register is not the
+local base, as well as the token for the local in which the store is done.
+The necessity for this can be seen from the following example:
+.DS
+\fBlol\fP 4
+\fBinl\fP 4
+\fBstl\fP 6
+.DE
+Without a proper remove() call in the rule for \fBinl\fP code would
+be generated as here
+.DS
+inc 4(r5)
+mov 4(r5),6(r5)
+.DE
+so local 6 would be given the new value of local 4 instead of the old
+as the EM code prescribed.
+.PP
+When generating something like a branch instruction it 
+might be needed to empty the fakestack completely.
+This can of course be done with
+.DS
+remove(ALL)
+.DE
+.NH 4
+Register allocation
+.PP
+The register allocation part describes the kind of registers needed.
+Syntax for allocate() is
+.DS
+allocate(itemlist)
+.DE
+where itemlist is a list of three kinds of things:
+.IP 1)
+a tokendescription, for example %[1].
+.br
+This will instruct the code generator to temporarily decrement the reference count 
+of all registers contained in the token,
+so that they are available for allocation in this allocate() call
+if they were only used in that token.
+See example below.
+.IP 2)
+a register property.
+.br
+This will allocate a register with that property.
+The register will be marked as empty at this point.
+Lookahead will be performed if necessary.
+.IP 3)
+a register property with initialization.
+.br
+This will allocate the register as in 2) but will also
+initialize it.
+This eases the task of the code generator because it can
+find a register already filled with the right value
+if it exists.
+.PP
+Examples:
+.DS
+allocate(OREG)
+.DE
+will allocate an odd register, while 
+.DS
+allocate(REG={REGOFF2,LB,$1})
+.DE
+will allocate a register while simultaneously filling it with
+the asked value.
+.br
+Inside the coercion from SOURCE2 to REGISTER in the PDP-11 table
+the following allocate() can be found.
+.DS
+allocate(%[1],REG=%[1])
+.DE
+This tells the code generator that registers contained in %[1] can be used
+again and asks to fill the register allocated with %[1].
+So if %[1]={REGOFF2,R3,"4"} and R3 has a reference count of 1
+the following code might be generated.
+.DS
+mov 4(r3),r3
+.DE
+In the rest of the line the registers allocated can be named by
+%[a] and %[b.1],%[b.2], i.e. with lower case letters
+in order of allocation.
+.PP
+Warning: 
+.DS
+allocate(R3)
+.DE
+is \fInot\fP the way to allocate R3.
+R3 is not a register property, so it will be seen as a token description
+and the effect is that R3 will have its reference count decremented.
+.NH 4
+Code
+.PP
+Code to be generated is specified as a list of items of the following kind:
+.IP 1)
+a string in double quotes ("This is a string").
+.br
+This is copied to the codefile and a newline ( \en ) is appended.
+Inside the string all normal C string conventions are allowed,
+and substitutions can be made of the following sorts.
+.RS
+.IP a)
+$1, $2 etc.
+These are the operands of the corresponding EM instructions
+and are printed according to their type.
+To put a real '$' inside the string it must be doubled ('$$').
+.IP b)
+%[1], %[2.reg], %[b.1] etc.
+These have their obvious meaning.
+If they describe a complete token ( %[1] )
+the printformat for the token is used.
+If they stand for a basic term in an expression
+they will be printed according to their type.
+To put a real '%' inside the string it must be doubled ('%%').
+.IP c)
+%( arbitrary expression %).
+This allows inclusion of arbitrary expressions inside strings.
+Usually not needed very often,
+so that the awkward notation is not too bad.
+Note that %(%[1]%) is equivalent to %[1].
+.RE
+.IP 2)
+a move() call.
+This has the following syntax:
+.DS
+move(token description, token description)
+.DE
+Moves are handled specially since that enables the code generator
+to keep track of register contents.
+Example:
+.DS
+move(R3,{REGOFF2,LB,$1})
+.DE
+will generate code to move R3 to $1(r5) except when
+R3 already was a copy of $1(r5).
+Then the code will be omitted.
+The rules describing how to move things to each other
+can be found in the MOVES section described below.
+.IP 3)
+an erase() call.
+This has the following syntax:
+.DS
+erase(register expression)
+.DE
+This tells the code generator that the register mentioned no longer has any
+useful value.
+This is 
+.I necessary
+after code in the table has changed the contents of registers.
+For example, after an add to a register the register must be erased,
+because the contents do no longer match any token.
+.IP 4)
+For machines that have condition codes,
+alas most of them do,
+there are provisions to remember condition code setting
+and prevent needless testing.
+To set the condition code to a token put in the code the following call:
+.DS
+test(token)
+.DE
+where token can be all of the standard forms that can also be used in move().
+This will generate a test if the condition codes 
+were not already set to that token.
+It is also possible to tell 
+.I cg
+that a certain operation, like a preceding add
+has set the condition codes to some token with the call
+.DS
+setcc(token)
+.DE
+So a sequence of a setcc and a test on the same token will generate
+no code. 
+Another allowed call within the code is
+.DS
+samecc
+.DE
+which tells the code generator that condition codes were unaffected
+in this rule.
+If no setcc or samecc has been given the default is
+.DS
+nocc
+.DE
+when a piece of code contained strings,
+which tells the code generator that the condition codes
+have no useful value any more.
+.NH 3
+Stack replacement
+.PP
+The stack replacement is a possibly empty list of items to be pushed onto
+the fakestack. Three kinds of items are possible:
+.IP 1)
+An item of the form %[1]. This will push the stacktoken mentioned back
+onto the stack unchanged.
+.IP 2)
+A register expression. This will push the register mentioned
+onto the fakestack.
+.IP 3)
+An item of the form { REGOFF2,%[1.reg],$1 }.
+This generates a token with tokenidentifier REGOFF2 and attributes 
+in order of declaration.
+.PP
+All tokens matched by the stack pattern at the beginning of the code rule
+are first removed and their registers deallocated.
+Items are pushed in the order of appearance.
+This means that the last item will be on the top of the
+stack after the push.
+So if the stack pattern contained two token expressions
+and they must be pushed back unchanged,
+they have to be specified as stack replacement
+.DS
+%[2] %[1]
+.DE
+and not the other way around.
+.NH 3
+EM replacement
+.PP
+In exceptional cases it might be useful to leave part of an empattern
+undone.
+For example, a \fBsdl\fP instruction might be split into two \fBstl\fP instructions
+when there is no 4-byte quantity on the stack. The emreplacement part allows
+one to express this.
+Example:
+.DS
+\fBstl\fP $1 \fBstl\fP $1+2
+.DE
+The instructions are inserted in the stream so that they can match
+the first part of a pattern in the next step.
+Note that since the code generator traverses the EM instructions in a strict
+linear fashion,
+it is impossible to let the EM replacement match later parts of a pattern.
+So if there is a pattern
+.DS
+\fBloc\fP \fBstl\fP $1==0
+.DE
+and the input is
+.DS
+\fBloc\fP 0 \fBsdl\fP 4
+.DE
+the \fBloc\fP\ 0 will be processed first,
+then the \fBsdl\fP might be split into two \fBstl\fP's but the pattern
+cannot match now.
+.NH 3
+Cost
+.PP
+The cost field can be specified when there is more than one
+code rule with the same empattern.
+If the code generator has a choice between two possibilities
+to generate code it will choose the cheapest according to
+the cost field.
+The cost for a code generation is the sum of the costs
+of all the coercions needed, plus the cost for freeing
+registers plus the cost of the code rule itself.
+.PP
+The format of the costfield is
+.DS
+( nbytes, time )		or
+( nbytes, time ) + %[\fIi\fP]
+.DE
+with time in the metric desired, like nanoseconds or states.
+See constants section above.
+The %[\fIi\fP] in the second example is used for adding the cost of a certain
+address mode used in the code generated.
+This can of course be repeated if desired.
+The cost of the address mode must then be specified in the token definition
+section.
+.NH 3
+Examples
+.PP
+A list of examples for the PDP-11 is given here.
+Far from being complete it gives examples of most kinds
+of instructions.
+.DS L
+\fBadi\fP $1==2 | SREG,SOURCE2 |
+	"add %[2],%[1]" erase(%[1]) setcc(%[1])
+	  | %[1] | | (2,450) + %[2]
+\&...       | SOURCE2,SREG |
+	"add %[1],%[2]" erase(%[2]) setcc(%[2])
+	  | %[2] | | (2,450) + %[1]
+.DE
+is an example of the use of the `...' construct
+and shows how to place erase() and setcc() calls.
+.DS L
+
+\fBdvi\fP $1==2 | SOURCE2,SPAIRSIGNED |
+	"div %[1],%[2]" erase(%[2])
+	  | %[2.regeven] | |
+
+\fBcmi\fP \fBtgt\fP $1==2 | SOURCE2,SOURCE2 | allocate(REG={CONST,0})
+	"cmp %[2],%[1];ble 1f;inc %[a];1:" erase(%[a])
+	  | %[a] | |
+
+\fBcal\fP | STACK |
+	"jsr pc,$1" 
+	  | | |
+
+\fBlol\fP | | | { REGOFF2, LB, $1 } | |
+
+\fBstl\fP | SOURCE2 |
+	remove(REGOFF2,%[off]==$1)
+	move(%[1],{REGOFF2,LB,$1})
+	  | | |
+
+| SOURCE2 |
+	allocate(%[1],REGPAIR)
+	move(%[1],%[a.2])
+	test(%[a.2])
+	"sxt %[a.even]" | { PAIRSIGNED, %[a.1], %[a.2] }| | 
+.DE
+This coercion shows how to use the move and test calls.
+At first one might think that the testcall is unnecessary,
+since the move will have set the condition codes,
+but the move may never have been executed
+if the register already contained the value,
+in which case it is necessary to do the test.
+If the move was executed the test will be omitted.
+.DS L
+| SOURCE2 | allocate(%[1],REG=%[1]) | %[a] | |
+
+\fBsdl\fP | SOURCE2 | | %[1] | \fBstl\fP $1 \fBstl\fP $1+2 |
+
+\fBexg\fP $1==2 | SOURCE2 SOURCE2 | | %[1] %[2] | |
+.DE
+This last example again shows the difference in the order
+of the stack pattern and the stack replacement.
+.NH 2
+Move code rules
+.PP
+When issuing a move() call as described above or a register allocation
+with initialization, the code generator has to know which
+instruction to use for the move.
+The code will of course only be generated if it cannot be omitted.
+This is listed in the move section of the tables by giving a list
+of tuples:
+.DS
+( source, destination, codepart [ , costfield ] )
+.DE
+where the square brackets mean the costfield is optional.
+Example for the PDP-11
+.DS
+MOVES:
+( CONST %[off]==0 , SOURCE2, "clr %[2]" )
+( SOURCE2, SOURCE2, "mov %[1],%[2]" )
+.DE
+The moves are scanned from top to bottom,
+so the first one that matches will be chosen.
+.NH 2
+Test code rules
+.PP
+When issuing a test() call as described above,
+the code generator has to know which instruction
+to use for the test.
+The code will only be generated if the condition codes
+were not already set to the token.
+This is listed in the test section of the tables by giving
+a list of tuples:
+.DS
+( source, codepart [ , costfield ] )
+.DE
+Example for the PDP-11
+.DS
+TESTS:
+( SOURCE2, "tst %[1]")
+( DREG, "tstf %[1]\encfcc")
+.DE
+The tests are scanned from top to bottom,
+so the first one that matches will be chosen.
+.NH 2
+Stacking code rules.
+.PP
+When the code generator has to stack a token it must know
+which code to use.
+Since it must at all times be possible to empty the fakestack
+even when no registers are free,
+it is mandatory that all
+tokens used must have a rule attached for stacking them
+without using a scratch register.
+Since however this might be clumsy and 
+a register might in practice be available
+it is also possible to give rules
+which use a register.
+On the Intel 8086 for example,
+there is no instruction to push a constant without using a register,
+and the code needed to do it without one must use global data
+and as such is very complicated and wasteful of memory and time.
+It can therefore be left to be used in extreme cases,
+while in general the constant is pushed through a register.
+The stacking rules are listed in the stack section of the table as a list
+of tuples:
+.DS
+(source, [ register property ] , codepart [ , costfield ] )
+.DE
+Example for the Intel 8086:
+.DS
+STACKS:
+(CONST, REG, move(%[1],%[a]) "push %[a]")
+(REG ,, "push %[1]")
+.DE
+.NH 1
+The files mach.h and mach.c
+.PP
+The table writer must also supply two files containing
+machine dependent declarations and C code.
+These files are mach.h and mach.c.
+.NH 2
+Types in the code generator
+.PP
+Three different types of integer coexist in the code generator
+and their range depends on the machine at hand.
+The type 'int' is used for things like labelcounters that won't require
+more than 16 bits precision.
+The type 'word' is used among others to assemble datawords and
+is of type 'long'.
+The type 'full' is used for addresses and is of type 'long' if
+EM_WSIZE>2 or EM_PSIZE>2.
+.PP
+In macro and function definitions in later paragraphs implicit typing
+will be used for parameters, that is parameters starting with an 's'
+will be of type string, and the letters 'i','w','f' will stand for
+int, word and full respectively.
+.NH 2
+Global variables to work with
+.PP
+Some global variables are present in the code generator
+that can be manipulated by the routines in mach.h and mach.c.
+.LP
+The declarations are:
+.DS L
+.ta 20
+FILE *codefile;	/* code is emitted on this stream */
+word part_word;	/* words to be output are put together here */
+int part_size;	/* number of bytes already put in part_word */
+char str[];	/* Last string read in */
+long argval;	/* Last int read and kept */
+.DE
+.NH 2
+Macros in mach.h
+.PP
+In the file mach.h a collection of macros is defined that have
+to do with formatting of assembly code for the machine at hand.
+Some of these macros can of course be left undefined in which case the
+macro calls are left in the source and will be treated as 
+function calls.
+These functions can then be defined in \fImach.c\fR.
+.PP
+The macros to be defined are:
+.IP ex_ap(s) 16
+Must print the magic incantations that will mark the symbol \fIs\fR
+to be exported to other modules.
+This is the translation of the EM \fBexa\fP and \fBexp\fP instructions.
+.IP in_ap(s)
+Same to import the symbol.
+Translation of \fBina\fP and \fBinp\fP.
+.IP newplb(s)
+Must print the definition of procedure label \fIs\fR.
+If left undefined the newilb() macro is used instead.
+.IP newilb(s)
+Must print the definition of instruction label \fIs\fR.
+.IP newdlb(s)
+Must print the definition of data label \fIs\fR.
+.IP dlbdlb(s1,s2)
+Must define data label
+.I s1
+to be equal to
+.I s2 .
+.IP newlbss(s,f)
+Must declare a piece of memory initialized to BSS_INIT(see below)
+of length 
+.I f
+and with label
+.I s .
+.IP cst_fmt
+Format to be used when converting constant arguments of
+EM instructions to string.
+Argument to be formatted will be 'full'.
+.IP off_fmt
+Format to be used for integer part of label+constant,
+argument will be 'full'.
+.IP fmt_ilb(ip,il,s)
+Must use the numbers 
+.I ip
+and 
+.I il
+which are a procedure number
+and a label number respectively and copy a string to
+.I s
+that must be unique for that combination.
+This procedure is optional, if it is not given ilb_fmt
+must be defined as below.
+.IP ilb_fmt
+Format to be used for creation of unique instruction labels.
+Arguments will be a unique procedure number (int) and the label
+number (int).
+.IP dlb_fmt
+Format to be used for printing numeric data labels.
+Argument will be 'int'.
+.IP hol_fmt
+Format to be used for generation of labels for
+space generated by a
+.B hol
+pseudo.
+Argument will be 'int'.
+.IP hol_off
+Format to be used for printing of the address of an element in
+.B hol
+space.
+Arguments will be the offset in the
+.B hol
+block (word) and the number of the
+.B hol
+(int).
+.IP con_cst(w)
+Must generate output that will assemble into one machineword.
+.IP con_ilb(s)
+Must generate output that will put the address of the instruction label
+into the datastream.
+.IP con_dlb(s)
+Must generate output that will put the address of the data label
+into the datastream.
+.IP fmt_id(sf,st)
+Must take the string in
+.I sf
+which is a nonnumeric global label, and transform it into a copy made to
+.I st
+which will not collide with reserved assembler words and system labels.
+This procedure is optional, if it is not given the id_first macro is used
+as defined below.
+.IP id_first
+Must be a character.
+This is prepended to all nonnumeric global labels if their length
+is shorter than the maximum allowed(currently 8) or if they already
+start with that character.
+This is to avoid conflicts of user labels with system labels.
+.IP BSS_INIT
+Must be a constant.
+This is the value filled in all the words not initialized explicitly.
+This is loader and system dependent.
+If omitted no initialization is assumed.
+.NH 3
+Example mach.h for the PDP-11
+.DS L
+.ta 8 16 24 32 40 48 56
+#define ex_ap(y)	fprintf(codefile,"\et.globl %s\en",y)
+#define in_ap(y)	/* nothing */
+
+#define newplb(x)	fprintf(codefile,"%s:\en",x)
+#define newilb(x)	fprintf(codefile,"%s:\en",x)
+#define newdlb(x)	fprintf(codefile,"%s:\en",x)
+#define	dlbdlb(x,y)	fprintf(codefile,"%s=%s\en",x,y)
+#define newlbss(l,x)	fprintf(codefile,"%s:.=.+%d.\en",l,x);
+
+#define cst_fmt		"$%d."
+#define off_fmt		"%d."
+#define ilb_fmt		"I%x_%x"
+#define dlb_fmt		"_%d"
+#define	hol_fmt		"hol%d"
+
+#define hol_off		"%ld.+hol%d"
+
+#define con_cst(x)	fprintf(codefile,"%ld.\en",x)
+#define con_ilb(x)	fprintf(codefile,"%s\en",x)
+#define con_dlb(x)	fprintf(codefile,"%s\en",x)
+
+#define id_first	'_'
+#define BSS_INIT	0
+.DE
+.NH 2
+Functions in mach.c
+.PP
+In mach.c some functions must be supplied,
+mostly manipulating data resulting from pseudoinstructions.
+The specifications are given here,
+implicit typing of parameters as above.
+.IP con_part(isz,word) 20
+This function must manipulate the globals 
+part_word and part_size to append the isz bytes
+contained in word to the output stream.
+If part_word is full, i.e. part_size==EM_WSIZE
+the function part_flush() may be called to empty the buffer.
+This is the function that must go through the trouble of
+doing byte order in words correct.
+.IP con_mult(w_size)
+This function must take the string str[] and create an integer
+from the string of size w_size and generate code to assemble global
+data for that integer.
+Only the sizes for which arithmetic is implemented need be
+handled,
+so if 200-byte integer division is not implemented,
+200-byte integer global data do not have to be implemented.
+Here one must take care of word order in long integers.
+.IP con_float()
+This function must generate code to assemble a floating
+point number of which the size is contained in argval
+and the ASCII representation in str[].
+.IP prolog(f_nlocals)
+This function is called at the start of every procedure.
+Function prolog code must be generated,
+and room made for local variables for a total of f_nlocals bytes.
+.IP mes(w_mesno)
+This function is called when a
+.B mes
+pseudo is seen that is not handled by the machine independent part.
+The example below probably shows all the table writer ever has to know
+about that.
+.IP segname[]
+This is not a function,
+but an array of four strings.
+These strings are put out whenever the code generator
+switches segments.
+Segments are SEGTXT, SEGCON, SEGROM and SEGBSS in that order.
+.NH 3
+Example mach.c for the PDP-11
+.PP
+As an example of the sort of code expected,
+the mach.c for the PDP-11 is presented here.
+.DS L
+.ta 8 16 24 32 40 48 56 64
+/*
+ * machine dependent back end routines for the PDP-11
+ */
+
+con_part(sz,w) register sz; word w; {
+
+	while (part_size % sz)
+		part_size++;
+	if (part_size == EM_WSIZE)
+		part_flush();
+	if (sz == 1) {
+		w &= 0xFF;
+		if (part_size)
+			w <<= 8;
+		part_word |= w;
+	} else {
+		assert(sz == 2);
+		part_word = w;
+	}
+	part_size += sz;
+}
+
+con_mult(sz) word sz; {
+	long l;
+
+	if (sz != 4)
+		fatal("bad icon/ucon size");
+	l = atol(str);
+	fprintf(codefile,"\et%o;%o\en",(int)(l>>16),(int)l);
+}
+
+con_float() {
+	double f;
+	register short *p,i;
+
+	/*
+	 * This code is correct only when the code generator is
+	 * run on a PDP-11 or VAX-11 since it assumes native
+	 * floating point format is PDP-11 format.
+	 */
+
+	if (argval != 4 && argval != 8)
+		fatal("bad fcon size");
+	f = atof(str);
+	p = (short *) &f;
+	i = *p++;
+	if (argval == 8) {
+		fprintf(codefile,"\et%o;%o;",i,*p++);
+		i = *p++;
+	}
+	fprintf(codefile,"\et%o;%o\en",i,*p++);
+}
+
+prolog(nlocals) full nlocals; {
+
+	fprintf(codefile,"mov r5,-(sp)\enmov sp,r5\en");
+	if (nlocals == 0)
+		return;
+	if (nlocals == 2)
+		fprintf(codefile,"tst -(sp)\en");
+	else
+		fprintf(codefile,"sub $%d.,sp\en",nlocals);
+}
+
+mes(type) word type; {
+	int argt ;
+
+	switch ( (int)type ) {
+	case ms_ext :
+		for (;;) {
+			switch ( argt=getarg(
+			    ptyp(sp_cend)|ptyp(sp_pnam)|sym_ptyp) ) {
+			case sp_cend :
+				return ;
+			default:
+				strarg(argt) ;
+				fprintf(codefile,".globl %s\en",argstr) ;
+				break ;
+			}
+		}
+	default :
+		while ( getarg(any_ptyp) != sp_cend ) ;
+		break ;
+	}
+}
+
+char    *segname[] = {
+	".text",        /* SEGTXT */
+	".data",        /* SEGCON */
+	".data",        /* SEGROM */
+	".bss"          /* SEGBSS */
+};
+.DE
+.NH 1
+Coercions
+.PP
+A central part in code generation is taken by the
+.I coercions .
+It is the responsibility of the table writer to provide
+all necessary coercions so that code generation can continue.
+The very minimal set of coercions are
+the coercions to unstack every token expression,
+in combination with the rules to stack every token.
+.PP
+If these are present the code generator can always make the necessary
+transformations by stacking and unstacking.
+Of course, for code quality it is usually best to provide extra coercions
+to prevent this stacking from taking place.
+.I Cg
+distinguishes three types of coercions:
+.IP 1)
+Unstacking coercions.
+This category can use the allocate() call in its code.
+.IP 2)
+Splitting coercions, these are the coercions that split
+larger tokens into smaller ones.
+.IP 3)
+Transforming coercions, these are the coercions that transform
+a token into another one of the same size.
+This category can use the allocate() call in its code.
+.PP
+When a stack configuration does not match the stack pattern
+.I coercions
+are searched for in the following order:
+.IP 1)
+First tokens are split if necessary to get their sizes right.
+.IP 2)
+Then transforming coercions are found that will make the pattern match.
+.IP 3)
+Finally, if the stack pattern is longer than the fakestack contents,
+unstacking coercions will be used to fill up the pattern.
+.PP
+At any point where coercions are missing, so that code generation cannot
+continue, the offending tokens are stacked.
+.NH 1
+Internal workings of the code generator.
+.NH 2
+Description of tables.c and tables.h contents
+.PP
+In this section the intermediate files will be described 
+that are produced by
+.I cgg
+and compiled with machine independent code to produce a code generator.
+.NH 3
+Tables.c
+.PP
+Tables.c contains a large number of initialized arrays of all sorts.
+A description of each follows:
+.br
+.in 1i
+.ti -0.5i
+byte code rules[]
+.br
+Pseudo code interpreted by the code generator.
+Always starts with some opcode followed by operands depending
+on the opcode.
+Integers in this table are between 0 and 32767 and have a one byte
+encoding if between 0 and 127.
+.ti -0.5i
+char stregclass[]
+.br
+The computed static register class number for each register.
+Two registers are in the same class if they have the same properties
+and don't share a common subregister.
+.ti -0.5i
+struct reginfo machregs[]
+.br
+Info per register.
+Initialized with representation string, size,
+members of the register and set of registers affected when this
+one is changed.
+Also contains room for runtime information,
+like contents and reference count.
+.ti -0.5i
+tkdef_t tokens[]
+.br
+Information per token type.
+Initialized with size, cost, type of operands and format string.
+.ti -0.5i
+node_t enodes[]
+.br
+List of triples representing expressions for the code generator.
+.ti -0.5i
+string code strings[]
+.br
+List of strings.
+All strings are put in a list and checked for duplication,
+so only one copy per string will reside here.
+.ti -0.5i
+set_t machsets[]
+.br
+List of token expression sets.
+Bit 0 of the set is used for the SCRATCH property of registers,
+bits 1 up to NREG are for the corresponding registers
+and bits NREG+1 up to the end are for the corresponding tokens.
+.ti -0.5i
+inst_t tokeninstances[]
+.br
+List of descriptions for building tokens.
+Contains type of rule for building one,
+plus operands depending on the type.
+.ti -0.5i
+move_t moves[]
+.br
+List of move rules.
+Contains token expressions for source and destination
+plus cost and index for code rule.
+.ti -0.5i
+byte pattern[]
+.br
+EM patterns.
+This is structured internally as chains of patterns,
+each chain pointed at by pathash[].
+After each pattern the list of possible code rules is given.
+.ti -0.5i
+int pathash[256]
+.br
+Indices into pattern[] for all patterns with a certain low order
+byte of the hashing function.
+.ti -0.5i
+c1_t c1coercs[]
+.br
+List of rules to stack tokens.
+Contains token expressions,
+register needed,
+cost
+and code rule.
+.ti -0.5i
+c2_t c2coercs[]
+.br
+List of splitting coercions.
+Token expressions,
+split factor,
+replacements
+and code rule.
+.ti -0.5i
+c3_t c3coercs[]
+.br
+List of one to one coercions.
+Token expressions,
+register needed,
+replacement
+and code rule.
+.ti -0.5i
+struct reginfo **reglist[]
+.br
+List of lists of pointers to register information.
+For every property the list is here
+to find the registers corresponding to it.
+.in 0
+.NH 3
+tables.h
+.PP
+In tables.h various derived constants for the tables are
+given.
+They are then used to determine array sizes in the actual code generator,
+plus loop termination in some cases.
+.NH 2
+Other important data structures
+.PP
+During code generation some other data structures are used
+and here is a short description of some of the important ones.
+.PP
+Tokens are kept in the code generator as a struct consisting of
+one integer
+.I t_token
+which is -1 if the token is a register,
+and the number of the token otherwise,
+plus an array of
+.I TOKENSIZE
+unions
+.I t_att
+of which the first is the register number in case of a register.
+.PP
+The fakestack is an array of these tokens;
+its current depth is kept in the global variable
+.I stackheight .
+.PP
+The results of expressions are kept in a struct
+.I result
+with elements
+.I e_typ ,
+giving the type of the expression:
+.I EV_INT ,
+.I EV_REG
+or
+.I EV_STR ,
+and a union
+.I e_v
+which contains the real result.
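+.PP
+As an illustration only, these structures could look roughly as follows;
+the member names other than t_token, t_att, e_typ and e_v are invented
+for this sketch, and the real definitions in the code generator differ in detail.
+.DS L
+#define TOKENSIZE 4                     /* illustrative value */
+
+union t_att {                           /* one attribute of a token */
+        int     aw;                     /* integer attribute */
+        char    *astr;                  /* string attribute */
+};
+
+struct token {
+        int          t_token;           /* -1: register, otherwise token number */
+        union t_att  t_att[TOKENSIZE];  /* t_att[0]: register number if register */
+};
+
+struct result {                         /* result of an expression */
+        int     e_typ;                  /* EV_INT, EV_REG or EV_STR */
+        union {
+                long    e_con;          /* EV_INT */
+                int     e_reg;          /* EV_REG */
+                char    *e_str;         /* EV_STR */
+        } e_v;
+};
+.DE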
+.NH 2
+A tour through the sources
+.NH 3
+codegen.c
+.PP
+The file codegen.c contains one large function consisting
+of one giant switch statement.
+It is the interpreter for the code generator pseudo code
+as contained in code rules[].
+This function can call itself recursively when doing lookahead.
+Arguments are:
+.IP codep 10
+Pointer into code rules, pseudo program counter.
+.IP ply
+Number of EM pattern lookahead allowed.
+.IP toplevel
+Boolean telling whether this is the toplevel codegen() or
+a deeper incarnation.
+.IP costlimit
+A cutoff value to limit searches.
+If the cost crosses costlimit the incarnation can terminate.
+.IP forced
+A register number if nonzero.
+This is used inside coercions to force the allocate() call to allocate
+a register determined by earlier lookahead.
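+.PP
+For illustration, a hypothetical prototype summarizing these arguments is
+given below; the actual declaration in codegen.c may differ in the argument
+types and in the type of the result.
+.DS L
+typedef unsigned char byte;       /* as used for code rules[] */
+
+int codegen(
+        byte *codep,              /* pointer into code rules[], pseudo program counter */
+        int   ply,                /* number of EM pattern lookahead allowed */
+        int   toplevel,           /* nonzero for the toplevel incarnation */
+        int   costlimit,          /* cutoff value to limit searches */
+        int   forced);            /* register forced by earlier lookahead, or 0 */
+.DE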
+.PP
+The instructions implemented in the switch:
+.NH 4
+DO_NEXTEM
+.PP
+Matches the next EM pattern and does lookahead if necessary to find the best
+code rule associated with this pattern.
+Heuristics are used to determine best code rule when possible.
+This is done by calling the distance() function.
+.NH 4
+DO_COERC
+.PP
+This puts the code generator in the state for performing a from-stack coercion.
+.NH 4
+DO_XMATCH
+.PP
+This is done when a match no longer has to be checked.
+Used when the nocoercions: trick is used in the table.
+.NH 4
+DO_MATCH
+.PP
+This is the big one inside this function.
+It has the task to transform the contents of the current
+fakestack to match the pattern given after it.
+.PP
+Since the code generator does not know combining coercions,
+i.e. there is no way to make a big token out of two smaller ones,
+the first thing done is to stack every token that is too small.
+After that all tokens too big are split if possible to the right size.
+.PP
+Next the coercions are sought that would transform tokens in place into
+the right ones, plus the coercions that would pop tokens off the stack.
+Each of those might need a register, so a list of registers is generated
+and at the end of looking for coercions the function 
+.I tuples()
+is called to generate the list of all possible \fIn\fP-tuples,
+where 
+.I n
+equals the number of registers needed.
+.PP
+Lookahead is now performed if the number of tuples is greater than one.
+If no possibility is found within the costlimit,
+the fakestack is made smaller by pushing the bottom token,
+and this process is repeated until either a way is found or
+the fakestack is completely empty and there is still no way
+to make the match.
+.PP
+If there is a way the corresponding coercions are executed
+and the code is finished.
+.NH 4
+DO_REMOVE
+.PP
+Here the remove() call is executed; all tokens matched by the
+token expression plus boolean expression are pushed.
+In the current implementation there is no attempt to move those
+tokens to registers, but that is a possible future extension.
+.NH 4
+DO_DEALLOCATE
+.PP
+This one temporarily decrements by one the reference count of all registers
+contained in the token given as argument.
+.NH 4
+DO_REALLOCATE
+.PP
+Here all temporary deallocates are undone.
+.NH 4
+DO_ALLOCATE
+.PP
+This is the part that allocates a register and decides which one to use.
+If the
+.I forced
+argument was given its task is simple,
+otherwise some work must be done.
+First the list of possible registers is scanned,
+all free registers are noted, and it is noted whether any of those
+registers already contains the initialization.
+If no registers are available, some fakestack token is stacked and the
+process is repeated.
+.PP
+After that if an exact match was found, 
+the list of registers is reduced to one register matching exactly
+out of every register class.
+Now lookahead is performed if necessary and the register chosen.
+If an initialization was given the corresponding move is performed,
+otherwise the register is marked empty.
+.NH 4
+DO_LOUTPUT
+.PP
+This prints a string and an expression.
+Only done on toplevel.
+.NH 4
+DO_ROUTPUT
+.PP
+Prints a string and a new line.
+Only on toplevel.
+.NH 4
+DO_MOVE
+.PP
+Calls the move() function in the code generator to implement the move()
+function in the table.
+.NH 4
+DO_ERASE
+.PP
+Marks the register that is its argument as empty.
+.NH 4
+DO_TOKREPLACE
+.PP
+This is the token replacement part.
+It is also called if there is no token replacement because it has
+some other functions as well.
+.PP
+First the tokens that will be pushed on the fakestack are computed
+and stored in a temporary array.
+Then the tokens that were matched in this rule are popped
+and their embedded registers have their reference count
+decremented.
+After that the replacement tokens are pushed.
+.PP
+Finally all registers allocated in this rule have their reference count
+decremented.
+If they were not pushed on the fakestack they will be available again
+in the next code rule.
+.NH 4
+DO_EMREPLACE
+.PP
+Places replacement EM instructions back into the instruction stream.
+.NH 4
+DO_COST
+.PP
+Accounts for cost as given in the code rule.
+.NH 4
+DO_RETURN
+.PP
+Returns from this level of codegen().
+It is used at the end of coercions,
+move rules, etc.
+.NH 3
+compute.c
+.PP
+This module computes the various expressions as given
+in the enodes[] array.
+Nothing very special happens here;
+it is just a recursive function computing leaves
+of expressions and applying the operator.
+.NH 3
+equiv.c
+.PP
+In this module the tuples() function is implemented.
+It is given the number of registers needed and
+a list of register lists and it constructs a list of tuples
+where the \fIn\fP'th register comes from the \fIn\fP'th list.
+Before the list is constructed, however,
+the dynamic register classes are computed.
+Two registers are in the same dynamic class if they are in the
+same static class and their contents are the same.
+.PP
+After that the permute() recursive function is called to
+generate the list of tuples.
+After construction a generated tuple is only added to the list
+if no tuple already in the list has its registers pairwise in the same classes
+with the same register relations;
+i.e. if the first and second register share a common
+subregister in one tuple and not in the other, the tuples are considered different.
+.NH 3
+fillem.c
+.PP
+This is the routine that does the reading of EM instructions
+and the handling of pseudos.
+The mach.c module provided by the table writer is included
+at the end of this module.
+The routine fillemlines() is called by nextem() at toplevel
+to make sure there are enough instructions to match.
+It fills the EM instruction buffer up to 5 places from the end to
+keep room for EM replacement instructions,
+or up to a pseudo.
+.PP
+The dopseudo() function performs the function of the pseudo last
+encountered.
+If the pseudo is a 
+.B rom
+the corresponding label is saved with the contents of the
+.B rom
+to be available to the code generator later.
+The rest of the routines are small service routines for either
+input or data output.
+.NH 3
+gencode.c
+.PP
+This module contains routines called by codegen() to generate the real
+code to the codefile.
+The function gencode() gets a string as argument and copies it to codefile
+while processing certain embedded control characters implementing
+the $2 and [1.reg] escapes.
+The function genexpr() prints the expression given as argument.
+It is used to implement the %(\ expr\ %) escape.
+The prtoken() function interprets the token format as given in
+the tokens[] array.
+.NH 3
+glosym.c
+.PP
+This module maintains a list of global symbols that have a 
+.B rom
+pseudo associated.
+There are functions to enter a symbol and to find a symbol.
+.NH 3
+main.c
+.PP
+Main routine of the code generator.
+Processes arguments and flags.
+Flags available are:
+.IP -d
+Sets debug mode if the code generator was not compiled with
+the NDEBUG macro defined.
+Debug mode gives very long output on stderr indicating
+all steps of the code generation process including nesting
+of the codegen() function.
+.IP -p\fIn\fP
+Sets the lookahead depth to
+.I n ,
+the
+.I p
+stands for ply,
+a well known word in chess playing programs.
+.IP -w\fIn\fP
+Sets the weight percentage for size in the cost function to
+.I n
+percent.
+Uses Euclid's algorithm to simplify rationals (see the sketch below).
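+As a sketch (not the actual code), and assuming the time weight is the
+complementary (100-\fIn\fP) percent, the two weights could be obtained by
+reducing the pair \fIn\fP:(100-\fIn\fP) with Euclid's algorithm:
+.DS L
+/* Sketch: gcd() by Euclid's algorithm; for n = 40 the weight pair
+ * 40:60 reduces to 2:3 (size:time).
+ */
+int gcd(int a, int b) {
+        while (b != 0) {
+                int t = a % b;
+                a = b;
+                b = t;
+        }
+        return a;
+}
+.DE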
+.NH 3
+move.c
+.PP
+Function to implement the move() pseudo function in the tables,
+register initialization and the setcc and test pseudo functions.
+First tests are made to try to prevent the move from really happening.
+The condition code register is treated specially here.
+After that, if there is an after that,
+the move rule is found and the code executed.
+.NH 3
+nextem.c
+.PP
+The entry point of this module is nextem().
+It hashes the next three EM instructions,
+and uses the low order byte of the hash
+as an index into the array pathash[]
+to find a chain of patterns in the array
+pattern[],
+which are all tried for a match.
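+.PP
+The hash function itself is not described here; as a purely hypothetical
+sketch, any function mixing the three opcodes fits the description above:
+.DS L
+/* Hypothetical sketch of the lookup; the real hash function differs. */
+unsigned hashpat(int op1, int op2, int op3) {
+        return (unsigned)(op1 ^ (op2 << 2) ^ (op3 << 4));
+}
+
+/* chain = pathash[hashpat(op1, op2, op3) & 0377];  start of chain in pattern[] */
+.DE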
+.PP
+The function trypat() does most of the work
+checking patterns.
+When a pattern is found to match all instructions
+the operands of the instruction are placed into the dollar[] array.
+Then the boolean expression is tried.
+If it matches the function can return,
+leaving the operands still in the dollar[] array,
+so later in the code rule they can still be used.
+.NH 3
+reg.c
+.PP
+Collection of routines to handle registers.
+Reference count routines are here,
+chrefcount() and getrefcount(),
+plus routines to erase a single register or all of them,
+erasereg() and cleanregs().
+.PP
+If NDEBUG hasn't been defined, this module also contains the routine that checks
+whether the reference count kept with the register information is in
+agreement with the number of times the register occurs on the fakestack.
+.NH 3
+salloc.c
+.PP
+Module for string allocation and garbage collection.
+Contains the entry points myalloc(),
+a routine calling malloc() and checking whether room is left,
+myfree(), which is just free(),
+and popstr(), a function called from state.c to free all strings
+made since the last saved status.
+Furthermore there is salloc(), which has the size of the string as parameter
+and returns a pointer to the allocated space,
+while keeping a copy of the pointer for garbage collection purposes.
+.PP
+The function garbage_collect is called from codegen() at toplevel
+every now and then,
+and checks all places where strings may reside to mark strings
+as being in use.
+Strings not in use are returned to the pool of free space.
+.NH 3
+state.c
+.PP
+Set of routines called to save the current state,
+restore a previously saved state and free the room
+occupied by a saved state.
+A list of structs is kept here to save the state.
+If this is not done,
+small allocates will take space
+from the holes big enough for state saves,
+and as a result every new state save will need a new struct.
+The code generator runs out of room very rapidly under these conditions.
+.NH 3
+subr.c
+.PP
+Random set of leftover routines.
+.NH 4
+match
+.PP
+Computes whether a certain token matches a certain token expression.
+Just computes a bitnumber according to the algorithm explained with
+machsets[],
+and tests the bit and the boolean expression if it is there.
+.NH 4
+instance,cinstance
+.PP
+These two functions compute a token from a description.
+They differ only slightly; cinstance() is used to compute
+the result of a coercion in a certain context
+and therefore has more arguments, which it uses instead of
+the global information instance() works on.
+.NH 4
+eqtoken
+.PP
+eqtoken computes whether two tokens can be considered identical.
+It is mainly used to check register contents during moves.
+.NH 4
+distance
+.PP
+This is the heuristic function that computes a distance from
+the current fakestack contents to the token pattern in the table.
+It likes exact matches most, then matches where at least the sizes are correct;
+if the sizes are not correct it likes sizes that are too large more than sizes that are too
+small, since splitting a token is easier than combining tokens.
+.NH 4
+split
+.PP
+This function tries to find a splitting coercion
+and executes it immediately when found.
+The fakestack is shuffled thoroughly when this happens,
+so pieces below the token that must be split are saved first.
+.NH 4
+docoerc
+.PP
+This function executes a coercion that was found.
+The same shuffling is done, so the top of the stack is again saved.
+.NH 4
+stackupto
+.PP
+This function gets a pointer into the fakestack and must stack
+every token including the one pointed at up to the bottom of the fakestack.
+The first stacking rule possible is used,
+so rules using registers must come first.
+.NH 4
+findcoerc
+.PP
+Looks for a one to one coercion; if found, it returns a pointer
+to it and leaves a list of possible registers to use in the global
+variable curreglist.
+This is used by codegen().
+.NH 3
+var.c
+.PP
+Global variables used by more than one module.
+External definitions are in extern.h.

+ 323 - 0
doc/cref.doc

@@ -0,0 +1,323 @@
+.\" $Header$
+.nr ID 4
+.de hd
+'sp 2
+'tl ''-%-''
+'sp 3
+..
+.de fo
+'bp
+..
+.tr ~
+.               TITLE
+.de TL
+.sp 15
+.ce
+\\fB\\$1\\fR
+..
+.               AUTHOR
+.de AU
+.sp 15
+.ce
+by
+.sp 2
+.ce
+\\$1
+..
+.               DATE
+.de DA
+.sp 3
+.ce
+( Dated \\$1 )
+..
+.               INSTITUTE
+.de VU
+.sp 3
+.ce 4
+Wiskundig Seminarium
+Vrije Universiteit
+De Boelelaan 1081
+Amsterdam
+..
+.               PARAGRAPH
+.de PP
+.sp
+.ti +\n(ID
+..
+.nr CH 0 1
+.               CHAPTER
+.de CH
+.nr SH 0 1
+.bp
+.in 0
+\\fB\\n+(CH.~\\$1\\fR
+.PP
+..
+.               SUBCHAPTER
+.de SH
+.sp 3
+.in 0
+\\fB\\n(CH.\\n+(SH.~\\$1\\fR
+.PP
+..
+.               INDENT START
+.de IS
+.sp
+.in +\n(ID
+..
+.               INDENT END
+.de IE
+.in -\n(ID
+.sp
+..
+.de PT
+.ti -\n(ID
+.ta \n(ID
+.fc " @
+"\\$1@"\c
+.fc
+..
+.               DOUBLE INDENT START
+.de DS
+.sp
+.in +\n(ID
+.ll -\n(ID
+..
+.               DOUBLE INDENT END
+.de DE
+.ll +\n(ID
+.in -\n(ID
+.sp
+..
+.               EQUATION START
+.de EQ
+.sp
+.nf
+..
+.               EQUATION END
+.de EN
+.fi
+.sp
+..
+.               ITEM
+.de IT
+.sp
+.in 0
+\\fB~\\$1\\fR
+.ti +5
+..
+.de CS
+.br
+~-~\\
+..
+.br
+.fi
+.TL "Ack-C reference manual"
+.AU "Ed Keizer"
+.DA "September 12, 1983"
+.VU
+.wh 0 hd
+.wh 60 fo
+.CH "Introduction"
+The C frontend included in the Amsterdam Compiler Kit
+translates UNIX-V7 C into compact EM code [1].
+The language accepted is described in [2] and [3].
+This document describes which implementation dependent choices were
+made in the Ack-C frontend and
+some restrictions and additions.
+.CH "The language"
+.PP
+Under the same heading as used in [2] we describe the
+properties of the Ack-C frontend.
+.IT "2.2 Identifiers"
+External identifiers are unique up to 7 characters and allow
+both upper and lower case.
+.IT "2.3 Keywords"
+The word \fBvoid\fP is also reserved as a keyword.
+.IT "2.4.3 Character constants"
+The ASCII-mapping is used when a character is converted to an
+integer.
+.IT "2.4.4 Floating constants"
+To prevent loss of precision the compiler does not perform
+floating point constant folding.
+.IT "2.6 Hardware characteristics"
+The size of objects of the several arithmetic types and 
+pointers depends on the EM-implementation used.
+The ranges of the arithmetic types depend on the size used;
+the C-frontend assumes two's complement representation for the
+integral types.
+All sizes are multiples of bytes.
+The calling program \fIack\fP[4] passes information about the
+size of the types to the compiler proper.
+.br
+However, a few general remarks must be made:
+.sp 1
+.IS
+.PT (a)
+The size of pointers is a multiple of
+(or equal to) the size of an \fIint\fP.
+.PT (b)
+The following relations exist for the sizes of the types
+mentioned:
+.br
+.ti +5
+\fIchar<=short<=int<=long\fP
+.PT (c)
+Objects of type \fIchar\fP use one 8-bit byte of storage,
+although several bytes are allocated sometimes.
+.PT (d)
+All sizes are in multiples of bytes.
+.PT (e)
+Most EM implementations use 4 bytes for floats and 8 bytes
+for doubles, but exceptions to this rule occur.
+.IE
+.IT "4 What's in a name"
+The type \fIvoid\fP is added.
+Objects of type void do not exist.
+Functions declared as returning void do not return a value at all.
+.IT "6.1 Characters and integers"
+Objects of type \fIchar\fP are unsigned and do not cause
+sign-extension when converted to \fIint\fP.
+The range of character values is from 0 to 255.
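+.br
+For example (an illustrative fragment):
+.IS
+char c = 0377;
+int i = c;    /* i gets the value 255; no sign extension */
+.IE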
+.IT "6.3 Floating and integral"
+Floating point numbers are truncated towards zero when
+converted to the integral types.
+.IT "6.4 Pointers and integers"
+When a \fIlong\fP is added to or subtracted from a pointer and
+longs are larger than pointers, the \fIlong\fP is converted to an
+\fIint\fP before the operation is performed.
+.IT "7.2 Unary operators"
+It is allowed to cast any expression to the type \fIvoid\fP.
+.IT "8.2 Type specifiers"
+One type is added to the type-specifiers:
+.br
+.IS
+void
+.IE
+.IT "8.5 Structure and union declarations"
+The only type allowed for fields is \fIint\fP.
+Fields with exactly the size of \fIint\fP are signed,
+all other fields are unsigned.
+.br
+The size of any single structure must be less than 4096 bytes.
+.IT "8.6 Initialization"
+Initialization of structures containing bit fields is not
+allowed.
+There is one restriction when using an 'address expression' to initialize
+an integral variable.
+The integral variable must have the same size as a pointer.
+Conversions altering the size of the address expression are not allowed.
+.IT "9.10 Return statement"
+Return statements of the form:
+.IS
+	return ;
+.IE
+are the only form of return statement allowed in a function of type
+function returning void.
+.IT "10.1 External function definitions"
+The total amount of storage used for parameters
+in any function must be less than 4096 bytes.
+The same holds for the total amount of storage occupied by the
+automatic variables declared inside any function.
+.sp
+Using formal parameters whose size is smaller than the size of an int
+is less efficient on several machines.
+At procedure entry these parameters are converted from integer to the
+declared type, because the compiler doesn't know where the least
+significant bytes are stored in the int.
+.IT "11.2 Scope of externals"
+Most C compilers are rather lax in enforcing the restriction
+that only one external definition without the keyword
+\fIextern\fP is allowed in a program.
+The Ack-C frontend is very strict in this.
+The only exception is that declarations of arrays with a
+missing first array bounds expression are regarded as having an
+explicit keyword \fIextern\fP.
+.IT "14.4 Explicit pointer conversions"
+Pointers may be larger than ints, thus assigning a pointer to an
+int and back will not always result in the same pointer.
+The process mentioned above works with integrals
+of the same size as or larger than pointers in all EM implementations
+having such integrals.
+When converting pointers to an integral type or vice-versa,
+the pointer is seen as an unsigned int.
+.br
+EM guarantees that any object can be placed at a word boundary;
+this allows C-programs to use \fIint\fP pointers
+as pointers to objects of any type not smaller than an \fIint\fP.
+.CH "Frontend options"
+The C-frontend has a few options; these are controlled
+by flags:
+.IS
+.PT -V
+This flag is followed by a sequence of letters, each followed by
+a positive integer. Each letter indicates a
+certain type; the integer following it specifies the size of
+objects of that type. One letter indicates the wordsize used.
+.IS
+.sp 1
+.TS
+center tab(:);
+l l16 l l.
+letter:type:letter:type
+
+w:wordsize:i:int
+s:short:l:long
+f:float:d:double
+p:pointer::
+.TE
+.sp 1
+All existing implementations use an integer size equal to the
+wordsize.
+.IE
+The calling program \fIack\fP[4] provides the frontend with
+this flag, with values depending on the machine used.
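+.br
+For example, a hypothetical call of the frontend with the flag
+.IS
+-Vw2i2s2l4f4d8p2
+.IE
+would describe a machine with 2-byte words, ints, shorts and pointers,
+4-byte longs and floats, and 8-byte doubles.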
+.sp 1
+.PT -l
+The frontend normally generates code to keep track of the line
+number and source file name at runtime for debugging purposes.
+Currently a pointer to a
+string containing the filename is stored at a fixed place in
+memory at each function
+entry and the line number at the start of every expression.
+At the return from a function these memory locations are not reset to
+the values they had before the call.
+Most library routines do not use this feature and thus do not
+ruin the current line number and filename when called.
+However, you are really unlucky when your program crashes due
+to a bug in such a library function, because the line number
+and filename do not indicate that something went wrong inside
+the library function.
+.br
+Providing the flag -l to the frontend tells it not to generate
+the code updating line number and file name.
+This is, for example, used when translating the stdio library.
+.br
+When the \fIack\fP[4] is called with the -L flag it provides
+the frontend with this flag.
+.sp 1
+.PT -Xp
+When this flag is present the frontend generates a call to
+the function \fBprocentry\fP at each function entry and a
+call to \fBprocexit\fP at each function exit.
+Both functions are provided with one parameter,
+a pointer to a string containing the function name.
+.br
+When \fIack\fP is called with the -p flag it provides the
+frontend with this flag.
+.IE
+.CH References
+.IS
+.PT [1]
+A.S. Tanenbaum, Hans van Staveren, Ed Keizer and Johan
+Stevenson \fIDescription of a machine architecture for use with
+block structured languages\fP Informatica report IR-81.
+.sp 1
+.PT [2]
+B.W. Kernighan and D.M. Ritchie, \fIThe C Programming
+language\fP, Prentice-Hall, 1978
+.PT [3]
+D.M. Ritchie, \fIC Reference Manual\fP
+.sp
+.PT [4]
+UNIX manual ack(I).

+ 629 - 0
doc/crefman.doc

@@ -0,0 +1,629 @@
+\." $Id$
+.\" eqn crefman.doc | troff -ms
+.EQ
+delim $$
+.EN
+.RP
+.TL
+ACK/CEM Compiler
+.br
+Reference Manual
+.AU
+Erik H. Baalbergen
+.AI
+Department of Mathematics and Computer Science
+Vrije Universiteit
+Amsterdam
+The Netherlands
+.AB no
+.AE
+.NH
+C Language
+.PP
+This section discusses the extensions to and deviations from the C language,
+as described in [1].
+The issues are numbered according to the reference manual.
+.SH
+2.2 Identifiers
+.PP
+Upper and lower case letters are different.
+The number of significant letters
+is 32 by default, but may be set to another value using the \fB\-M\fP option.
+The identifier length should be set according to the rest of the compilation
+programs.
+.SH
+2.3 Keywords
+.SH
+\f(CWasm\fP
+.PP
+The keyword \f(CWasm\fP
+is recognized.
+However, the statement
+.DS
+.ft CW
+asm(string);
+.ft R
+.DE
+is skipped, while a warning is given.
+.SH
+\f(CWenum\fP
+.PP
+The \f(CWenum\fP keyword is recognized and interpreted.
+.SH
+\f(CWentry\fP, \f(CWfortran\fP
+.PP
+The words \f(CWentry\fP and \f(CWfortran\fP
+are reserved under the restricted option.
+The words are not interpreted by the compiler.
+.SH
+2.4.1 Integer Constants
+.PP
+The type of an integer constant is the first of the corresponding list
+in which its value can be represented. Decimal: \f(CWint, long, unsigned long\fP;
+octal or hexadecimal: \f(CWint, unsigned, long, unsigned long\fP; suffixed by
+the letter L or l: \f(CWlong, unsigned long\fP.
+.SH
+2.4.3 Character Constants
+.PP
+A character constant is a sequence of 1 up to \f(CWsizeof(int)\fP characters
+enclosed in single quotes.
+The value of a character constant '$c sub 1 c sub 2 ... c sub n$'
+is $d sub n + M \(mu d sub {n - 1} + ... + M sup {n - 2} \(mu d sub 2 + M sup {n - 1} \(mu d sub 1$,
+where M is 1 + maximum unsigned number representable in an \f(CWunsigned char\fP,
+and $d sub i$ is the signed value (ASCII)
+of character $c sub i$.
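+For example, assuming \f(CWsizeof(int)\fP is 2 and M is 256, the character
+constant \f(CW'AB'\fP would have the value $66 + 256 \(mu 65 = 16706$.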
+.SH
+2.4.4 Floating Constants
+.PP
+The compiler does not support compile-time floating point arithmetic.
+.SH
+2.6 Hardware characteristics
+.PP
+The compiler is capable of producing EM code for machines with the following
+properties
+.IP \(bu
+a \f(CWchar\fP is 8 bits
+.IP \(bu
+the size of \f(CWint\fP is equal to the word size
+.IP \(bu
+the size of \f(CWshort\fP may not exceed the size of \f(CWint\fP
+.IP \(bu
+the size of \f(CWint\fP may not exceed the size of \f(CWlong\fP
+.IP \(bu
+the size of pointers is equal to the size of either \f(CWshort\fP, \f(CWint\fP
+or \f(CWlong\fP
+.LP
+.SH
+4 What's in a name?
+.SH
+\f(CWchar\fP
+.PP
+Objects of type \f(CWchar\fP are taken to be signed.
+The combination \f(CWunsigned char\fP is legal.
+.SH
+\f(CWunsigned\fP
+.PP
+The type combinations \f(CWunsigned char\fP, \f(CWunsigned short\fP and
+\f(CWunsigned long\fP are supported.
+.SH
+\f(CWenum\fP
+.PP
+The data type \f(CWenum\fP is implemented as described 
+in \fIRecent Changes to C\fP (see appendix A).
+.I Cem
+treats enumeration variables as if they were \f(CWint\fP.
+.SH
+\f(CWvoid\fP
+.PP
+Type \f(CWvoid\fP is implemented.
+The type specifies an empty set of values, which takes no storage space.
+.SH
+\fRFundamental types\fP
+.PP
+The names of the fundamental types can be redefined by the user, using
+\f(CWtypedef\fP.
+.SH
+7 Expressions
+.PP
+The order of evaluation of expressions depends on the complexity of the
+subexpressions.
+In case of commutative operations, the most complex subexpression is
+evaluated first.
+Parameter lists are evaluated from right to left.
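+.PP
+As an illustration (a made-up fragment), in the call
+.DS
+.ft CW
+f(g(), h());
+.ft R
+.DE
+the argument \f(CWh()\fP is evaluated before \f(CWg()\fP.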
+.SH
+7.2 Unary operators
+.PP
+The type of a \f(CWsizeof\fP expression is \f(CWunsigned int\fP.
+.SH
+7.13 Conditional operator
+.PP
+Both the second and the third expression in a conditional expression may
+include assignment operators.
+They may be structs or unions.
+.SH
+7.14 Assignment operators
+.PP
+Structures may be assigned, passed as arguments to functions, and returned
+by functions.
+The types of operands taking part must be the same.
+.SH
+8.2 Type specifiers
+.PP
+The combinations \f(CWunsigned char\fP, \f(CWunsigned short\fP
+and \f(CWunsigned long\fP are implemented.
+.SH
+8.5 Structure and union declarations
+.PP
+Fields of any integral type, either signed or unsigned,
+are supported, as long as the type fits in a word on the target machine.
+.PP
+Fields are left adjusted by default; the first field is put into the left
+part of a word, the next one on the right side of the first one, etc.
+The \f(CW-Vr\fP option in the call of the compiler
+causes fields to be right adjusted within a machine word.
+.PP
+The tags of structs and unions occupy a different name space from that of 
+variables and that of member names.
+.SH
+9.7 Switch statement
+.PP
+The type of \fIexpression\fP in
+.DS
+.ft CW
+\f(CWswitch (\fP\fIexpression\fP\f(CW)\fP \fIstatement\fP
+.ft
+.DE
+must be integral.
+A warning is given under the restricted option if the type is \f(CWlong\fP.
+.SH
+10 External definitions
+.PP
+See [4] for a discussion on this complicated issue.
+.SH
+10.1 External function definitions
+.PP
+Structures may be passed as arguments to functions, and returned
+by functions.
+.SH
+11.1 Lexical scope
+.PP
+Typedef names may be redeclared like any other variable name; the ice mentioned
+in \(sc11.1 is walked correctly.
+.SH
+12 Compiler control lines
+.PP
+Lines which do not occur within comment, and with \f(CW#\fP as first
+character, are interpreted as compiler control lines.
+There may be an arbitrary number of spaces, tabs and comments (collectively
+referred to as \fIwhite space\fP) following the \f(CW#\fP.
+Comments may contain newline characters.
+Control lines with only white space between the \f(CW#\fP and the line separator
+are skipped.
+.PP
+The #\f(CWinclude\fP, #\f(CWifdef\fP, #\f(CWifndef\fP, #\f(CWundef\fP, #\f(CWelse\fP and
+#\f(CWendif\fP control lines and line directives consist of a fixed number of
+arguments.
+The list of arguments may be followed by an arbitrary sequence of characters,
+in which comment is interpreted as such.
+(I.e., the text between \f(CW/*\fP and \f(CW*/\fP is skipped, regardless of
+newlines; note that commented-out lines beginning with \f(CW#\fP are not
+considered to be control lines.)
+.SH
+12.1 Token replacement
+.PP
+The replacement text of macros is taken to be a string of characters, in which
+an identifier may stand for a formal parameter, and in which comment is
+interpreted as such.
+Comments, and newline characters preceded by a backslash, in the replacement
+text are replaced by a space character.
+.PP
+The actual parameters of a macro are considered tokens and are
+balanced with regard to \f(CW()\fP, \f(CW{}\fP and \f(CW[]\fP.
+This prevents the use of macros like
+.DS
+.ft CW
+CTL([)
+.ft
+.DE
+.PP
+Formal parameters of a macro must have unique names within the formal-parameter
+list of that macro.
+.PP
+A message is given at the definition of a macro if the macro has 
+already been #\f(CWdefined\fP, while the number of formal parameters differs or
+the replacement texts are not equal (apart from leading and trailing
+white space).
+.PP
+Recursive use of macros is detected by the compiler.
+.PP
+Standard #\f(CWdefined\fP macros are
+.DS
+\f(CW__FILE__\fP  name of current input file as string constant
+\f(CW__DATE__\fP  current date as string constant; e.g. \f(CW"Tue Wed  2 14:45:23 1986"\fP
+\f(CW__LINE__\fP  current line number as an integer
+.DE
+.PP
+No message is given if \fIidentifier\fP is not known in
+.DS
+.ft CW
+#undef \fIidentifier\fP
+.ft
+.DE
+.SH
+12.2 File inclusion
+.PP
+A newline character is appended to each file which is included.
+.SH
+12.3 Conditional compilation
+.PP
+The #\f(CWif\fP, #\f(CWifdef\fP and #\f(CWifndef\fP control lines may be followed
+by an arbitrary number of
+.DS
+.ft CW
+#elif \fIconstant-expression\fP
+.ft
+.DE
+control lines, before the corresponding #\f(CWelse\fP or #\f(CWendif\fP
+is encountered.
+The construct
+.DS
+.ft CW
+#elif \fIconstant-expression\fP
+some text
+#endif /* corresponding to #elif */
+.ft
+.DE
+is equivalent to
+.DS
+.ft CW
+#else
+#if \fIconstant-expression\fP
+some text
+#endif /* corresponding to #if */
+#endif /* corresponding to #else */
+.ft
+.DE
+.PP
+The \fIconstant-expression\fP in #\f(CWif\fP and #\f(CWelif\fP control lines
+may contain the construction
+.DS
+.ft CW
+defined(\fIidentifier\fP)
+.ft
+.DE
+which is replaced by \f(CW1\fP, if \fIidentifier\fP has been #\f(CWdefined\fP,
+and by \f(CW0\fP, if not.
+.PP
+Comments in skipped lines are interpreted as such.
+.SH
+12.4 Line control
+.PP
+Line directives may occur in the following forms:
+.DS
+.ft CW
+#line \fIconstant\fP
+#line \fIconstant\fP "\fIfilename\fP"
+#\fIconstant\fP
+#\fIconstant\fP "\fIfilename\fP"
+.ft
+.DE
+Note that \fIfilename\fP is enclosed in double quotes.
+.SH
+14.2 Functions
+.PP
+If a pointer to a function is called, the function the pointer points to
+is called instead.
+.SH
+15 Constant expressions
+.PP
+The compiler distinguishes the following types of integral constant expressions
+.IP \(bu
+field-width specifier
+.IP \(bu
+case-entry specifier
+.IP \(bu
+array-size specifier
+.IP \(bu
+global variable initialization value
+.IP \(bu
+enum-value specifier
+.IP \(bu
+truth value in \f(CW#if\fP control line
+.LP
+.PP
+Constant integral expressions are compile-time evaluated while an effort
+is made to report overflow.
+Constant floating expressions are not compile-time evaluated.
+.NH
+Compiler flags
+.IP \fB\-C\fR
+Run the preprocessor stand-alone while maintaining the comments.
+Line directives are produced whenever needed.
+.IP \fB\-D\fP\fIname\fP=\fIstring-of-characters\fP
+.br
+Define \fIname\fR as macro with \fIstring-of-characters\fR as
+replacement text.
+.IP \fB\-D\fP\fIname\fP
+.br
+Equal to \fB\-D\fP\fIname\fP\fB=1\fP.
+.IP \fB\-E\fP
+Run the preprocessor stand alone, i.e.,
+list the sequence of input tokens and delete any comments.
+Line directives are produced whenever needed.
+.IP \fB\-I\fIpath\fR
+.br
+Prepend \fIpath\fR to the list of include directories.
+To put the directories "include", "sys/h" and "util/h" into the
+include directory list in that order, the user has to specify
+.DS
+.ft CW
+-Iinclude -Isys/h -Iutil/h
+.ft R
+.DE
+An empty \fIpath\fP causes the standard include
+directory (usually \f(CW/usr/include\fP) to be forgotten.
+.IP \fB\-M\fP\fIn\fP
+.br
+Set maximum significant identifier length to \fIn\fP.
+.IP \fB\-n\fP
+Suppress EM register messages.
+The user-declared variables are not stored into registers on the target
+machine.
+.IP \fB\-p\fP
+Generate the EM \fBfil\fP and \fBlin\fP instructions in order to enable
+an interpreter to keep track of the current location in the source code.
+.IP \fB\-P\fP
+Equivalent to \fB\-E\fP, but without line directives.
+.IP \fB\-R\fP
+Interpret the input as restricted C (according to the language as 
+described in [1]).
+.IP \fB\-T\fP\fIpath\fP
+.br
+Create temporary files, if necessary, in directory \fIpath\fP.
+.IP \fB\-U\fP\fIname\fP
+.br
+Get rid of the compiler-predefined macro \fIname\fP, i.e.,
+consider
+.DS
+.ft CW
+#undef \fIname\fP
+.ft R
+.DE
+to appear in the beginning of the file.
+.IP \fB\-V\fIcm\fR.\fIn\fR,\ \fB\-V\fIcm\fR.\fIncm\fR.\fIn\fR\ ...
+.br
+Set the size and alignment requirements.
+The letter \fIc\fR indicates the simple type, which is one of
+\fBs\fR(short), \fBi\fR(int), \fBl\fR(long), \fBf\fR(float), \fBd\fR(double)
+or \fBp\fR(pointer).
+If \fIc\fR is \fBS\fP or \fBU\fP, then \fIn\fP is taken to be the initial
+alignment of structs or unions, respectively.
+The effective alignment of a struct or union is the least common multiple
+of the initial struct/union alignment and the alignments of its members.
+The \fIm\fR parameter can be used to specify the length of the type (in bytes)
+and the \fIn\fR parameter for the alignment of that type.
+Absence of \fIm\fR or \fIn\fR causes the default value to be retained.
+To specify that the bitfields should be right adjusted instead of the
+default left adjustment, specify \fBr\fR as \fIc\fR parameter.
+.IP \fB\-w\fR
+Suppress warning messages
+.IP \fB\-\-\fIcharacter\fR
+.br
+Set debug-flag \fIcharacter\fP.
+This enables some special features offered by a debug and develop version of
+the compiler.
+Some particular flags may be recognized, others may have surprising effects.
+.RS
+.IP \fBd\fP
+Generate a dependency graph, reflecting the calling structure of functions.
+Lines of the form
+.DS
+.ft CW
+DFA: \fIcalling-function\fP: \fIcalled-function\fP
+.ft
+.DE
+are generated whenever a function call is encountered.
+.IP \fBf\fP
+Dump whole identifier table, including macros and reserved words.
+.IP \fBh\fP
+Supply hash-table statistics.
+.IP \fBi\fP
+Print names of included files.
+.IP \fBm\fP
+Supply statistics concerning the memory allocation.
+.IP \fBt\fP
+Dump table of identifiers.
+.IP \fBu\fP
+Generate extra statistics concerning the predefined types and identifiers.
+Works in combination with \fBf\fP or \fBt\fP.
+.IP \fBx\fP
+Print expression trees in human-readable format.
+.RE
+.LP
+.SH
+References
+.IP [1]
+Brian W. Kernighan, Dennis M. Ritchie,
+.I
+The C Programming Language
+.R
+.IP [2]
+L. Rosler,
+.I
+Draft Proposed Standard - Programming Language C,
+.R
+ANSI X3J11 Language Subcommittee
+.IP [3]
+Erik H. Baalbergen, Dick Grune, Maarten Waage,
+.I
+The CEM Compiler,
+.R
+Informatica Manual IM-4, Dept. of Mathematics and Computer Science, Vrije
+Universiteit, Amsterdam, The Netherlands
+.IP [4]
+Erik H. Baalbergen,
+.I
+Modeling global declarations in C,
+.R
+internal paper
+.LP
+.bp
+.SH
+Appendix A - Enumeration Type
+.PP
+The syntax is
+.sp
+.RS
+.I enum-specifier :
+.RS
+\&\f(CWenum\fP { \fIenum-list\fP }
+.br
+\&\f(CWenum\fP \fIidentifier\fP { \fIenum-list\fP }
+.br
+\&\f(CWenum\fP \fIidentifier\fP
+.RE
+.sp
+\&\fIenum-list\fP :
+.RS
+\&\fIenumerator\fP
+.br
+\&\fIenum-list\fP , \fIenumerator\fP
+.RE
+.sp
+\&\fIenumerator\fP :
+.RS
+\&\fIidentifier\fP
+.br
+\&\fIidentifier\fP = \fIconstant-expression\fP
+.RE
+.sp
+.RE
+The identifier has the same role as the structure tag in a struct specification.
+It names a particular enumeration type.
+.PP
+The identifiers in the enum-list are declared as constants, and may appear
+whenever constants are required.
+If no enumerators with
+.B = 
+appear, then the values of the constants begin at 0 and increase by 1 as the
+declaration is read from left to right.
+An enumerator with
+.B =
+gives the associated identifier the value indicated; subsequent identifiers 
+continue the progression from the assigned value.
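+.PP
+For instance, in the illustrative declaration
+.DS
+.ft CW
+enum color { RED, GREEN = 5, BLUE };
+.ft R
+.DE
+RED receives the value 0, GREEN the value 5, and BLUE continues the
+progression with the value 6.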
+.PP
+Enumeration tags and constants must all be distinct, and, unlike structure
+tags and members, are drawn from the same set as ordinary identifiers.
+.PP
+Objects of a given enumeration type are regarded as having a type distinct
+from objects of all other types.
+.bp
+.SH
+Appendix B:  C grammar in LL(1) form
+.PP
+The \fBbold-faced\fP and \fIitalicized\fP tokens represent terminal symbols.
+.vs 16
+.nf
+\fBexternal definitions\fP
+program:  external-definition*
+external-definition:  ext-decl-specifiers [declarator [function  |  non-function]  |  '\fB;\fP']  |  asm-statement
+ext-decl-specifiers:  decl-specifiers?
+non-function:  initializer? ['\fB,\fP' init-declarator]* '\fB;\fP'
+function:  declaration* compound-statement
+.sp 1
+\fBdeclarations\fP
+declaration:  decl-specifiers init-declarator-list? '\fB;\fP'
+decl-specifiers:  other-specifier+ [single-type-specifier other-specifier*]?  |  single-type-specifier other-specifier*
+other-specifier:  \fBauto\fP  |  \fBstatic\fP  |  \fBextern\fP  |  \fBtypedef\fP  |  \fBregister\fP  |  \fBshort\fP  |  \fBlong\fP  |  \fBunsigned\fP
+type-specifier:  decl-specifiers
+single-type-specifier:  \fItype-identifier\fP  |  struct-or-union-specifier  |  enum-specifier
+init-declarator-list:  init-declarator ['\fB,\fP' init-declarator]*
+init-declarator:  declarator initializer?
+declarator:  primary-declarator ['\fB(\fP' formal-list ? '\fB)\fP'  |  arrayer]*  |  '\fB*\fP' declarator
+primary-declarator:  identifier  |  '\fB(\fP' declarator '\fB)\fP'
+arrayer:  '\fB[\fP' constant-expression? '\fB]\fP'
+formal-list:  formal ['\fB,\fP' formal]*
+formal:  identifier
+enum-specifier:  \fBenum\fP [enumerator-pack  |  identifier enumerator-pack?]
+enumerator-pack:  '\fB{\fP' enumerator ['\fB,\fP' enumerator]* '\fB,\fP'? '\fB}\fP'
+enumerator:  identifier ['\fB=\fP' constant-expression]?
+struct-or-union-specifier:  [ \fBstruct\fP  |  \fBunion\fP] [ struct-declaration-pack  |  identifier struct-declaration-pack?]
+struct-declaration-pack:  '\fB{\fP' struct-declaration+ '\fB}\fP'
+struct-declaration:  type-specifier struct-declarator-list '\fB;\fP'?
+struct-declarator-list:  struct-declarator ['\fB,\fP' struct-declarator]*
+struct-declarator:  declarator bit-expression?  |  bit-expression
+bit-expression:  '\fB:\fP' constant-expression
+initializer:  '\fB=\fP'? initial-value
+cast:  '\fB(\fP' type-specifier abstract-declarator '\fB)\fP'
+abstract-declarator:  primary-abstract-declarator ['\fB(\fP' '\fB)\fP'  |  arrayer]*  |  '\fB*\fP' abstract-declarator
+primary-abstract-declarator:  ['\fB(\fP' abstract-declarator '\fB)\fP']?
+.sp 1
+\fBstatements\fP
+statement:
+	 expression-statement
+	| label '\fB:\fP' statement
+	| compound-statement
+	| if-statement
+	| while-statement
+	| do-statement
+	| for-statement
+	| switch-statement
+	| case-statement
+	| default-statement
+	| break-statement
+	| continue-statement
+	| return-statement
+	| jump
+	| '\fB;\fP'
+	| asm-statement
+	;
+expression-statement:  expression '\fB;\fP'
+label:  identifier
+if-statement:  \fBif\fP '\fB(\fP' expression '\fB)\fP' statement [\fBelse\fP statement]?
+while-statement:  \fBwhile\fP '\fB(\fP' expression '\fB)\fP' statement
+do-statement:  \fBdo\fP statement \fBwhile\fP '\fB(\fP' expression '\fB)\fP' '\fB;\fP'
+for-statement:  \fBfor\fP '\fB(\fP' expression? '\fB;\fP' expression? '\fB;\fP' expression? '\fB)\fP' statement
+switch-statement:  \fBswitch\fP '\fB(\fP' expression '\fB)\fP' statement
+case-statement:  \fBcase\fP constant-expression '\fB:\fP' statement
+default-statement:  \fBdefault\fP '\fB:\fP' statement
+break-statement:  \fBbreak\fP '\fB;\fP'
+continue-statement:  \fBcontinue\fP '\fB;\fP'
+return-statement:  \fBreturn\fP expression? '\fB;\fP'
+jump:  \fBgoto\fP identifier '\fB;\fP'
+compound-statement:  '\fB{\fP' declaration* statement* '\fB}\fP'
+asm-statement:  \fBasm\fP '\fB(\fP' \fIstring\fP '\fB)\fP' '\fB;\fP'
+.sp 1
+\fBexpressions\fP
+initial-value:  assignment-expression  |  initial-value-pack
+initial-value-pack:  '\fB{\fP' initial-value-list '\fB}\fP'
+initial-value-list:  initial-value ['\fB,\fP' initial-value]* '\fB,\fP'?
+primary:  \fIidentifier\fP  |  constant  |  \fIstring\fP  |  '\fB(\fP' expression '\fB)\fP'
+secundary:  primary [index-pack  |  parameter-pack  |  selection]*
+index-pack:  '\fB[\fP' expression '\fB]\fP'
+parameter-pack:  '\fB(\fP' parameter-list? '\fB)\fP'
+selection:  ['\fB.\fP'  |  '\fB\->\fP'] identifier
+parameter-list:  assignment-expression ['\fB,\fP' assignment-expression]*
+postfixed:  secundary postop?
+unary:  cast unary  |  postfixed  |  unop unary  |  size-of
+size-of:  \fBsizeof\fP [cast  |  unary]
+binary-expression:  unary [binop binary-expression]*
+conditional-expression:  binary-expression ['\fB?\fP' expression '\fB:\fP' assignment-expression]?
+assignment-expression:  conditional-expression [asgnop assignment-expression]?
+expression:  assignment-expression ['\fB,\fP' assignment-expression]*
+unop:  '\fB*\fP'  |  '\fB&\fP'  |  '\fB\-\fP'  |  '\fB!\fP'  |  '\fB~ \fP'  |  '\fB++\fP'  |  '\fB\-\-\fP'
+postop:  '\fB++\fP'  |  '\fB\-\-\fP'
+multop:  '\fB*\fP'  |  '\fB/\fP'  |  '\fB%\fP'
+addop:  '\fB+\fP'  |  '\fB\-\fP'
+shiftop:  '\fB<<\fP'  |  '\fB>>\fP'
+relop:  '\fB<\fP'  |  '\fB>\fP'  |  '\fB<=\fP'  |  '\fB>=\fP'
+eqop:  '\fB==\fP'  |  '\fB!=\fP'
+arithop:  multop  |  addop  |  shiftop  |  '\fB&\fP'  |  '\fB^ \fP'  |  '\fB|\fP'
+binop:  arithop  |  relop  |  eqop  |  '\fB&&\fP'  |  '\fB||\fP'
+asgnop:  '\fB=\fP'  |  '\fB+\fP' '\fB=\fP'  |  '\fB\-\fP' '\fB=\fP'  |  '\fB*\fP' '\fB=\fP'  |  '\fB/\fP' '\fB=\fP'  |  '\fB%\fP' '\fB=\fP'
+	| '\fB<<\fP' '\fB=\fP'  |  '\fB>>\fP' '\fB=\fP'  |  '\fB&\fP' '\fB=\fP'  |  '\fB^ \fP' '\fB=\fP'  |  '\fB|\fP' '\fB=\fP'
+	| '\fB+=\fP'  |  '\fB\-=\fP'  |  '\fB*=\fP'  |  '\fB/=\fP'  |  '\fB%=\fP'
+	| '\fB<<=\fP'  |  '\fB>>=\fP'  |  '\fB&=\fP'  |  '\fB^=\fP'  |  '\fB|=\fP'
+constant:  \fIinteger\fP  |  \fIfloating\fP
+constant-expression:  assignment-expression
+identifier:  \fIidentifier\fP  |  \fItype-identifier\fP
+.fi

+ 18 - 0
doc/ego/.distr

@@ -0,0 +1,18 @@
+proto.make
+bo
+ca
+cf
+cj
+cs
+ic
+il
+intro
+lv
+ov
+ra
+refs.gen
+refs.opt
+refs.stat
+sp
+sr
+ud

+ 55 - 0
doc/ego/Makefile

@@ -0,0 +1,55 @@
+REFS=-p refs.opt -p refs.stat -p refs.gen
+INTRO=intro/intro?
+OV=ov/ov?
+IC=ic/ic?
+CF=cf/cf?
+IL=il/il?
+SR=sr/sr?
+CS=cs/cs?
+SP=sp/sp?
+UD=ud/ud?
+LV=lv/lv?
+CJ=cj/cj?
+BO=bo/bo?
+RA=ra/ra?
+CA=ca/ca?
+EGO=$(INTRO) $(OV) $(IC) $(CF) $(IL) $(SR) $(CS) $(SP) $(CJ) $(BO) \
+    $(UD) $(LV) $(RA) $(CA)
+REFER=refer
+TROFF=troff
+TBL=tbl
+TARGET=-Tlp
+
+../ego.doc:	refs.opt refs.stat refs.gen intro/head intro/tail $(EGO)
+	 $(REFER) -sA+T -l4,2 $(REFS) intro/head $(EGO) intro/tail | $(TBL) > ../ego.doc
+
+ego.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(EGO)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(EGO) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > ego.f
+intro.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(INTRO)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(INTRO) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > intro.f
+ov.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(OV)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(OV) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > ov.f
+ic.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(IC)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(IC) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > ic.f
+cf.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(CF)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(CF) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > cf.f
+il.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(IL)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(IL) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > il.f
+sr.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(SR)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(SR) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > sr.f
+cs.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(CS)
+	 $(REFER)	-sA+T -l4,2 $(REFS) intro/head $(CS) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > cs.f
+sp.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(SP)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(SP) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > sp.f
+cj.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(CJ)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(CJ) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > cj.f
+bo.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(BO)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(BO) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > bo.f
+ud.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(UD)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(UD) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > ud.f
+lv.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(LV)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(LV) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > lv.f
+ra.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(RA)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(RA) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > ra.f
+ca.f:	refs.opt refs.stat refs.gen intro/head intro/tail $(CA)
+	 $(REFER)  -sA+T -l4,2 $(REFS) intro/head $(CA) intro/tail | $(TBL) | $(TROFF) $(TARGET) -ms > ca.f

+ 1 - 0
doc/ego/bo/.distr

@@ -0,0 +1 @@
+bo1

+ 162 - 0
doc/ego/bo/bo1

@@ -0,0 +1,162 @@
+.bp
+.NH 1
+Branch Optimization
+.NH 2
+Introduction
+.PP
+The Branch Optimization phase (BO) performs two related
+(branch) optimizations.
+.NH 3
+Fusion of basic blocks
+.PP
+If two basic blocks B1 and B2 have the following properties:
+.DS
+SUCC(B1) = {B2}
+PRED(B2) = {B1}
+.DE
+then B1 and B2 can be combined into one basic block.
+If B1 ends in an unconditional jump to the beginning of B2, this
+jump can be eliminated,
+hence saving a little execution time and object code size.
+This technique can be used to eliminate some deficiencies
+introduced by the front ends (for example, the "C" front end
+translates switch statements inefficiently due to its one pass nature).
+.NH 3
+While-loop optimization
+.PP
+The straightforward way to translate a while loop is to
+put the test for loop termination at the beginning of the loop.
+.DS
+while cond loop                       \kyLAB1: \kxTest cond
+   body of the loop     --->\h'|\nxu'Branch On False To LAB2
+end loop\h'|\nxu'code for body of loop
+\h'|\nxu'Branch To LAB1
+\h'|\nyu'LAB2:
+
+Fig. 10.1 Example of Branch Optimization
+.DE
+If the condition fails at the Nth iteration, the following code
+gets executed (dynamically):
+.DS
+.TS
+l l l.
+N	*	conditional branch (which fails N-1 times)
+N-1	*	unconditional branch
+N-1	*	body of the loop
+.TE
+.DE
+An alternative translation is:
+.DS
+     Branch To LAB2
+LAB1:
+     code for body of loop
+LAB2:
+     Test cond
+     Branch On True To LAB1
+.DE
+This translation results in the following profile:
+.DS
+.TS
+l l l.
+N	*	conditional branch (which succeeds N-1 times)
+1	*	unconditional branch
+N-1	*	body of the loop
+.TE
+.DE
+So the second translation will be significantly faster if N >> 2.
+If N=2, execution time will be slightly increased.
+On the average, the program will be speeded up.
+Note that the code sizes of the two translations will be the same.
+.NH 2
+Implementation
+.PP
+The basic block fusion technique is implemented
+by traversing the control flow graph of a procedure,
+looking for basic blocks B with only one successor (S).
+If one is found, it is checked whether S has only one predecessor
+(which has to be B).
+If so, the two basic blocks can in principle be combined.
+However, as one basic block will have to be moved,
+the textual order of the basic blocks will be altered.
+This reordering causes severe problems in the presence
+of conditional jumps.
+For example, if S ends in a conditional branch,
+the basic block that comes textually next to S must stay
+in that position.
+So the transformation in Fig. 10.2 is illegal.
+.DS
+.TS
+l l l l l.
+LAB1:	S1		LAB1:	S1
+	BRA LAB2			S2
+	...	-->		BEQ LAB3
+LAB2:	S2			...
+	BEQ LAB3			S3
+	S3
+.TE
+
+Fig. 10.2 An illegal transformation of Branch Optimization
+.DE
+If B is moved towards S the same problem occurs if the block before B
+ends in a conditional jump.
+The problem could be solved by adding one extra branch,
+but this would reduce the gains of the optimization to zero.
+Hence the optimization will only be done if the block that
+follows S (in the textual order) is not a successor of S.
+This condition assures that S does not end in a conditional branch.
+The condition always holds for the code generated by the "C"
+front end for a switch statement.
+.PP
+After the transformation has been performed,
+some attributes of the basic blocks involved (such as successor and
+predecessor sets and immediate dominator) must be recomputed.
+.PP
+The while-loop technique is applied to one loop at a time.
+The list of basic blocks of the loop is traversed to find
+a block B that satisfies the following conditions:
+.IP 1.
+the textually next block to B is not part of the loop
+.IP 2.
+the last instruction of B is an unconditional branch;
+hence B has only one successor, say S
+.IP 3.
+the textually next block of B is a successor of S
+.IP 4.
+the last instruction of S is a conditional branch
+.LP
+If such a block B is found, the control flow graph is changed
+as depicted in Fig. 10.3.
+.DS
+.ft 5
+       |                                    |
+       |                                    v
+       v                                    |
+       |-----<------|                       ----->-----|
+   ____|____        |                                  |
+   |       |        |               |-------|          |
+   |  S1   |        |               |       v          |
+   |  Bcc  |        |               |     ....         |
+|--|       |        |               |                  |
+|  ---------        |               |   ----|----      |
+|                   |               |   |       |      |
+|     ....          ^               |   |  S2   |      |
+|                   |               |   |       |      |
+|   ---------       |               |   |       |      |
+v   |       |       |               ^   ---------      |
+|   |  S2   |       |               |       |          |
+|   | BRA   |       |               |       |-----<-----
+|   |       |       |               |       v
+|   ---------       |               |   ____|____
+|       |           |               |   |       |
+|       ------>------               |   |  S1   |
+|                                   |   |  Bnn  |
+|-------|                           |   |       |
+        |                           |   ----|----
+        v                           |       |
+                                    |----<--|
+                                            |
+                                            v
+.ft R
+
+Fig. 10.3 Transformation of the CFG by Branch Optimization
+.DE
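+.PP
+A corresponding C sketch of the candidate test is given below,
+reusing the illustrative struct block of the previous sketch;
+in_loop() is an invented helper that reports whether a block belongs
+to the loop under consideration.
+.DS
+.ft 5
+/*
+ * Sketch: does block b satisfy the four conditions listed above?
+ * The numbers in the comments refer to those conditions.
+ */
+static int rotation_candidate(struct block *b,
+                              int (*in_loop)(struct block *))
+{
+    struct block *after = b->next_textual;
+    struct block *s;
+    int i;
+
+    if (after == 0 || in_loop(after))               /* 1 */
+        return 0;
+    if (b->last != UNCOND_BRANCH || b->nsucc != 1)  /* 2 */
+        return 0;
+    s = b->succ[0];
+    if (s->last != COND_BRANCH)                     /* 4 */
+        return 0;
+    for (i = 0; i < s->nsucc; i++)                  /* 3 */
+        if (s->succ[i] == after)
+            return 1;
+    return 0;
+}
+.ft R
+.DE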

+ 1 - 0
doc/ego/ca/.distr

@@ -0,0 +1 @@
+ca1

+ 65 - 0
doc/ego/ca/ca1

@@ -0,0 +1,65 @@
+.bp
+.NH 1
+Compact assembly generation
+.NH 2
+Introduction
+.PP
+The "Compact Assembly generation phase" (CA) transforms the
+intermediate code of the optimizer into EM code in
+Compact Assembly Language (CAL) format.
+In the intermediate code, all program entities
+(such as procedures, labels, global variables)
+are denoted by a unique identifying number (see 3.5).
+In the CAL output of the optimizer these numbers have to
+be replaced by normal identifiers (strings).
+The original identifiers of the input program are used whenever possible.
+Recall that the IC phase generates two files that can be
+used to map unique identifying numbers to procedure names and
+global variable names.
+For instruction labels CA always generates new names.
+The reasons for doing so are:
+.IP -
+instruction labels are only visible inside one procedure, so they can
+not be referenced in other modules
+.IP -
+the names are not very suggestive anyway, as they must be integer numbers
+.IP -
+the optimizer considerably changes the control structure of the program,
+so there is really no one-to-one mapping between instruction labels
+in the input program and those in the output program.
+.LP
+As the optimizer combines all input modules into one module,
+visibility problems may occur.
+Two modules M1 and M2 can both define an identifier X (provided that
+X is not externally visible in any of these modules).
+If M1 and M2 are combined into one module M, two distinct
+entities with the same name would exist in M, which
+is not allowed.
+.[~[
+tanenbaum machine architecture
+.], section 11.1.4.3]
+In these cases, CA invents a new unique name for one of the entities.
+.NH 2
+Implementation
+.PP
+CA first reads the files containing the procedure and global variable names
+and stores the names in two tables.
+It scans these tables to make sure that all names are different.
+Subsequently it reads the EM text, one procedure at a time,
+and outputs it in CAL format.
+The major part of the code that does the latter transformation
+is adapted from the EM Peephole Optimizer.
+.PP
+The main problem of the implementation of CA is to
+assure that the visibility rules are obeyed.
+If an identifier must be externally visible (i.e.
+it was externally visible in the input program)
+and the identifier is defined (in the output program) before
+being referenced,
+an EXA or EXP pseudo must be generated for it.
+(Note that the optimizer may change the order of definitions and
+references, so some pseudos may be needed that were not
+present in the input program).
+On the other hand, an identifier may be only internally visible.
+If such an identifier is referenced before being defined,
+an INA or INP pseudo must be emitted prior to its first reference.
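+.PP
+A minimal C sketch of this decision (the function and parameter names
+are invented; the real CA code also has to decide where the pseudos
+are placed):
+.DS
+.ft 5
+#include <stddef.h>
+
+/*
+ * Sketch: which visibility pseudo, if any, is needed for a name?
+ * EXP/EXA export a procedure/data name, INP/INA declare it to be
+ * internal before its first reference.
+ */
+static const char *visibility_pseudo(int is_proc,
+                                     int externally_visible,
+                                     int defined_before_use)
+{
+    if (externally_visible && defined_before_use)
+        return is_proc ? "EXP" : "EXA";
+    if (!externally_visible && !defined_before_use)
+        return is_proc ? "INP" : "INA";
+    return NULL;    /* no pseudo needed */
+}
+.ft R
+.DE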

+ 6 - 0
doc/ego/cf/.distr

@@ -0,0 +1,6 @@
+cf1
+cf2
+cf3
+cf4
+cf5
+cf6

+ 94 - 0
doc/ego/cf/cf1

@@ -0,0 +1,94 @@
+.bp
+.NH
+The Control Flow Phase
+.PP
+In the previous chapter we described the intermediate
+code of the global optimizer.
+We also specified which part of this code
+was constructed by the IC phase of the optimizer.
+The Control Flow Phase (\fICF\fR) does
+the remainder of the job,
+i.e. it determines:
+.IP -
+the control flow graphs
+.IP -
+the loop tables
+.IP -
+the calling, change and use attributes of
+the procedure table entries
+.LP
+CF operates on one procedure at a time.
+For every procedure it first reads the EM instructions
+from the EM-text file and groups them into basic blocks.
+For every basic block, its successors and
+predecessors are determined,
+resulting in the control flow graph.
+Next, the immediate dominator of every basic block
+is computed.
+Using these dominators, any loop in the
+procedure is detected.
+Finally, interprocedural analysis is done,
+after which we will know the global effects of
+every procedure call on its environment.
+.sp
+CF uses the same internal data structures
+for the procedure table and object table as IC.
+.NH 2
+Partitioning into basic blocks
+.PP
+With regard to flow of control, we distinguish
+three kinds of EM instructions:
+jump instructions, instruction label definitions and
+normal instructions.
+Jump instructions are all conditional or unconditional
+branch instructions,
+the case instructions (CSA/CSB)
+and the RET (return) instruction.
+A procedure call (CAL) is not considered to be a jump.
+A defining occurrence of an instruction label
+is regarded as an EM instruction.
+.PP
+An instruction starts
+a new basic block, in any of the following cases:
+.IP 1.
+It is the first instruction of a procedure
+.IP 2.
+It is the first of a list of instruction label
+defining occurrences
+.IP 3.
+It follows a jump
+.LP
+If there are several consecutive instruction labels
+(which is highly unusual),
+all of them are put in the same basic block.
+Note that several cases may overlap,
+e.g. a label definition at the beginning of a procedure
+or a label following a jump.
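+.PP
+In C, these rules can be sketched as a single predicate
+(the names are invented for this illustration):
+.DS
+.ft 5
+#include <stddef.h>
+
+enum ikind { NORMAL, JUMP, LABDEF };   /* classification given above */
+
+/*
+ * Sketch: does instruction i start a new basic block?  prev is the
+ * classification of the previous instruction of the same procedure,
+ * or NULL if i is the first instruction.
+ */
+static int starts_block(enum ikind i, const enum ikind *prev)
+{
+    if (prev == NULL)                    /* rule 1 */
+        return 1;
+    if (i == LABDEF && *prev != LABDEF)  /* rule 2 */
+        return 1;
+    return *prev == JUMP;                /* rule 3 */
+}
+.ft R
+.DE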
+.PP
+A simple Finite State Machine is used to model
+the above rules.
+It also recognizes the end of a procedure,
+marked by an END pseudo.
+The basic blocks are stored internally as a doubly linked
+linear list.
+The blocks are linked in textual order.
+Every node of this list has the attributes described
+in the previous chapter (see syntax rule for
+basic_block).
+Furthermore, every node contains a pointer to its
+EM instructions,
+which are represented internally
+as a linear, doubly linked list,
+just as in the IC phase.
+However, instead of one list per procedure (as in IC)
+there is now one list per basic block.
+.PP
+On the fly, a table is built that maps
+every label identifier to the label definition
+instruction.
+This table is used for computing the control flow.
+The table is stored as a dynamically allocated array.
+The length of the array is the number of labels
+of the current procedure;
+this value can be found in the procedure table,
+where it was stored by IC.

+ 50 - 0
doc/ego/cf/cf2

@@ -0,0 +1,50 @@
+.NH 2
+Control Flow
+.PP
+A \fIsuccessor\fR of a basic block B is a block C
+that can be executed immediately after B.
+C is said to be a \fIpredecessor\fR of B.
+A block ending with a RET instruction
+has no successors.
+Such a block is called a \fIreturn block\fR.
+Any block that has no predecessors cannot be
+executed at all (i.e. it is unreachable),
+unless it is the first block of a procedure,
+called the \fIprocedure entry block\fR.
+.PP
+Internally, the successor and predecessor
+attributes of a basic block are stored as \fIsets\fR.
+Alternatively, one may regard all these
+sets of all basic blocks as a conceptual \fIgraph\fR,
+in which there is an edge from B to C if C
+is in the successor set of B.
+We call this conceptual graph
+the \fIControl Flow Graph\fR.
+.PP
+The only successor of a basic block ending on an
+unconditional branch instruction is the block that
+contains the label definition of the target of the jump.
+The target instruction can be found via the LAB_ID
+that is the operand of the jump instruction,
+by using the label-map table mentioned
+above.
+If the last instruction of a block is a
+conditional jump,
+the successors are the target block and the textually
+next block.
+The last instruction can also be a case jump
+instruction (CSA or CSB).
+We then analyze the case descriptor,
+to find all possible target instructions
+and their associated blocks.
+We require the case descriptor to be allocated in
+a ROM, so it cannot be changed dynamically.
+A case jump via an alterable descriptor could in principle
+go to any label in the program.
+In the presence of such an uncontrolled jump,
+hardly any optimization can be done.
+We do not expect any front end to generate such a descriptor,
+however, because of the controlled nature
+of case statements in high level languages.
+If the basic block does not end in a jump instruction,
+its only successor is the textually next block.
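+.PP
+A C sketch of the successor computation is given below.
+The names are invented; label_block[] stands for the label-map table
+mentioned above, simplified here to map a LAB_ID directly to a block,
+and the targets of a case jump are omitted.
+.DS
+.ft 5
+enum last_kind { FALLTHROUGH, UNCOND_BRANCH, COND_BRANCH,
+                 CASE_JUMP, RETURN_BLOCK };
+
+struct block {
+    struct block  *next_textual;   /* textually next block         */
+    int            target_lab;     /* LAB_ID of the branch target  */
+    enum last_kind last;           /* kind of the last instruction */
+};
+
+/*
+ * Sketch: store the successors of b in succ[] and return how many
+ * there are.
+ */
+static int successors(struct block *b, struct block *label_block[],
+                      struct block *succ[])
+{
+    int n = 0;
+
+    switch (b->last) {
+    case RETURN_BLOCK:                 /* RET: no successors       */
+        break;
+    case UNCOND_BRANCH:                /* the target block only    */
+        succ[n++] = label_block[b->target_lab];
+        break;
+    case COND_BRANCH:                  /* target and next block    */
+        succ[n++] = label_block[b->target_lab];
+        succ[n++] = b->next_textual;
+        break;
+    case CASE_JUMP:                    /* CSA/CSB: targets come    */
+        break;                         /* from the descriptor      */
+    default:                           /* fall through             */
+        succ[n++] = b->next_textual;
+        break;
+    }
+    return n;
+}
+.ft R
+.DE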

+ 53 - 0
doc/ego/cf/cf3

@@ -0,0 +1,53 @@
+.NH 2
+Immediate dominators
+.PP
+A basic block B dominates a block C if every path
+in the control flow graph from the procedure entry block
+to C goes through B.
+The immediate dominator of C is the closest dominator
+of C on any path from the entry block.
+See also
+.[~[
+aho compiler design
+.], section 13.1.]
+.PP
+There are a number of algorithms to compute
+the immediate dominator relation.
+.IP 1.
+Purdom and Moore give an algorithm that is
+easy to program and easy to describe (although the
+description they give is unreadable;
+it is given in a very messy Algol60 program full of gotos).
+.[
+predominators 
+.]
+.IP 2.
+Aho and Ullman present a bitvector algorithm, which is also
+easy to program and to understand.
+(See 
+.[~[
+aho compiler design
+.], section 13.1.]).
+.IP 3.
+Lengauer and Tarjan introduce a fast algorithm that is
+hard to understand, yet remarkably easy to implement.
+.[
+lengauer dominators
+.]
+.LP
+The Purdom-Moore algorithm is very slow if the
+number of basic blocks in the flow graph is large.
+The Aho-Ullman algorithm in fact computes the
+dominator relation,
+from which the immediate dominator relation can be computed
+in time quadratic to the number of basic blocks, worst case.
+The storage requirement is also quadratic to the number
+of blocks.
+The running time of the third algorithm is proportional
+to:
+.DS
+(number of edges in the graph) * log(number of blocks).
+.DE
+We have chosen this algorithm because it is fast
+(as shown by experiments done by Lengauer and Tarjan),
+it is easy to program and requires little data space.

+ 93 - 0
doc/ego/cf/cf4

@@ -0,0 +1,93 @@
+.NH 2
+Loop detection
+.PP
+Loops are detected by using the loop construction
+algorithm of.
+.[~[
+aho compiler design
+.], section 13.1.]
+This algorithm uses \fIback edges\fR.
+A back edge is an edge from B to C in the CFG,
+whose head (C) dominates its tail (B).
+The loop associated with this back edge
+consists of C plus all nodes in the CFG
+that can reach B without going through C.
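+.PP
+A C sketch of this construction is shown below (all names invented);
+the CFG is assumed to be given as predecessor lists indexed by block
+number.
+.DS
+.ft 5
+#define MAXBLOCKS 512
+
+/*
+ * Sketch: mark in in_loop[] all blocks of the natural loop of the
+ * back edge tail -> head, i.e. head plus every block that can reach
+ * tail without passing through head.  Block b has npred[b]
+ * predecessors, stored in pred[b][0 .. npred[b]-1].
+ */
+static void natural_loop(int head, int tail,
+                         int npred[], int *pred[], char in_loop[])
+{
+    int todo[MAXBLOCKS];
+    int n = 0, i, b;
+
+    in_loop[head] = 1;            /* the head is always in the loop */
+    if (!in_loop[tail]) {
+        in_loop[tail] = 1;
+        todo[n++] = tail;
+    }
+    while (n > 0) {               /* add predecessors, but never    */
+        b = todo[--n];            /* walk through the head          */
+        for (i = 0; i < npred[b]; i++)
+            if (!in_loop[pred[b][i]]) {
+                in_loop[pred[b][i]] = 1;
+                todo[n++] = pred[b][i];
+            }
+    }
+}
+.ft R
+.DE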
+.PP
+As an example of how the algorithm works,
+consider the piece of program of Fig. 4.1.
+First just look at the program and try to
+see what part of the code constitutes the loop.
+.DS
+loop
+   if cond then                       1
+      -- lots of simple
+      -- assignment
+      -- statements              2          3
+      exit; -- exit loop
+   else
+      S; -- one statement
+   end if;
+end loop;
+
+Fig. 4.1 A misleading loop
+.DE
+Although a human being may be easily deceived
+by the brackets "loop" and "end loop",
+the loop detection algorithm will correctly
+reply that only the test for "cond" and
+the single statement in the false-part
+of the if statement are part of the loop!
+The statements in the true-part only get
+executed once, so there really is no reason at all
+to say they're part of the loop too.
+The CFG contains one back edge, "3->1".
+As node 3 cannot be reached from node 2,
+the latter node is not part of the loop.
+.PP
+A source of problems with the algorithm is the fact
+that different back edges may result in
+the same loop.
+Such an ill-structured loop is
+called a \fImessy\fR loop.
+After a loop has been constructed, it is checked
+if it is really a new loop.
+.PP
+Loops can partly overlap, without one being nested
+inside the other.
+This is the case in the program of Fig. 4.2.
+.DS
+1:                              1
+   S1;
+2:
+   S2;                          2
+   if cond then
+      goto 4;
+   S3;                     3         4
+   goto 1;
+4:
+   S4;
+   goto 1;
+
+Fig. 4.2 Partly overlapping loops
+.DE
+There are two back edges "3->1" and "4->1",
+resulting in the loops {1,2,3} and {1,2,4}.
+With every basic block we associate a set of
+all loops it is part of.
+It is not sufficient just to record its
+most enclosing loop.
+.PP
+After all loops of a procedure are detected, we determine
+the nesting level of every loop.
+Finally, we find all strong and firm blocks of the loop.
+If the loop has only one back edge (i.e. it is not messy),
+the set of firm blocks consists of the
+head of this back edge and its dominators
+in the loop (including the loop entry block).
+A firm block is also strong if it is not a
+successor of a block that may exit the loop;
+a block may exit a loop if it has an (immediate) successor
+that is not part of the loop.
+For messy loops we do not determine the strong
+and firm blocks. These loops are expected
+to occur very rarely.

+ 82 - 0
doc/ego/cf/cf5

@@ -0,0 +1,82 @@
+.NH 2
+Interprocedural analysis
+.PP
+It is often desirable to know the effects
+a procedure call may have.
+The optimization below is only possible if
+we know for sure that the call to P cannot
+change A.
+.DS
+.TS
+l l.
+A := 10;	A:= 10;
+P;  -- procedure call    -->	P;
+B := A + 2;	B := 12;
+.TE
+.DE
+Although it is not possible to predict exactly
+all the effects a procedure call has, we may
+determine a kind of upper bound for it.
+So we compute all variables that may be
+changed by P, although they need not be
+changed at every invocation of P.
+We can get hold of this set by just looking
+at all assignment (store) instructions
+in the body of P.
+EM also has a set of \fIindirect\fR assignment
+instructions,
+i.e. assignment through a pointer variable.
+In general, it is not possible to determine
+which variable is affected by such an assignment.
+In these cases, we just record the fact that P
+does an indirect assignment.
+Note that this does not mean that all variables
+are potentially affected, as the front ends
+may generate messages telling that certain
+variables can never be accessed indirectly.
+We also set a flag if P does a use (load) indirect.
+Note that we only have to look at \fIglobal\fR
+variables.
+If P changes or uses any of its locals,
+this has no effect on its environment.
+Local variables of a lexically enclosing
+procedure can only be accessed indirectly.
+.PP
+A procedure P may of course call another procedure.
+To determine the effects of a call to P,
+we also must know the effects of a call to the second procedure.
+This second one may call a third one, and so on.
+Effectively, we need to compute the \fItransitive closure\fR
+of the effects.
+To do this, we determine for every procedure
+which other procedures it calls.
+This set is the "calling" attribute of a procedure.
+One may regard all these sets as a conceptual graph,
+in which there is an edge from P to Q
+if Q is in the calling set of P. This graph will
+be referred to as the \fIcall graph\fR.
+(Note the resemblance with the control flow graph).
+.PP
+We can detect which procedures are called by P
+by looking at all CAL instructions in its body.
+Unfortunately, a procedure may also be
+called indirectly, via a CAI instruction.
+Yet, only procedures that are used as operand of an LPI
+instruction can be called indirectly,
+because this is the only way to take the address of a procedure.
+We determine for every procedure whether it does
+a CAI instruction.
+We also build a set of all procedures used as
+operand of an LPI.
+.sp
+After all procedures have been processed (i.e. all CFGs
+are constructed, all loops are detected,
+all procedures are analyzed to see which variables
+they may change, which procedures they call,
+whether they do a CAI or are used in an LPI) the
+transitive closure of all interprocedural
+information is computed.
+During the same process,
+the calling set of every procedure that uses a CAI
+is extended with the above-mentioned set of all
+procedures that can be called indirectly.
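+.PP
+The closure itself can be sketched in C as a simple iteration to a
+fixed point.
+The names and the set representation below are invented for this
+illustration; only the change sets are shown, the use information and
+the flags are propagated in the same way.
+.DS
+.ft 5
+#define NPROC  128
+#define NWORDS 8                      /* size of a bitset, in longs */
+
+struct proc {
+    char          calls[NPROC];       /* calls[q] != 0: may call q  */
+    unsigned long change[NWORDS];     /* globals possibly changed   */
+};
+
+/* Sketch: or the bitset src into dst; return 1 if dst grew. */
+static int merge(unsigned long *dst, unsigned long *src)
+{
+    int w, grew = 0;
+
+    for (w = 0; w < NWORDS; w++) {
+        if (src[w] & ~dst[w])
+            grew = 1;
+        dst[w] |= src[w];
+    }
+    return grew;
+}
+
+/*
+ * Sketch: propagate the change sets over the call graph until a
+ * fixed point is reached, so that proc[p].change finally covers
+ * every global that any procedure reachable from p may change.
+ */
+static void closure(struct proc proc[], int nproc)
+{
+    int again, p, q;
+
+    do {
+        again = 0;
+        for (p = 0; p < nproc; p++)
+            for (q = 0; q < nproc; q++)
+                if (proc[p].calls[q] &&
+                    merge(proc[p].change, proc[q].change))
+                    again = 1;
+    } while (again);
+}
+.ft R
+.DE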

+ 21 - 0
doc/ego/cf/cf6

@@ -0,0 +1,21 @@
+.NH 2
+Source files
+.PP
+The sources of CF are in the following files and packages:
+.IP cf.h: 14
+declarations of global variables and data structures
+.IP cf.c:
+the routine main; interprocedural analysis;
+transitive closure
+.IP succ:
+control flow (successor and predecessor)
+.IP idom:
+immediate dominators
+.IP loop:
+loop detection
+.IP get:
+read object and procedure table;
+read EM text and partition it into basic blocks
+.IP put:
+write tables, CFGs and EM text
+.LP

+ 1 - 0
doc/ego/cj/.distr

@@ -0,0 +1 @@
+cj1

+ 144 - 0
doc/ego/cj/cj1

@@ -0,0 +1,144 @@
+.bp
+.NH 1
+Cross jumping
+.NH 2
+Introduction
+.PP
+The "Cross Jumping" optimization technique (CJ)
+.[
+wulf design optimizing compiler
+.]
+is basically a space optimization technique. It looks for pairs of
+basic blocks (B1,B2), for which:
+.DS
+SUCC(B1) = SUCC(B2) = {S}
+.DE
+(So B1 and B2 both have one and the same successor).
+If the last few non-branch instructions are the same for B1 and B2,
+one such sequence can be eliminated.
+.DS
+Pascal:
+
+if cond then
+    S1
+    S3
+else
+    S2
+    S3
+
+(pseudo) EM:
+.TS
+l l l.
+ TEST COND		 TEST COND
+ BNE *1		 BNE *1
+ S1		 S1
+ S3	--->	 BRA *2
+ BRA *2		1:
+1:		 S2
+ S2		2:
+ S3		 S3
+2:
+.TE
+
+Fig. 9.1 An example of Cross Jumping
+.DE
+As the basic blocks have the same successor,
+at least one of them ends in an unconditional branch instruction (BRA).
+Hence no extra branch instruction is ever needed, just the target
+of an existing branch needs to be changed; neither the program size
+nor the execution time will ever increase.
+In general, the execution time will remain the same, unless
+further optimizations can be applied because of this optimization.
+.PP
+This optimization is particularly effective,
+because it cannot always be done by the programmer at the source level,
+as demonstrated by Fig. 9.2.
+.DS
+	Pascal:
+
+if cond then
+   x := f(4)
+else
+   x := g(5)
+
+
+EM:
+
+.TS
+l l.
+...	...
+LOC 4	LOC 5
+CAL F	CAL G
+ASP 2	ASP 2
+LFR 2	LFR 2
+STL X	STL X
+.TE
+
+Fig. 9.2 Effectiveness of Cross Jumping
+.DE
+At the source level there is no common tail,
+but at the EM level there is a common tail.
+.NH 2
+Implementation
+.PP
+The implementation of cross jumping is rather straightforward.
+The technique is applied to one procedure at a time.
+The control flow graph of the procedure 
+is scanned for pairs of basic blocks
+with the same (single) successor and with common tails.
+Note that there may be more than two such blocks (e.g. as the result
+of a case statement).
+This is dealt with by repeating the entire process until no
+further optimizations can be done for the current procedure.
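+.PP
+The search for a common tail can be sketched in C as follows
+(invented names; further checks, such as the one described at the end
+of this chapter, are left out):
+.DS
+.ft 5
+struct instr {
+    struct instr *prev;     /* doubly linked list of instructions */
+    int           opcode;
+    long          operand;
+};
+
+/* Sketch: are two instructions equal for cross jumping purposes? */
+static int same_instr(struct instr *a, struct instr *b)
+{
+    return a->opcode == b->opcode && a->operand == b->operand;
+}
+
+/*
+ * Sketch: length of the common tail of two blocks, given the last
+ * instruction of each; a trailing unconditional branch (op_bra) is
+ * skipped, as it is not part of the shared code.
+ */
+static int common_tail(struct instr *i1, struct instr *i2, int op_bra)
+{
+    int n = 0;
+
+    if (i1 != 0 && i1->opcode == op_bra)
+        i1 = i1->prev;
+    if (i2 != 0 && i2->opcode == op_bra)
+        i2 = i2->prev;
+    while (i1 != 0 && i2 != 0 && same_instr(i1, i2)) {
+        n++;
+        i1 = i1->prev;
+        i2 = i2->prev;
+    }
+    return n;
+}
+.ft R
+.DE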
+.sp
+If a suitable pair of basic blocks has been found, the control flow
+graph must be altered. One of the basic
+blocks must be split into two.
+The control flow graphs before and after the optimization are shown
+in Fig. 9.3 and Fig. 9.4.
+.DS
+.ft 5
+
+        --------                                --------
+        |      |                                |      |
+        | S1   |                                | S2   |
+        | S3   |                                | S3   |
+        |      |                                |      |
+        --------                                --------
+           |                                       |
+           |------------------|--------------------|
+                              |
+                              v
+.ft R
+
+Fig. 9.3 CFG before optimization
+.DE
+.DS
+.ft 5
+        --------                                --------
+        |      |                                |      |
+        | S1   |                                | S2   |
+        |      |                                |      |
+        --------                                --------
+           |                                       |
+           |--------------------<------------------|
+           v
+        --------
+        |      |
+        | S3   |
+        |      |
+        --------
+           |
+           v
+.ft R
+
+Fig. 9.4 CFG after optimization
+.DE
+Some attributes of the three resulting blocks (such as immediate dominator)
+are updated.
+.PP
+In some cases, cross jumping might split the computation of an expression
+into two, by inserting a branch somewhere in the middle.
+Most code generators will generate very poor assembly code when
+presented with such EM code. 
+Therefore, cross jumping is not performed in these cases.

+ 5 - 0
doc/ego/cs/.distr

@@ -0,0 +1,5 @@
+cs1
+cs2
+cs3
+cs4
+cs5

+ 45 - 0
doc/ego/cs/cs1

@@ -0,0 +1,45 @@
+.bp
+.NH 1
+Common subexpression elimination
+.NH 2
+Introduction
+.PP
+The Common Subexpression Elimination optimization technique (CS)
+tries to eliminate multiple computations of EM expressions
+that yield the same result.
+It places the result of one such computation
+in a temporary variable,
+and replaces the other computations by a reference
+to this temporary variable.
+The primary goal of this technique is to decrease
+the execution time of the program,
+but in general it will save space too.
+.PP
+As an example of the application of Common Subexpression Elimination,
+consider the piece of program in Fig. 7.1(a).
+.DS
+.TS
+l l l.
+x := a * b;	TMP := a * b;	x := a * b;
+CODE;	x := TMP;	CODE
+y := c + a * b;	CODE	y := x;
+	y := c + TMP;
+
+   (a)	   (b)	   (c)
+.TE
+
+Fig. 7.1  Examples of Common Subexpression Elimination
+.DE
+If neither a nor b is changed in CODE,
+the instructions can be replaced by those of Fig. 7.1(b),
+which saves one multiplication,
+but costs an extra store instruction.
+If the value of x is not changed in CODE either,
+the instructions can be replaced by those of Fig. 7.1(c).
+In this case
+the extra store is not needed.
+.PP
+In the following sections we will describe
+which transformations are done
+by CS and how this phase
+was implemented.

+ 86 - 0
doc/ego/cs/cs2

@@ -0,0 +1,86 @@
+.NH 2
+Specification of the Common Subexpression Elimination phase
+.PP
+In this section we will describe
+the window
+through which CS examines the code,
+the expressions recognized by CS,
+and finally the changes made to the code.
+.NH 3
+The working window
+.PP
+The CS algorithm is applied to the
+largest sequence of textually adjacent basic blocks
+B1,..,Bn, for which
+.DS
+PRED(Bj) = {Bj-1},  j = 2,..,n.
+.DE
+Intuitively, this window consists of straight line code,
+with only one entry point (at the beginning); it may
+contain jumps, which should all have their targets outside the window.
+This is illustrated in Fig. 7.2.
+.DS
+x := a * b;	(1)
+if x < 10 then	(2)
+    y := a * b;	(3)
+
+Fig. 7.2 The working window of CS
+.DE
+Line (2) can only be executed after line (1).
+Likewise, line (3) can only be executed after
+line (2).
+Both a and b have the same values at line (1) and at line (3).
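+.PP
+In C, finding the end of such a window could be sketched as follows
+(the names are invented for this illustration):
+.DS
+.ft 5
+#include <stddef.h>
+
+struct block {
+    struct block  *next_textual;   /* textually next basic block */
+    struct block **pred;           /* predecessor blocks         */
+    int            npred;
+};
+
+/*
+ * Sketch: starting at block first, extend the window as long as the
+ * textually next block has the current block as its only predecessor;
+ * return the last block of the window.
+ */
+static struct block *window_end(struct block *first)
+{
+    struct block *b = first;
+
+    while (b->next_textual != NULL
+        && b->next_textual->npred == 1
+        && b->next_textual->pred[0] == b)
+        b = b->next_textual;
+    return b;
+}
+.ft R
+.DE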
+.PP
+Larger windows were avoided.
+In Fig. 7.3, the value of a at line (4) may have been obtained
+at more than one point.
+.DS
+x := a * b;	(1)
+if x < 10 then	(2)
+    a := 100;	(3)
+y := a * b;	(4)
+
+Fig. 7.3 Several working windows
+.DE
+.NH 3
+Recognized expressions.
+.PP
+The computations eliminated by CS need not be normal expressions
+(like "a * b"),
+but can even consist of a single operand that is expensive to access,
+such as an array element or a record field.
+If an array element is used,
+its address is computed implicitly.
+CS is able to eliminate either the element itself or its
+address, whichever one is most profitable.
+A variable of a textually enclosing procedure may also be
+expensive to access, depending on the lexical level difference.
+.NH 3
+Transformations
+.PP
+CS creates a new temporary local variable (TMP)
+for every eliminated expression,
+unless it is able to use an existing local variable.
+It emits code to initialize this variable with the
+result of the expression.
+Most recurrences of the expression
+can simply be replaced by a reference to TMP.
+If the address of an array element is recognized as
+a common subexpression,
+references to the element itself are replaced by
+indirect references through TMP (see Fig. 7.4).
+.DS
+.TS
+l l l.
+x := A[i];		TMP := &A[i];
+  . . .	-->	x := *TMP;
+A[i] := y;		   . . .
+			*TMP := y;
+.TE
+
+Fig. 7.4 Elimination of an array address computation
+.DE
+Here, '&' is the 'address of' operator,
+and unary '*' is the indirection operator.
+(Note that EM actually has different instructions to do
+a use-indirect or an assign-indirect.)

+ 250 - 0
doc/ego/cs/cs3

@@ -0,0 +1,250 @@
+.NH 2
+Implementation
+.PP
+.NH 3
+The value number method
+.PP
+To determine whether two expressions have the same result,
+there must be some way to determine whether their operands have
+the same values.
+We use a system of \fIvalue numbers\fP
+.[
+kennedy data flow analysis 
+.]
+in which each distinct value of whatever type,
+created or used within the working window,
+receives a unique identifying number, its value number.
+Two items have the same value number if and only if,
+based only upon information from the instructions in the window,
+their values are provably identical.
+For example, after processing the statement
+.DS
+a := 4;
+.DE
+the variable a and the constant 4 have the same value number.
+.PP
+The value number of the result of an expression depends only
+on the kind of operator and the value number(s) of the operand(s).
+The expressions need not be textually equal, as shown in Fig. 7.5.
+.DS
+.TS
+l l.
+a := c;	(1)
+use(a * b);	(2)
+d := b;	(3)
+use(c * d);	(4)
+.TE
+
+Fig. 7.5 Different expressions with the same value number
+.DE
+At line (1) a receives the same value number as c.
+At line (2) d receives the same value number as b.
+At line (4) the expression "c * d" receives the same value number
+as the expression "a * b" at line (2),
+because the value numbers of their left and right operands are the same,
+and the operator (*) is the same.
+.PP
+As another example of the value number method, consider Fig. 7.6.
+.DS
+.TS
+l l.
+use(a * b);	(1)
+a := 123;	(2)
+use(a * b);	(3)
+.TE
+
+Fig. 7.6 Identical expressions with different value numbers
+.DE
+Although textually the expressions "a * b" in line 1 and line 3 are equal,
+a will have different value numbers at line 3 and line 1.
+The two expressions will not mistakenly be recognized as equivalent.
+.NH 3
+Entities
+.PP
+The Value Number Method distinguishes between operators and operands.
+The value numbers of operands are stored in a table,
+called the \fIsymbol table\fR.
+The value number of a subexpression depends on the
+(root) operator of the expression and on the value numbers
+of its operands.
+A table of "available expressions" is used to do this mapping.
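+This mapping can be sketched in C as a simple table lookup
+(the names are invented and only binary operators are shown):
+.DS
+.ft 5
+#define MAXAVAIL 256
+
+static struct avail {
+    int op;                /* the operator                       */
+    int vn1, vn2;          /* value numbers of the operands      */
+    int result;            /* value number given to the result   */
+} avail[MAXAVAIL];
+static int navail;
+static int next_vn = 1;    /* source of brand new value numbers  */
+
+/*
+ * Sketch: the value number of "vn1 op vn2".  If the same combination
+ * was seen before, the old value number is returned, which is exactly
+ * how a common subexpression is recognized.
+ */
+static int expr_vn(int op, int vn1, int vn2)
+{
+    int i;
+
+    for (i = 0; i < navail; i++)
+        if (avail[i].op == op
+            && avail[i].vn1 == vn1 && avail[i].vn2 == vn2)
+            return avail[i].result;
+    avail[navail].op = op;
+    avail[navail].vn1 = vn1;
+    avail[navail].vn2 = vn2;
+    avail[navail].result = next_vn++;
+    return avail[navail++].result;
+}
+.ft R
+.DE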
+.PP
+CS recognizes the following kinds of EM operands, called \fIentities\fR:
+.DS
+- constant
+- local variable
+- external variable
+- indirectly accessed entity
+- offsetted entity
+- address of local variable
+- address of external variable
+- address of offsetted entity
+- address of local base
+- address of argument base
+- array element
+- procedure identifier
+- floating zero
+- local base
+- heap pointer
+- ignore mask
+.DE
+.LP
+Whenever a new entity is encountered in the working window,
+it is entered in the symbol table and given a brand new value number.
+Most entities have attributes (e.g. the offset in
+the current stackframe for local variables),
+which are also stored in the symbol table.
+.PP
+An entity is called static if its value cannot be changed
+(e.g. a constant or an address).
+.NH 3
+Parsing expressions
+.PP
+Common subexpressions are recognized by simulating the behaviour
+of the EM machine.
+The EM code is parsed from left to right;
+as EM is postfix code, this is a bottom up parse.
+At any point the current state of the EM runtime stack is
+reflected by a simulated "fake stack",
+containing descriptions of the parsed operands and expressions.
+A descriptor consists of:
+.DS
+(1) the value number of the operand or expression
+(2) the size of the operand or expression
+(3) a pointer to the first line of EM-code
+    that constitutes the operand or expression
+.DE
+Note that operands may consist of several EM instructions.
+Whenever an operator is encountered, the
+descriptors of its operands are on top of the fake stack.
+The operator and the value numbers of the operands 
+are used as indices in the table of available expressions,
+to determine the value number of the expression.
+.PP
+During the parsing process,
+we keep track of the first line of each expression;
+we need this information when we decide to eliminate the expression.
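+As an illustration, the step performed for a binary operator could be
+sketched in C as follows (invented names; expr_vn() is the table
+lookup sketched earlier in this chapter):
+.DS
+.ft 5
+struct descr {
+    int vn;            /* value number                             */
+    int size;          /* size in bytes                            */
+    int first_line;    /* first EM line of the operand/expression  */
+};
+
+static struct descr fake[64];    /* the fake stack */
+static int ftop;
+
+/*
+ * Sketch: pop the two operand descriptors, look up (or create) the
+ * value number of the result, and push a descriptor for the whole
+ * expression, which starts where its first operand started.
+ */
+static void binary_op(int op, int result_size)
+{
+    struct descr right = fake[--ftop];
+    struct descr left  = fake[--ftop];
+    struct descr res;
+
+    res.vn = expr_vn(op, left.vn, right.vn);
+    res.size = result_size;
+    res.first_line = left.first_line;
+    fake[ftop++] = res;
+}
+.ft R
+.DE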
+.NH 3
+Updating entities
+.PP
+An entity is assigned a value number when it is
+used for the first time
+in the working window.
+If the entity is used as left hand side of an assignment,
+it gets the value number of the right hand side.
+Sometimes the effects of an instruction on an entity cannot
+be determined exactly;
+the current value and value number of the entity may become
+inconsistent.
+Hence the current value number must be forgotten.
+This is achieved by giving the entity a new value number
+that was not used before.
+The entity is said to be \fIkilled\fR.
+.PP
+As information is lost when an entity is killed,
+CS tries to save as many entities as possible.
+In case of an indirect assignment through a pointer,
+some analysis is done to see which variables cannot be altered.
+For a procedure call, the interprocedural information contained
+in the procedure table is used to restrict the set of entities that may
+be changed by the call.
+Local variables for which the front end generated 
+a register message can never be changed by an indirect assignment
+or a procedure call.
+.NH 3
+Changing the EM text
+.PP
+When a new expression becomes available,
+it is checked whether its result is saved in a local
+that may go in a register.
+The last line of the expression must be followed
+by a STL or SDL instruction
+(depending on the size of the result)
+and a register message must be present for
+this local.
+If there is such a local,
+it is recorded in the available expressions table.
+Each time a new occurrence of this expression
+is found,
+the value number of the local is compared against
+the value number of the result.
+If they are different the local cannot be used and is forgotten.
+.PP
+The available expressions are linked in a list.
+New expressions are linked at the head of the list.
+In this way expressions that are contained within other
+expressions appear later in the list,
+because EM-expressions are postfix.
+The elimination process walks through the list,
+starting at the head, to find the largest expressions first.
+If an expression is eliminated,
+any expression later on in the list, contained in the former expression,
+is removed from the list,
+as expressions can only be eliminated once.
+.PP
+A STL or SDL is emitted after the first occurrence of the expression,
+unless there was an existing local variable that could hold the result.
+.NH 3
+Desirability analysis
+.PP
+Although the global optimizer works on EM code,
+the goal is to improve the quality of the object code.
+Therefore some machine-dependent information is needed
+to decide whether it is desirable to
+eliminate a given expression.
+Because it is impossible for the CS phase to know
+exactly what code will be generated,
+some heuristics are used.
+CS essentially looks for some special cases
+that should not be eliminated.
+These special cases can be turned on or off for a given machine,
+as indicated in a machine descriptor file.
+.PP
+Some operators can sometimes be translated
+into an addressing mode for the machine at hand.
+Such an operator is only eliminated
+if its operand is itself expensive,
+i.e. it is not just a simple load.
+The machine descriptor file contains a set of such operators.
+.PP
+Eliminating the loading of the Local Base or
+the Argument Base by the LXL resp. LXA instruction
+is only beneficial if the difference in lexical levels
+exceeds a certain threshold.
+The machine descriptor file contains this threshold.
+.PP
+Replacing a SAR or a LAR by an AAR followed by a LOI
+may possibly increase the size of the object code.
+We assume that this is only possible when the
+size of the array element is greater than some limit.
+.PP
+There are back ends that can very efficiently translate
+the index computing instruction sequence LOC SLI ADS.
+If this is the case,
+the SLI instruction between a LOC
+and an ADS is not eliminated.
+.PP
+To handle unforseen cases, the descriptor file may also contain
+a set of operators that should never be eliminated.
+.NH 3
+The algorithm
+.PP
+After these preparatory explanations,
+the algorithm itself is easy to understand.
+For each instruction within the current window,
+the following steps are performed in the given order:
+.IP 1.
+Check if this instruction defines an entity.
+If so, the set of entities is updated accordingly.
+.IP 2.
+Kill all entities that might be affected by this instruction.
+.IP 3.
+Simulate the instruction on the fake-stack.
+If this instruction is an operator,
+update the list of available expressions accordingly.
+.PP
+The result of this process is
+a list of available expressions plus the information
+needed to eliminate them.
+Expressions that are desirable to eliminate are eliminated.
+Next, the window is shifted and the process is repeated.

+ 311 - 0
doc/ego/cs/cs4

@@ -0,0 +1,311 @@
+.NH 2
+Implementation.
+.PP
+In this section we will discuss the implementation of the CS phase.
+We will first describe the basic actions that are undertaken
+by the algorithm, then the algorithm itself.
+.NH 3
+Partitioning the EM instructions
+.PP
+There are over 100 EM instructions.
+For our purpose we partition this huge set into groups of
+instructions which can be more or less conveniently handled together.
+.PP
+There are groups for all sorts of load instructions:
+simple loads, expensive loads, loads of an array element.
+A load is considered \fIexpensive\fP when more than one EM instruction
+is involved in loading it.
+The load of a lexical entity is also considered expensive.
+For instance: LOF is expensive, LAL is not.
+LAR forms a group on its own, 
+because it is not only an expensive load,
+but also implicitly includes the ternary operator AAR,
+which computes the address of the array element.
+.PP
+There are groups for all sorts of operators:
+unary, binary, and ternary.
+The groups of operators are further partitioned according to the size
+of their operand(s) and result.
+.\" .PP
+.\" The distinction between operators and expensive loads is not always clear.
+.\" The ADP instruction for example,
+.\" might seem a unary operator because it pops one item
+.\" (a pointer) from the stack.
+.\" However, two ADP-instructions which pop an item with the same value number
+.\" need not have the same result,
+.\" because the attributes (an offset, to be added to the pointer)
+.\" can be different.
+.\" Is it then a binary operator?
+.\" That would give rise to the strange, and undesirable,
+.\" situation that some binary operators pop two operands
+.\" and others pop one.
+.\" The conclusion is inevitable:
+.\" we have been fooled by the name (ADd Pointer).
+.\" The ADP-instruction is an expensive load.
+.\" In this context LAF, meaning Load Address of oFfsetted,
+.\" would have been a better name,
+.\" corresponding to LOF, like LAL,
+.\" Load Address of Local, corresponds to LOL.
+.PP
+There are groups for all sorts of stores:
+direct, indirect, array element.
+The SAR forms a group on its own for the same reason
+as appeared with LAR.
+.PP
+The effect of the remaining instructions is less clear.
+They do not help very much in parsing expressions or
+in constructing our pseudo symboltable.
+They are partitioned according to the following criteria:
+.RS
+.IP "-"
+They change the value of an entity without using the stack
+(e.g. ZRL, DEE).
+.IP "-"
+They are subroutine calls (CAI, CAL).
+.IP "-"
+They change the stack in some irreproducible way (e.g. ASP, LFR, DUP).
+.IP "-"
+They have no effect whatever on the stack or on the entities.
+This does not mean they can be deleted,
+but they can be ignored for the moment
+(e.g. MES, LIN, NOP).
+.IP "-"
+Their effect is too complicated to compute,
+so we just assume worst case behaviour.
+Hopefully, they do not occur very often.
+(e.g. MON, STR, BLM).
+.IP "-"
+They signal the end of the basic block (e.g. BLT, RET, TRP).
+.RE
+.NH 3
+Parsing expressions
+.PP
+To recognize expressions,
+we simulate the behaviour of the EM machine,
+by means of a fake-stack.
+When we scan the instructions in sequential order,
+we first encounter the instructions that load
+the operands on the stack,
+and then the instruction that indicates the operator,
+because EM expressions are postfix.
+When we find an instruction to load an operand,
+we load on the fake-stack a struct with the following information:
+.DS
+.TS
+l l.
+(1)	the value number of the operand
+(2)	the size of the operand
+(3)	a pointer to the first line of EM-code
+	that constitutes the operand
+.TE
+.DE
+In most cases, (3) will point to the line
+that loaded the operand (e.g. LOL, LOC),
+i.e. there is only one line that refers to this operand,
+but sometimes some information must be popped
+to load the operand (e.g. LOI, LAR).
+This information must have been pushed before,
+so we also pop a pointer to the first line that pushed
+the information.
+This line is now the first line that defines the operand.
+.PP
+When we find the operator instruction,
+we pop its operand(s) from the fake-stack.
+The first line that defines the first operand is
+now the first line of the expression.
+We now have all information to determine
+whether the just parsed expression has occurred before.
+We also know the first and last line of the expression;
+we need this when we decide to eliminate it.
+Associated with each available expression is a set whose elements
+contain the first and last line of a recurrence of this expression.
+.PP
+Not only will the operand(s) be popped from the fake-stack,
+but the following will be pushed:
+.DS
+.TS
+l l.
+(1)	the value number of the result
+(2)	the size of the result
+(3)	a pointer to the first line of the expression
+.TE
+.DE
+In this way an item on the fake-stack always contains
+the necessary information.
+EM expressions are parsed bottom up.
+.NH 3
+Updating entities
+.PP
+As said before,
+we build our private "symboltable",
+while scanning the EM-instructions.
+The behaviour of the EM-machine is not only reflected
+in the fake-stack,
+but also in the entities.
+When an entity is created,
+we do not yet know its value,
+so we assign a brand new value number to it.
+Each time a store-instruction is encountered,
+we change the value number of the target entity of this store
+to the value number of the token that was popped
+from the fake-stack.
+Because entities may overlap,
+we must also "forget" the value numbers of entities
+that might be affected by this store.
+Each such entity will be \fIkilled\fP,
+i.e. assigned a brand new value number.
+.PP
+Because we lose information when we forget
+the value number of an entity,
+we try to save as many entities as possible.
+When we store into an external,
+we don't have to kill locals and vice versa.
+Furthermore, we can see whether two locals or
+two externals overlap,
+because we know the offset from the local base,
+resp. the offset within the data block,
+and the size.
+The situation becomes more complicated when we have
+to consider indirection.
+The worst case is that we store through an unknown pointer.
+In that case we kill all entities except those locals
+for which a so-called \fIregister message\fP has been generated;
+this register message indicates that this local can never be
+accessed indirectly.
+If we know this pointer we can be more careful.
+If it points to a local then the entity that is accessed through
+this pointer can never overlap with an external.
+If it points to an external this entity can never overlap with a local.
+Furthermore, in the latter case,
+we can find the data block this entity belongs to.
+Since pointer arithmetic is only defined within a data block,
+this entity can never overlap with entities that are known to
+belong to another data block.
+.PP
+Not only after a store-instruction but also after a 
+subroutine-call it may be necessary to kill entities;
+the subroutine may affect global variables or store
+through a pointer.
+If a subroutine is called that is not available as EM-text,
+we assume worst case behaviour,
+i.e. we kill all entities without a register message.
+.NH 3
+Additions and replacements.
+.PP
+When a new expression becomes available,
+we check whether the result is saved in a local
+that may go in a register.
+The last line of the expression must be followed
+by a STL or SDL instruction,
+depending on the size of the result
+(resp. WS and 2*WS),
+and a register message must be present for
+this local.
+If we have found such a local,
+we store a pointer to it with the available expression.
+Each time a new occurrence of this expression
+is found,
+we compare the value number of the local against
+the value number of the result.
+When they are different we remove the pointer to it,
+because we cannot use it.
+.PP
+The available expressions are singly linked in a list.
+When a new expression becomes available,
+we link it at the head of the list.
+In this way expressions that are contained within other
+expressions appear later in the list,
+because EM-expressions are postfix.
+When we are going to eliminate expressions,
+we walk through the list,
+starting at the head, to find the largest expressions first.
+When we decide to eliminate an expression,
+we look at the expressions in the tail of the list,
+starting from where we are now,
+to delete expressions that are contained within
+the chosen one because
+we cannot eliminate an expression more than once.
+.PP
+When we are going to eliminate expressions,
+and we do not have a local that holds the result,
+we emit a STL or SDL after the line where the expression
+was first found.
+The other occurrences are simply removed,
+unless they contain instructions whose effects are not limited to
+the stack, e.g. messages, stores and calls.
+Before each instruction that needs the result on the stack,
+we emit a LOL or LDL.
+When the expression was an AAR,
+but the instruction was a LAR or a SAR,
+we append a LOI resp. a STI of the number of bytes
+in an array-element after each LOL/LDL.
+.NH 3
+Desirability analysis
+.PP
+Although the global optimizer works on EM code,
+the goal is to improve the quality of the object code.
+Therefore we need some machine dependent information
+to decide whether it is desirable to
+eliminate a given expression.
+Because it is impossible for the CS phase to know
+exactly what code will be generated,
+we use some heuristics.
+In most cases it will save time when we eliminate an
+operator, so we just do it.
+We only look for some special cases.
+.PP
+Some operators can in some cases be translated
+into an addressing mode for the machine at hand.
+We only eliminate such an operator,
+when its operand is itself "expensive",
+i.e. not just a simple load.
+The user of the CS phase has to supply
+a set of such operators.
+.PP
+Eliminating the loading of the Local Base or
+the Argument Base by the LXL resp. LXA instruction
+is only beneficial when the number of lexical levels
+we have to go back exceeds a certain threshold.
+This threshold will be different when registers
+are saved by the back end.
+The user must supply this threshold.
+.PP
+Replacing a SAR or a LAR by an AAR followed by a LOI
+may possibly increase the size of the object code.
+We assume that this is only possible when the
+size of the array element is greater than some
+(user-supplied) limit.
+.PP
+There are back ends that can very efficiently translate
+the index computing instruction sequence LOC SLI ADS.
+If this is the case,
+we do not eliminate the SLI instruction between a LOC
+and an ADS.
+.PP
+To handle unforeseen cases, the user may also supply
+a set of operators that should never be eliminated.
+.NH 3
+The algorithm
+.PP
+After these preparatory explanations,
+we can be short about the algorithm itself.
+For each instruction within our window,
+the following steps are performed in the order given:
+.IP 1.
+We check if this instruction defines an entity.
+If this is the case the set of entities is updated accordingly.
+.IP 2.
+We kill all entities that might be affected by this instruction.
+.IP 3.
+The instruction is simulated on the fake-stack.
+Copy propagation is done.
+If this instruction is an operator,
+we update the list of available expressions accordingly.
+.PP
+When we have processed all instructions this way,
+we have built a list of available expressions plus the information we
+need to eliminate them.
+The expressions that desirability analysis approves of are eliminated.
+Then we shift our window and continue.

+ 46 - 0
doc/ego/cs/cs5

@@ -0,0 +1,46 @@
+.NH 2
+Source files of CS
+.PP
+The sources of CS are in the following files and packages:
+.IP cs.h 14
+declarations of global variables and data structures
+.IP cs.c
+the routine main;
+a driving routine to process
+the basic blocks in the right order
+.IP vnm
+implements a procedure that performs
+the value numbering on one basic block
+.IP eliminate
+implements a procedure that does the
+transformations, if desirable
+.IP avail
+implements a procedure that manipulates the list of available expressions
+.IP entity
+implements a procedure that manipulates the set of entities
+.IP getentity
+implements a procedure that extracts the
+pseudo symboltable information from EM-instructions;
+uses a small table
+.IP kill
+implements several routines that find the entities
+that might be changed by EM-instructions
+and kill them
+.IP partition
+implements several routines that partition the huge set
+of EM-instructions into more or less manageable,
+more or less logical chunks
+.IP profit
+implements a procedure that decides whether it
+is advantageous to eliminate an expression;
+also removes expressions with side-effects
+.IP stack
+implements the fake-stack and operations on it
+.IP alloc
+implements several allocation routines
+.IP aux
+implements several auxiliary routines
+.IP debug
+implements several routines to provide debugging
+and verbose output
+.LP

+ 5 - 0
doc/ego/ic/.distr

@@ -0,0 +1,5 @@
+ic1
+ic2
+ic3
+ic4
+ic5

+ 57 - 0
doc/ego/ic/ic1

@@ -0,0 +1,57 @@
+.bp
+.NH
+The Intermediate Code and the IC phase
+.PP
+In this chapter the intermediate code of the EM global optimizer
+will be defined.
+The 'Intermediate Code construction' phase (IC),
+which builds the initial intermediate code from
+EM Compact Assembly Language,
+will be described.
+.NH 2
+Introduction
+.PP
+The EM global optimizer is a multi pass program,
+hence there is a need for an intermediate code.
+Usually, programs in the Amsterdam Compiler Kit use the
+Compact Assembly Language format
+.[~[
+keizer architecture
+.], section 11.2]
+for this purpose.
+Although this code has some convenient features,
+such as being compact,
+it is quite unsuitable in our case,
+because of a number of reasons.
+At first, the code lacks global information
+about whole procedures or whole basic blocks.
+Second, it uses identifiers ('names') to bind
+defining and applied occurrences of
+procedures, data labels and instruction labels.
+Although this is usual in high level programming
+languages, it is awkward in an intermediate code
+that must be read many times.
+Each pass of the optimizer would have
+to incorporate an identifier look-up mechanism
+to associate a defining occurrence with each
+applied occurrence of an identifier.
+Finally, EM programs are used to declare blocks of bytes,
+rather than variables. A 'hol 6' instruction may be used to
+declare three 2-byte variables.
+Clearly, the optimizer wants to deal with variables, and
+not with rows of bytes.
+.PP
+To overcome these problems, we have developed a new
+intermediate code.
+This code does not merely consist of the EM instructions,
+but also contains global information in the
+form of tables and graphs.
+Before describing the intermediate code we will
+first leap aside to outline
+the problems one generally encounters
+when trying to store complex data structures such as
+graphs outside the program, i.e. in a file.
+We trust this will enhance the
+comprehensibility of the
+intermediate code definition and the design and implementation
+of the IC phase.

+ 150 - 0
doc/ego/ic/ic2

@@ -0,0 +1,150 @@
+.NH 2
+Representation of complex data structures in a sequential file
+.PP
+Most programmers are quite used to deal with
+complex data structures, such as
+arrays, graphs and trees.
+There are some particular problems that occur
+when storing such a data structure
+in a sequential file.
+We call data that is kept in
+main memory
+.UL internal
+,as opposed to
+.UL external
+data
+that is kept in a file outside the program.
+.sp
+We assume a simple data structure of a
+scalar type (integer, floating point number)
+has some known external representation.
+An
+.UL array
+having elements of a scalar type can be represented
+externally easily, by successively
+representing its elements.
+The external representation may be preceded by a
+number, giving the length of the array.
+Now, consider a linear, singly linked list,
+the elements of which look like:
+.DS
+record
+        data: scalar_type;
+        next: pointer_type;
+end;
+.DE
+It is significant to note that the "next"
+fields of the elements only have a meaning within
+main memory.
+The field contains the address of some location in
+main memory.
+If a list element is written to a file in
+some program,
+and read by another program,
+the element will be allocated at a different
+address in main memory.
+Hence this address value is completely
+useless outside the program.
+.sp
+One may represent the list by ignoring these "next" fields
+and storing the data items in the order they are linked.
+The "next" fields are represented \fIimplicitly\fR.
+When the file is read again,
+the same list can be reconstructed.
+In order to know where the external representation of the
+list ends,
+it may be useful to put the length of
+the list in front of it.
+.sp
+Note that arrays and linear lists have the
+same external representation.
+.PP
+A doubly linked, linear list,
+with elements of the type:
+.DS
+record
+        data: scalar_type;
+        next,
+        previous: pointer_type;
+end
+.DE
+can be represented in precisely the same way.
+Both the "next" and the "previous" fields are represented
+implicitly.
+.PP
+Next, consider a binary tree,
+the nodes of which have type:
+.DS
+record
+        data: scalar_type;
+        left,
+        right: pointer_type;
+end
+.DE
+Such a tree can be represented sequentially,
+by storing its nodes in some fixed order, e.g. prefix order.
+A special null data item may be used to
+denote a missing left or right son.
+For example, let the scalar type be integer,
+and let the null item be 0.
+Then the tree of fig. 3.1(a)
+can be represented as in fig. 3.1(b).
+.DS
+.ft 5
+                        4
+                      /   \e
+                    9      12
+                  /  \e    /  \e
+                12    3   4   6
+                     / \e  \e  /
+                     8  1  5 1
+.ft R
+
+Fig. 3.1(a) A binary tree
+
+
+.ft 5
+4 9 12 0 0 3 8 0 0 1 0 0 12 4 0 5 0 0 6 1 0 0 0
+.ft R
+
+Fig. 3.1(b) Its sequential representation
+.DE
+We are still able to represent the pointer fields ("left"
+and "right") implicitly.
+.PP
+Finally, consider a general
+.UL graph
+, where each node has a "data" field and
+pointer fields,
+with no restriction on where they may point to.
+Now we're at the end of our tale.
+There is no way to represent the pointers implicitly,
+like we did with lists and trees.
+In order to represent them explicitly,
+we use the following scheme.
+Every node gets an extra field,
+containing some unique number that identifies the node.
+We call this number its
+.UL id.
+A pointer is represented externally as the id of the node
+it points to.
+When reading the file we use a table that maps
+an id to the address of its node.
+In general this table will not be completely filled in
+until we have read the entire external representation of
+the graph and allocated internal memory locations for
+every node.
+Hence we cannot reconstruct the graph in one scan.
+That is, there may be some pointers from node A to B,
+where B is placed later in the sequential file than A.
+When we read the node of A we cannot map the id of B
+to the address of node B,
+as we have not yet allocated node B.
+We can overcome this problem if the size
+of every node is known in advance.
+In this case we can allocate memory for a node
+on first reference.
+Else, the mapping from id to pointer
+cannot be done while reading nodes.
+The mapping can be done either in an extra scan
+or at every reference to the node.
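+.PP
+For the case where the size of every node is known in advance, the
+reading side of this scheme could be sketched in C as follows.
+The names and the one-line-per-node external format are invented for
+this illustration, and each node has just one pointer field for
+brevity.
+.DS
+.ft 5
+#include <stdio.h>
+#include <stdlib.h>
+
+#define MAXID 1000
+
+struct node {
+    int          data;
+    struct node *next;      /* stored externally as an id */
+};
+
+static struct node *byid[MAXID + 1];   /* maps id -> internal address */
+
+/* Sketch: allocate a node on its first reference (id 0 means nil). */
+static struct node *get_node(int id)
+{
+    if (id == 0)
+        return NULL;
+    if (byid[id] == NULL)
+        byid[id] = calloc(1, sizeof(struct node));
+    return byid[id];
+}
+
+/* Sketch: external representation is "id data next_id" per node. */
+static void read_graph(FILE *f)
+{
+    int id, data, next_id;
+
+    while (fscanf(f, "%d %d %d", &id, &data, &next_id) == 3) {
+        struct node *n = get_node(id);
+
+        n->data = data;
+        n->next = get_node(next_id);
+    }
+}
+.ft R
+.DE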

+ 431 - 0
doc/ego/ic/ic3

@@ -0,0 +1,431 @@
+.NH 2
+Definition of the intermediate code
+.PP
+The intermediate code of the optimizer consists
+of several components:
+.IP -
+the object table
+.IP -
+the procedure table
+.IP -
+the em code
+.IP -
+the control flow graphs
+.IP -
+the loop table
+.LP
+.PP
+These components are described in
+the next sections.
+The syntactic structure of every component
+is described by a set of context free syntax rules,
+with the following conventions:
+.DS
+.TS
+l l.
+x	a non-terminal symbol
+A	a terminal symbol (in capitals)
+x: a b c;	a grammar rule
+a | b	a or b
+(a)+	1 or more occurrences of a
+{a}	0 or more occurrences of a
+.TE
+.DE
+.NH 3
+The object table
+.PP
+EM programs declare blocks of bytes rather than (global) variables.
+A typical program may declare 'HOL 7780'
+to allocate space for 8 I/O buffers,
+2 large arrays and 10 scalar variables.
+The optimizer wants to deal with
+.UL objects
+like variables, buffers and arrays
+and certainly not with huge numbers of bytes.
+Therefore the intermediate code contains information
+about which global objects are used.
+This information can be obtained from an EM program
+by just looking at the operands of instructions
+such as LOE, LAE, LDE, STE, SDE, INE, DEE and ZRE.
+.PP
+The object table consists of a list of
+.UL datablock
+entries.
+Each such entry represents a declaration like HOL, BSS,
+CON or ROM.
+There are five kinds of datablock entries.
+The fifth kind,
+UNKNOWN, denotes a declaration in a
+separately compiled file that is not made
+available to the optimizer.
+Each datablock entry contains the type of the block,
+its size, and a description of the objects that
+belong to it.
+If it is a rom,
+it also contains a list of values given
+as arguments to the rom instruction,
+provided that this list contains only integer numbers.
+An object has an offset (within its datablock)
+and a size.
+The size need not always be determinable.
+Both datablock and object contain a unique
+identifying number
+(see previous section for their use).
+.DS
+.UL syntax
+.TS
+lw(1i) l l.
+object_table:
+	{datablock} ;
+datablock:
+	D_ID	-- unique identifying number
+	PSEUDO	-- one of ROM,CON,BSS,HOL,UNKNOWN
+	SIZE	-- # bytes declared
+	FLAGS
+	{value}	-- contents of rom
+	{object} ;	-- objects of the datablock
+object:
+	O_ID	-- unique identifying number
+	OFFSET	-- offset within the datablock
+	SIZE ;	-- size of the object in bytes
+value:
+	argument ;
+.TE
+.DE
+A data block has only one flag: "external", indicating
+whether the data label is externally visible.
+The syntax for "argument" will be given later on
+(see em_text).
+.NH 3
+The procedure table
+.PP
+The procedure table contains global information
+about all procedures that are made available
+to the optimizer
+and that are needed by the EM program.
+(Library units may not be needed, see section 3.5).
+The table has one entry for
+every procedure.
+.DS
+.UL syntax
+.TS
+lw(1i) l l.
+procedure_table:
+	{procedure}
+procedure:
+	P_ID	-- unique identifying number
+	#LABELS	-- number of instruction labels
+	#LOCALS	-- number of bytes for locals 
+	#FORMALS	-- number of bytes for formals
+	FLAGS	-- flag bits
+	calling	-- procedures called by this one
+	change	-- info about global variables changed
+	use ;	-- info about global variables used
+calling:
+	{P_ID} ;	-- procedures called
+change:
+	ext	-- external variables changed
+	FLAGS ;
+use:
+	FLAGS ;
+ext:
+	{O_ID} ;	-- a set of objects
+.TE
+.DE
+.PP
+The number of bytes of formal parameters accessed by
+a procedure is determined by the front ends and
+passed via a message (parameter message) to the optimizer.
+If the front end is not able to determine this number
+(e.g. the parameter may be an array of dynamic size or
+the procedure may have a variable number of arguments), the attribute
+contains the value 'UNKNOWN_SIZE'.
+.sp 0
+A procedure has the following flags:
+.IP -
+external: true if the proc. is externally visible
+.IP -
+bodyseen: true if its code is available as EM text
+.IP -
+calunknown: true if it calls a procedure whose bodyseen
+flag is not set
+.IP -
+environ: true if it uses or changes a (non-global) variable in
+a lexically enclosing procedure
+.IP -
+lpi: true if it is used as operand of an LPI instruction, so
+it may be called indirectly
+.LP
+The change and use attributes both have one flag: "indirect",
+indicating whether the procedure does a 'use indirect'
+or a 'store indirect' (indirect means through a pointer).
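+.PP
+As with the datablocks, the sketch below shows a possible in-core form
+of a procedure table entry.
+It is an illustration only; the names and the flag values are invented
+and the change/use sets are simplified.
+.DS
+#define PF_EXTERNAL   0001    /* externally visible                 */
+#define PF_BODYSEEN   0002    /* code available as EM text          */
+#define PF_CALUNKNOWN 0004    /* calls a procedure without body     */
+#define PF_ENVIRON    0010    /* touches vars of enclosing proc     */
+#define PF_LPI        0020    /* used as operand of an lpi          */
+
+struct pset;                  /* set of procedures (not shown)      */
+struct oset;                  /* set of objects (not shown)         */
+
+struct proc {
+    int          p_id;         /* unique identifying number         */
+    int          p_nlabels;    /* number of instruction labels      */
+    long         p_localbytes; /* number of bytes for locals        */
+    long         p_formalbytes;/* number of bytes for formals       */
+    int          p_flags;      /* the five flags above              */
+    struct pset *p_calling;    /* procedures called by this one     */
+    struct oset *p_change;     /* global variables changed          */
+    struct oset *p_use;        /* global variables used             */
+};
+.DE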
+.NH 3
+The EM text
+.PP
+The EM text contains the EM instructions.
+Every EM instruction has an operation code (opcode)
+and 0 or 1 operands.
+EM pseudo instructions can have more than
+1 operand.
+The opcode is just a small (8 bit) integer.
+.sp
+There are several kinds of operands, which we will
+refer to as
+.UL types.
+Many EM instructions can have more than one type of operand.
+The types and their encodings in Compact Assembly Language
+are discussed extensively in.
+.[~[
+keizer architecture 
+.], section 11.2]
+Of special interest is the way numeric values
+are represented.
+Of prime importance is the machine independency of
+the representation.
+Ultimately, one could store every integer
+just as a string of the characters '0' to '9'.
+As doing arithmetic on strings is awkward,
+Compact Assembly Language allows several alternatives.
+The main idea is to look at the value of the integer.
+Integers that fit in 16, 32 or 64 bits are
+represented as a row of resp. 2, 4 and 8 bytes,
+preceded by an indication of how many bytes are used.
+Longer integers are represented as strings;
+this is only allowed within pseudo instructions, however.
+This concept works very well for target machines
+with reasonable word sizes.
+At present, most ACK software cannot be used for word sizes
+larger than 32 bits,
+although the handles for using larger word sizes are
+present in the design of the EM code.
+In the intermediate code we essentially use the
+same ideas.
+We allow three representations of integers.
+.IP -
+integers that fit in a short are represented as a short
+.IP -
+integers that fit in a long but not in a short are represented
+as longs
+.IP -
+all remaining integers are represented as strings
+(only allowed in pseudos).
+.LP
+The terms short and long are defined in
+.[~[
+ritchie reference manual programming language
+.], section 4]
+and depend only on the source machine
+(i.e. the machine on which ACK runs),
+not on the target machines.
+For historical reasons a long will often be called an
+.UL offset.
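+.PP
+By way of illustration, the choice between the three representations
+could be made as sketched below.
+This is not the actual reading or writing code; the tag names and the
+use of a decimal string as input are assumptions of the sketch.
+.DS
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+
+#define REP_SHORT  1
+#define REP_LONG   2
+#define REP_STRING 3    /* only allowed in pseudo instructions */
+
+/* Decide how an integer, given as a decimal string, is represented. */
+static int int_representation(const char *digits)
+{
+    long v;
+
+    errno = 0;
+    v = strtol(digits, (char **) 0, 10);
+    if (errno == ERANGE)
+        return REP_STRING;     /* does not even fit in a long */
+    if (v >= SHRT_MIN && v <= SHRT_MAX)
+        return REP_SHORT;
+    return REP_LONG;
+}
+.DE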
+.PP
+Operands can also be instruction labels,
+objects or procedures.
+Instruction labels are denoted by a
+.UL label
+.UL identifier,
+which can be distinguished from a normal identifier.
+.sp
+The operand of a pseudo instruction can be a list of
+.UL arguments.
+Arguments can have the same type as operands, except
+for the type short, which is not used for arguments.
+Furthermore, an argument can be a string or
+a string representation of a signed integer, unsigned integer
+or floating point number.
+If the number of arguments is not fully determined by
+the pseudo instruction (e.g. a ROM pseudo can have any number
+of arguments), then the list is terminated by a special
+argument of type CEND.
+.DS
+.UL syntax
+.TS
+lw(1i) l l.
+em_text:
+	{line} ;
+line:
+	INSTR	-- opcode
+	OPTYPE	-- operand type
+	operand ;
+operand:
+	empty |	-- OPTYPE = NO
+	SHORT |	-- OPTYPE = SHORT
+	OFFSET |	-- OPTYPE = OFFSET
+	LAB_ID |	-- OPTYPE = INSTRLAB
+	O_ID |	-- OPTYPE = OBJECT
+	P_ID |	-- OPTYPE = PROCEDURE
+	{argument} ;	-- OPTYPE = LIST
+argument:
+	ARGTYPE
+	arg ;
+arg:
+	empty |	-- ARGTYPE = CEND
+	OFFSET |
+	LAB_ID |
+	O_ID |
+	P_ID |
+	string |	-- ARGTYPE = STRING
+	const ;	-- ARGTYPE = ICON,UCON or FCON
+string:
+	LENGTH	-- number of characters
+	{CHARACTER} ;
+const:
+	SIZE	-- number of bytes
+	string ;	-- string representation of (un)signed
+		-- or floating point constant
+.TE
+.DE
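+.PP
+The following sketch shows how one line of EM text, as defined by the
+syntax above, might be held in core; it is illustrative only and all
+names are invented.
+.DS
+struct argument;                  /* list element, not shown here */
+
+struct em_line {
+    int opcode;                   /* INSTR: the 8-bit opcode      */
+    int optype;                   /* NO, SHORT, OFFSET, ...       */
+    union {
+        short            shrt;    /* OPTYPE = SHORT               */
+        long             off;     /* OPTYPE = OFFSET              */
+        int              lab_id;  /* OPTYPE = INSTRLAB            */
+        int              o_id;    /* OPTYPE = OBJECT              */
+        int              p_id;    /* OPTYPE = PROCEDURE           */
+        struct argument *args;    /* OPTYPE = LIST                */
+    } operand;                    /* unused if OPTYPE = NO        */
+};
+.DE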
+.NH 3
+The control flow graphs
+.PP
+Each procedure can be divided
+into a number of basic blocks.
+A basic block is a piece of code with
+no jumps in, except at the beginning,
+and no jumps out, except at the end.
+.PP
+Every basic block has a set of
+.UL successors,
+which are basic blocks that can follow it immediately in
+the dynamic execution sequence.
+The
+.UL predecessors
+are the basic blocks of which this one
+is a successor.
+The successor and predecessor attributes
+of all basic blocks of a single procedure
+are said to form the
+.UL control
+.UL flow
+.UL graph
+of that procedure.
+.PP
+Another important attribute is the
+.UL immediate
+.UL dominator.
+A basic block B dominates a block C if
+every path in the graph from the procedure entry block
+to C goes through B.
+The immediate dominator of C is the closest dominator
+of C on any path from the entry block.
+(Note that the dominator relation is transitive,
+so the immediate dominator is well defined.)
+.PP
+A basic block also has an attribute containing
+the identifiers of every
+.UL loop
+that the block belongs to (see next section for loops).
+.DS
+.UL syntax
+.TS
+lw(1i) l l.
+control_flow_graph:
+	{basic_block} ;
+basic_block:
+	B_ID	-- unique identifying number
+	#INSTR	-- number of EM instructions
+	succ
+	pred
+	idom	-- immediate dominator
+	loops	-- set of loops
+	FLAGS ;	-- flag bits
+succ:
+	{B_ID} ;
+pred:
+	{B_ID} ;
+idom:
+	B_ID ;
+loops:
+	{LP_ID} ;
+.TE
+.DE
+The flag bits can have the values 'firm' and 'strong',
+which are explained below.
+.NH 3
+The loop tables
+.PP
+Every procedure has an associated
+.UL loop
+.UL table
+containing information about all the loops
+in the procedure.
+Loops can be detected by a close inspection of
+the control flow graph.
+The main idea is to look for two basic blocks,
+B and C, for which the following holds:
+.IP -
+B is a successor of C
+.IP -
+B is a dominator of C
+.LP
+B is called the loop
+.UL entry
+and C is called the loop
+.UL end.
+Intuitively, C contains a jump backwards to
+the beginning of the loop (B).
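+.PP
+The sketch below illustrates this test in C: an edge from C to B is a
+loop back edge when B also dominates C, and dominance can be checked
+by walking the immediate dominator chain.
+It is an illustration only; the field names and the routine note_loop
+are invented.
+.DS
+struct bblock {
+    int             b_id;
+    struct bblock  *b_idom;   /* immediate dominator, 0 for entry */
+    struct bblock **b_succ;   /* successors                       */
+    int             b_nsucc;
+};
+
+extern void note_loop(struct bblock *entry, struct bblock *end);
+
+/* b dominates c iff b lies on the idom chain of c (or b == c). */
+static int dominates(struct bblock *b, struct bblock *c)
+{
+    for (; c != 0; c = c->b_idom)
+        if (c == b)
+            return 1;
+    return 0;
+}
+
+static void find_loops(struct bblock *block[], int nblocks)
+{
+    int i, j;
+
+    for (i = 0; i < nblocks; i++) {           /* candidate loop end C */
+        struct bblock *c = block[i];
+        for (j = 0; j < c->b_nsucc; j++) {
+            struct bblock *b = c->b_succ[j];  /* successor B */
+            if (dominates(b, c))
+                note_loop(b, c);              /* B = entry, C = end */
+        }
+    }
+}
+.DE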
+.PP
+A loop L1 is said to be
+.UL nested
+within loop L2 if all basic blocks of L1
+are also part of L2.
+It is important to note that loops could
+originally be written as a well-structured for- or
+while-loop or as a messy goto loop.
+Hence loops may partly overlap without one
+being nested inside the other.
+The
+.UL nesting
+.UL level
+of a loop is the number of loops in
+which it is nested (so it is 0 for
+an outermost loop).
+The details of loop detection will be discussed later.
+.PP
+It is often desirable to know whether a
+basic block gets executed during every iteration
+of a loop.
+This leads to the following definitions:
+.IP -
+A basic block B of a loop L is said to be a \fIfirm\fR block
+of L if B is executed on all successive iterations of L,
+with the only possible exception of the last iteration.
+.IP -
+A basic block B of a loop L is said to be a \fIstrong\fR block
+of L if B is executed on all successive iterations of L.
+.LP
+Note that a strong block is also a firm block.
+If a block is part of a conditional statement, it is neither
+strong nor firm, as it may be skipped during some iterations
+(see Fig. 3.2).
+.DS
+loop
+       if cond1 then
+	      ... \kx-- this code will not
+		  \h'|\nxu'-- result in a firm or strong block
+       end if;
+       ...  -- strong (always executed)
+       exit when cond2;
+       ...  \kx-- firm (not executed on last iteration).
+end loop;
+
+Fig. 3.2 Example of firm and strong block
+.DE
+.DS
+.UL syntax
+.TS
+lw(1i) l l.
+looptable:
+	{loop} ;
+loop:
+	LP_ID	-- unique identifying number
+	LEVEL	-- loop nesting level
+	entry	-- loop entry block
+	end ;
+entry:
+	B_ID ;
+end:
+	B_ID ;
+.TE
+.DE

+ 83 - 0
doc/ego/ic/ic4

@@ -0,0 +1,83 @@
+.NH 2
+External representation of the intermediate code
+.PP
+The syntax of the intermediate code was given
+in the previous section.
+In this section we will make some remarks about
+the representation of the code in sequential files.
+.sp
+We use sequential files in order to avoid
+the bookkeeping of complex file indices.
+As a consequence of this decision
+we can't store all components
+of the intermediate code
+in one file.
+If a phase wishes to change some attribute
+of a procedure,
+or wants to add or delete entire procedures
+(inline substitution may do the latter),
+the procedure table will only be fully updated
+after the entire EM text has been scanned.
+Yet, the next phase undoubtedly wants
+to read the procedure table before it
+starts working on the EM text.
+Hence there is an ordering problem, which
+can be solved easily by putting the
+procedure table in a separate file.
+Similarly, the data block table is kept
+in a file of its own.
+.PP
+The control flow graphs (CFGs) could be mixed
+with the EM text.
+Rather, we have chosen to put them
+in a separate file too.
+The control flow graph file should be regarded as a
+file that imposes some structure on the EM-text file,
+just as an overhead sheet containing a picture
+of a Flow Chart may be put on an overhead sheet
+containing statements.
+The loop tables are also put in the CFG file.
+A loop imposes an extra structure on the
+CFGs and hence on the EM text.
+So there are four files:
+.IP -
+the EM-text file
+.IP -
+the procedure table file
+.IP -
+the object table file
+.IP -
+the CFG and loop tables file
+.LP
+Every table is preceded by its length, in order to
+tell where it ends.
+The CFG file also contains the number of instructions of
+every basic block,
+indicating which part of the EM text belongs
+to that block.
+.DS
+.UL syntax
+.TS
+lw(1i) l l.
+intermediate_code:
+	object_table_file
+	proctable_file
+	em_text_file
+	cfg_file ;
+object_table_file:
+	LENGTH	-- number of objects
+	object_table ;
+proctable_file:
+	LENGTH	-- number of procedures
+	procedure_table ;
+em_text_file:
+	em_text ;
+cfg_file:
+	{per_proc} ;	-- one for every procedure
+per_proc:
+	BLENGTH	-- number of basic blocks
+	LLENGTH	-- number of loops
+	control_flow_graph
+	looptable ;
+.TE
+.DE

+ 166 - 0
doc/ego/ic/ic5

@@ -0,0 +1,166 @@
+.NH 2
+The Intermediate Code construction phase
+.PP
+The first phase of the global optimizer,
+called
+.UL IC,
+constructs a major part of the intermediate code.
+To be specific, it produces:
+.IP -
+the EM text
+.IP -
+the object table
+.IP -
+part of the procedure table
+.LP
+The calling, change and use attributes of a procedure
+and all its flags except the external and bodyseen flags
+are computed by the next phase (Control Flow phase).
+.PP
+As explained before,
+the intermediate code does not contain
+any names of variables or procedures.
+The normal identifiers are replaced by identifying
+numbers.
+Yet, the output of the global optimizer must
+contain normal identifiers, as this
+output is in Compact Assembly Language format.
+We certainly want all externally visible names
+to be the same in the input as in the output,
+because the optimized EM module may be a library unit,
+used by other modules.
+IC dumps the names of all procedures and data labels
+on two files:
+.IP -
+the procedure dump file, containing tuples (P_ID, procedure name)
+.IP -
+the data dump file, containing tuples (D_ID, data label name)
+.LP
+The names of instruction labels are not dumped,
+as they are not visible outside the procedure
+in which they are defined.
+.PP
+The input to IC consists of one or more files.
+Each file is either an EM module in Compact Assembly Language
+format, or a Unix archive file (library) containing such modules.
+IC only extracts those modules from a library that are
+needed somehow, just as a linker does.
+It is advisable to present as much code
+of the EM program as possible to the optimizer,
+although it is not required to present the whole program.
+If a procedure is called somewhere in the EM text,
+but its body (text) is not included in the input,
+its bodyseen flag in the procedure table will still
+be off.
+Whenever such a procedure is called,
+we assume the worst case for everything;
+it will change and use all variables it has access to,
+it will call every procedure etc.
+.sp
+Similarly, if a data label is used
+but not defined, the PSEUDO attribute in its data block
+will be set to UNKNOWN.
+.NH 3
+Implementation
+.PP
+Part of the code for the EM Peephole Optimizer
+.[
+staveren peephole toplass
+.]
+has been used for IC.
+Especially the routines that read and unravel
+Compact Assembly Language and the identifier
+lookup mechanism have been used.
+New code was added to recognize objects,
+build the object and procedure tables and to
+output the intermediate code.
+.PP
+IC uses singly linked linear lists for both the
+procedure and object table.
+Hence there are no limits on the size of such
+a table (except for the trivial fact that it must fit
+in main memory).
+Both tables are written out after all EM code has
+been processed.
+IC reads the EM text of one entire procedure
+at a time,
+processes it and appends the modified code to
+the EM text file.
+EM code is represented internally as a doubly linked linear
+list of EM instructions.
+.PP
+Objects are recognized by looking at the operands
+of instructions that reference global data.
+If we come across the instructions:
+.DS
+.TS
+l l.
+LDE X+6	-- Load Double External
+LAE X+20	-- Load Address External
+.TE
+.DE
+we conclude that the data block
+preceded by the data label X contains an object
+at offset 6 of size twice the word size,
+and an object at offset 20 of unknown size.
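+.PP
+The conclusion drawn from such instructions can be sketched as follows;
+the sketch is illustrative only, covers just a few opcodes, and uses
+invented names for the constants.
+.DS
+#define UNKNOWN_SIZE (-1L)
+
+enum ext_opcode { LOE, STE, LDE, SDE, LAE };
+
+/* Size of the object referenced by an instruction on external data. */
+static long object_size(enum ext_opcode op, long wordsize)
+{
+    switch (op) {
+    case LOE:
+    case STE:                  /* one-word load or store     */
+        return wordsize;
+    case LDE:
+    case SDE:                  /* double-word load or store  */
+        return 2 * wordsize;
+    case LAE:                  /* address only: size unknown */
+    default:
+        return UNKNOWN_SIZE;
+    }
+}
+.DE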
+.sp
+A data block entry of the object table is allocated
+at the first reference to a data label.
+If this reference is a defining occurrence
+or an INA pseudo instruction,
+the label is not externally visible
+.[~[
+keizer architecture
+.], section 11.1.4.3]
+In this case, the external flag of the data block
+is turned off.
+If the first reference is an applied occurrence
+or an EXA pseudo instruction, the flag is set.
+We record this information, because the
+optimizer may change the order of defining and
+applied occurrences.
+The INA and EXA pseudos are removed from the EM text.
+They may be regenerated by the last phase
+of the optimizer.
+.sp
+Similar rules hold for the procedure table
+and the INP and EXP pseudos.
+.NH 3
+Source files of IC
+.PP
+The source files of IC consist
+of the files ic.c, ic.h and several packages.
+.UL ic.h
+contains type definitions, macros and
+variable declarations that may be used by
+ic.c and by every package.
+.UL ic.c
+contains the definitions of these variables,
+the procedure
+.UL main
+and some high level I/O routines used by main.
+.sp
+Every package xxx consists of two files.
+ic_xxx.h contains type definitions,
+macros, variable declarations and
+procedure declarations that may be used by
+every .c file that includes this .h file.
+The file ic_xxx.c provides the
+definitions of these variables and
+the implementation of the declared procedures.
+IC uses the following packages:
+.IP lookup: 18
+procedures that look up procedure, data label
+and instruction label names; procedures to dump
+the procedure and data label names.
+.IP lib:
+one procedure that gets the next useful input module;
+while scanning archives, it skips unnecessary modules.
+.IP aux:
+several auxiliary routines.
+.IP io:
+low-level I/O routines that unravel the Compact
+Assembly Language.
+.IP put:
+routines that output the intermediate code.
+.LP

+ 6 - 0
doc/ego/il/.distr

@@ -0,0 +1,6 @@
+il1
+il2
+il3
+il4
+il5
+il6

+ 112 - 0
doc/ego/il/il1

@@ -0,0 +1,112 @@
+.bp
+.NH 1
+Inline substitution
+.NH 2
+Introduction
+.PP
+The Inline Substitution technique (IL)
+tries to decrease the overhead associated
+with procedure calls (invocations).
+During a procedure call, several actions
+must be undertaken to set up the right
+environment for the called procedure.
+.[
+johnson calling sequence
+.]
+On return from the procedure, most of these
+effects must be undone.
+This entire process introduces significant
+costs in execution time as well as
+in object code size.
+.PP
+The inline substitution technique replaces
+some of the calls by the modified body of
+the called procedure, hence eliminating
+the overhead.
+Furthermore, as the calling and called procedure
+are now integrated, they can be optimized
+together, using other techniques of the optimizer.
+This often leads to extra opportunities for
+optimization
+.[
+ball predicting effects
+.]
+.[
+carter code generation cacm
+.]
+.[
+scheifler inline cacm
+.]
+.PP
+An inline substitution of a call to a procedure P increases
+the size of the program, unless P is very small or P is
+called only once.
+In the latter case, P can be eliminated.
+In practice, procedures that are called only once occur
+quite frequently, due to the
+introduction of structured programming.
+(Carter
+.[
+carter umi ann arbor
+.]
+states that almost 50% of the Pascal procedures
+he analyzed were called just once).
+.PP
+Scheifler
+.[
+scheifler inline cacm
+.]
+has a more general view of inline substitution.
+In his model, the program under consideration is
+allowed to grow by a certain amount,
+i.e. code size is sacrificed to speed up the program.
+The above two cases are just special cases of
+his model, obtained by setting the size-change to
+(approximately) zero.
+He formulates the substitution problem as follows:
+.IP
+"Given a program, a subset of all invocations,
+a maximum program size, and a maximum procedure size,
+find a sequence of substitutions that minimizes
+the expected execution time."
+.LP
+Scheifler shows that this problem is NP-complete
+.[~[
+aho hopcroft ullman analysis algorithms
+.], chapter 10]
+by reduction to the Knapsack Problem.
+Heuristics will have to be used to find a near-optimal
+solution.
+.PP
+In the following chapters we will extend
+Scheifler's view and adapt it to the EM Global Optimizer.
+We will first describe the transformations that have
+to be applied to the EM text when a call is substituted
+in line.
+Next we will examine in which cases inline substitution
+is not possible or desirable.
+Heuristics will be developed for
+choosing a good sequence of substitutions.
+These heuristics make no demand on the user
+(such as making profiles
+.[
+scheifler inline cacm
+.]
+or giving pragmats
+.[~[
+ichbiah ada military standard
+.], section 6.3.2]),
+although the model could easily be extended
+to use such information.
+Finally, we will discuss the implementation
+of the IL phase of the optimizer.
+.PP
+We will often use the term inline expansion
+as a synonym of inline substitution.
+.sp 0
+The inverse technique of procedure abstraction
+(automatic subroutine generation)
+.[
+shaffer subroutine generation
+.]
+will not be discussed in this report.

+ 93 - 0
doc/ego/il/il2

@@ -0,0 +1,93 @@
+.NH 2
+Parameters and local variables.
+.PP
+In the EM calling sequence, the calling procedure
+pushes its parameters on the stack
+before doing the CAL.
+The called routine first saves some
+status information on the stack and then
+allocates space for its own locals
+(also on the stack).
+Usually, one special purpose register,
+the Local Base (LB) register,
+is used to access both the locals and the
+parameters.
+If memory is highly segmented,
+the stack frames of the caller and the callee
+may be allocated in different fragments;
+an extra Argument Base (AB) register is used
+in this case to access the actual parameters.
+See 4.2 of
+.[
+keizer architecture
+.]
+for further details.
+.PP
+If a procedure call is expanded in line,
+there are two problems:
+.IP 1. 3
+No stack frame will be allocated for the called procedure;
+we must find another place to put its locals.
+.IP 2.
+The LB register cannot be used to access the actual
+parameters;
+as the CAL instruction is deleted, the LB will
+still point to the local base of the \fIcalling\fR procedure.
+.LP
+The local variables of the called procedure will
+be put in the stack frame of the calling procedure,
+just after its own locals.
+The size of the stack frame of the
+calling procedure will be increased
+during its entire lifetime.
+Therefore our model will allow a
+limit to be set on the number of bytes
+for locals that the called procedure may have
+(see next section).
+.PP
+There are several alternatives to access the parameters.
+An actual parameter may be any auxiliary expression,
+which we will refer to as
+the \fIactual parameter expression\fR.
+The value of this expression is stored
+in a location on the stack (see above),
+the \fIparameter location\fR.
+.sp 0
+The alternatives for accessing parameters are:
+.IP -
+save the value of the stackpointer at the point of the CAL
+in a temporary variable X;
+this variable can be used to simulate the AB register,  i.e.
+parameter locations are accessed via an offset to
+the value of X.
+.IP -
+create a new temporary local variable T for
+the parameter (in the stack frame of the caller);
+every access to the parameter location must be changed
+into an access to T.
+.IP -
+do not evaluate the actual parameter expression before the call;
+instead, substitute this expression for every use of the
+parameter location.
+.LP
+The first method may be expensive if X is not
+put in a register.
+We will not use this method.
+The time required to evaluate and access the
+parameters when the second method is used
+will not differ much from the normal
+calling sequence (i.e. not in line call).
+It is not expensive, but there are no
+extra savings either.
+The third method is essentially the 'by name'
+parameter mechanism of Algol60.
+If the actual parameter is just a numeric constant,
+it is advantageous to use it.
+Yet, there are several circumstances
+under which it cannot or should not be used.
+We will deal with this in the next section.
+.sp 0
+In general we will use the third method,
+if it is possible and desirable.
+Such parameters will be called \fIin line parameters\fR.
+In all other cases we will use the second method.

+ 164 - 0
doc/ego/il/il3

@@ -0,0 +1,164 @@
+.NH 2
+Feasibility and desirability analysis
+.PP
+Feasibility and desirability analysis
+of in line substitution differ
+somewhat from most other techniques.
+Usually, much effort is needed to find
+a feasible opportunity for optimization
+(e.g. a redundant subexpression).
+Desirability analysis then checks
+if it is really advantageous to do
+the optimization.
+For IL, opportunities are easy to find.
+To see if an in line expansion is
+desirable will not be hard either.
+Yet, the main problem is to find the most
+desirable ones.
+We will deal with this problem later and
+we will first attend feasibility and
+desirability analysis.
+.PP
+There are several reasons why a procedure invocation
+cannot or should not be expanded in line.
+.sp
+A call to a procedure P cannot be expanded in line
+in any of the following cases:
+.IP 1. 3
+The body of P is not available as EM text.
+Clearly, there is no way to do the substitution.
+.IP 2.
+P, or any procedure called by P (transitively),
+follows the chain of statically enclosing
+procedures (via a LXL or LXA instruction)
+or follows the chain of dynamically enclosing
+procedures (via a DCH).
+If the call were expanded in line,
+one level would be removed from the chains,
+leading to total chaos.
+This chaos could be solved by patching up
+every LXL, LXA or DCH in all procedures
+that could be part of the chains,
+but this is hard to implement.
+.IP 3.
+P, or any procedure called by P (transitively),
+calls a procedure whose body is not
+available as EM text.
+The unknown procedure may use an LXL, LXA or DCH.
+However, in several languages a separately
+compiled procedure has no access to the
+static or dynamic chain.
+In this case
+this point does not apply.
+.IP 4.
+P, or any procedure called by P (transitively),
+uses the LPB instruction, which converts a
+local base to an argument base;
+as the locals and parameters are stored
+in a non-standard way (differing from the
+normal EM calling sequence) this instruction
+would yield incorrect results.
+.IP 5.
+The total number of bytes of the parameters
+of P is not known.
+P may be a procedure with a variable number
+of parameters or may have an array of dynamic size
+as value parameter.
+.LP
+It is undesirable to expand a call to a procedure P in line
+in any of the following cases:
+.IP 1. 3
+P is large, i.e. the number of EM instructions
+of P exceeds some threshold.
+The expanded code would be large too.
+Furthermore, several programs in ACK,
+including the global optimizer itself,
+may run out of memory if they have to run
+in a small address space and are provided with
+very large procedures.
+The threshold may be set to infinite,
+in which case this point does not apply.
+.IP 2.
+P has many local variables.
+All these variables would have to be allocated
+in the stack frame of the calling procedure.
+.PP
+If a call may be expanded in line, we have to
+decide how to access its parameters.
+In the previous section we stated that we would
+use in line parameters whenever possible and desirable.
+There are several reasons why a parameter
+cannot or should not be expanded in line.
+.sp
+No parameter of a procedure P can be expanded in line,
+in any of the following cases:
+.IP 1. 3
+P, or any procedure called by P (transitively),
+does a store-indirect or a use-indirect (i.e. through
+a pointer).
+However, if the front-end has generated messages
+telling that certain parameters can not be accessed
+indirectly, those parameters may be expanded in line.
+.IP 2.
+P, or any procedure called by P (transitively),
+calls a procedure whose body is not available as EM text.
+The unknown procedure may do a store-indirect
+or a use-indirect.
+However, the same remark about front-end messages
+as for 1. holds here.
+.IP 3.
+The address of a parameter location is taken (via a LAL).
+In the normal calling sequence, all parameters
+are stored sequentially. If the address of one
+parameter location is taken, the address of any
+other parameter location can be computed from it.
+Hence we must put every parameter in a temporary location;
+furthermore, all these locations must be in
+the same order as for the normal calling sequence.
+.IP 4.
+P has overlapping parameters; for example, it uses
+the parameter at offset 10 both as a 2 byte and as a 4 byte
+parameter.
+Such code may be produced by the front ends if
+the formal parameter is of some record type
+with variants.
+.PP
+Sometimes a specific parameter must not be expanded in line.
+.sp 0
+An actual parameter expression cannot be expanded in line
+in any of the following cases:
+.IP 1. 3
+P stores into the parameter location.
+Even if the actual parameter expression is a simple
+variable, it is incorrect to change the 'store into
+formal' into a 'store into actual', because of
+the parameter mechanism used.
+In Pascal, the following expansion is incorrect:
+.DS
+procedure p (x:integer);
+begin
+   x := 20;
+end;
+\&...
+a := 10;                \kxa := 10;
+p(a);        --->       \h'|\nxu'a := 20;
+write(a);               \h'|\nxu'write(a);
+.DE
+.IP 2.
+P changes any of the operands of the
+actual parameter expression.
+If the expression is expanded and evaluated
+after the operand has been changed,
+the wrong value will be used.
+.IP 3.
+The actual parameter expression has side effects.
+It must be evaluated only once,
+at the place of the call.
+.LP
+It is undesirable to expand an actual parameter in line
+in the following case:
+.IP 1. 3
+The parameter is used more than once
+(dynamically) and the actual parameter expression
+is not just a simple variable or constant.
+.LP

+ 135 - 0
doc/ego/il/il4

@@ -0,0 +1,135 @@
+.NH 2
+Heuristic rules
+.PP
+Using the information described
+in the previous section,
+we can find all calls that can
+be expanded in line, and for which
+this expansion is desirable.
+In general, we cannot expand all these calls,
+so we have to choose the 'best' ones.
+With every CAL instruction
+that may be expanded, we associate
+a \fIpay off\fR,
+which expresses how desirable it is
+to expand this specific CAL.
+.sp
+Let Tc denote the portion of EM text involved
+in a specific call, i.e. the pushing of the actual
+parameter expressions, the CAL itself,
+the popping of the parameters and the
+pushing of the result (if any, via an LFR).
+Let Te denote the EM text that would be obtained
+by expanding the call in line.
+Let Pc be the original program and Pe the program
+with Te substituted for Tc.
+The pay off of the CAL depends on two factors:
+.IP -
+T = execution_time(Pe) - execution_time(Pc)
+.IP -
+S = code_size(Pe) - code_size(Pc)
+.LP
+The change in execution time (T) depends on:
+.IP -
+T1 = execution_time(Te) - execution_time(Tc)
+.IP -
+N = number of times Te or Tc get executed.
+.LP
+We assume that T1 will be the same every
+time the code gets executed.
+This is a reasonable assumption.
+(Note that we are talking about one CAL,
+not about different calls to the same procedure).
+Hence
+.DS
+T = N * T1
+.DE
+T1 can be estimated by a careful analysis
+of the transformations that are performed.
+Below, we list everything that will be
+different when a call is expanded in line:
+.IP -
+The CAL instruction is not executed.
+This saves a subroutine jump.
+.IP -
+The instructions in the procedure prolog
+are not executed.
+These instructions, generated from the PRO pseudo,
+save some machine registers 
+(including the old LB), set the new LB and allocate space
+for the locals of the called routine.
+The savings may be less if there are no
+locals to allocate.
+.IP -
+In line parameters are not evaluated before the call
+and are not pushed on the stack.
+.IP -
+All remaining parameters are stored in local variables,
+instead of being pushed on the stack.
+.IP -
+If the number of parameters is nonzero,
+the ASP instruction after the CAL is not executed.
+.IP -
+Every reference to an in line parameter is
+substituted by the parameter expression.
+.IP -
+RET (return) instructions are replaced by
+BRA (branch) instructions.
+If the called procedure 'falls through'
+(i.e. it has only one RET, at the end of its code),
+even the BRA is not needed.
+.IP -
+The LFR (fetch function result) is not executed
+.PP
+Besides these changes, which are caused directly by IL,
+other changes may occur as IL influences other optimization
+techniques, such as Register Allocation and Constant Propagation.
+Our heuristic rules do not take into account the quite
+unpredictable effects on Register Allocation.
+They do, however, favour calls that have numeric \fIconstants\fR
+as parameters; especially the constant "0" as an in line
+parameter gets high scores,
+as further optimizations may often be possible.
+.PP
+It cannot be determined statically how often a CAL instruction gets
+executed.
+We will use \fIloop nesting\fR information here.
+The nesting level of the loop in which
+the CAL appears (if any) will be used as an
+indication for the number of times it gets executed.
+.PP
+Based on all these facts,
+the pay off of a call will be computed.
+The following model was developed empirically.
+Assume procedure P calls procedure Q.
+The call takes place in basic block B.
+.DS
+.TS
+l l l.
+ZP	\&=	# zero parameters
+CP	\&=	# constant parameters - ZP
+LN	\&=	Loop Nesting level (0 if outside any loop)
+F	\&=	\fIif\fR # formal parameters of Q > 0 \fIthen\fR 1 \fIelse\fR 0
+FT	\&=	\fIif\fR Q falls through \fIthen\fR 1 \fIelse\fR 0
+S	\&=	size(Q) - 1 - # inline_parameters - F
+L	\&=	\fIif\fR # local variables of P > 0 \fIthen\fR 0 \fIelse\fR -1
+A	\&=	CP + 2 * ZP
+N	\&=	\fIif\fR LN=0 and P is never called from a loop \fIthen\fR 0 \fIelse\fR (LN+1)**2
+FM	\&=	\fIif\fR B is a firm block \fIthen\fR 2 \fIelse\fR 1
+
+pay_off	\&=	(100/S + FT + F + L + A) * N * FM
+.TE
+.DE
+S stands for the size increase of the program,
+which is slightly less than the size of Q.
+The size of a procedure is taken to be its number
+of (non-pseudo) EM instructions.
+The terms "loop nesting level" and "firm" were defined
+in the chapter on the Intermediate Code (section "loop tables").
+If a call is not inside a loop and the calling procedure
+is itself never called from a loop (transitively),
+then the call will probably be executed at most once.
+Such a call is never expanded in line (its pay off is zero).
+If the calling procedure doesn't have local variables, a penalty (L)
+is introduced, as it will most likely get local variables if the
+call gets expanded.
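+.PP
+Written out as C, the formula reads as sketched below; the parameter
+names are invented and the sketch does not guard against S becoming
+zero or negative for extremely small procedures.
+.DS
+static int pay_off(int zp,        /* # zero parameters                */
+                   int cp_all,    /* # constant parameters (incl. zp) */
+                   int ln,        /* loop nesting level               */
+                   int nformals,  /* # formal parameters of Q         */
+                   int fall,      /* does Q fall through?             */
+                   int size_q,    /* size of Q in EM instructions     */
+                   int ninline,   /* # in line parameters             */
+                   int nlocals,   /* # local variables of P           */
+                   int from_loop, /* is P ever called from a loop?    */
+                   int firm)      /* is B a firm block?               */
+{
+    int cp = cp_all - zp;
+    int f  = nformals > 0 ? 1 : 0;
+    int ft = fall ? 1 : 0;
+    int s  = size_q - 1 - ninline - f;
+    int l  = nlocals > 0 ? 0 : -1;
+    int a  = cp + 2 * zp;
+    int n  = (ln == 0 && !from_loop) ? 0 : (ln + 1) * (ln + 1);
+    int fm = firm ? 2 : 1;
+
+    return (100 / s + ft + f + l + a) * n * fm;
+}
+.DE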

+ 446 - 0
doc/ego/il/il5

@@ -0,0 +1,446 @@
+.NH 2
+Implementation
+.PP
+A major factor in the implementation
+of Inline Substitution is the requirement
+not to use an excessive amount of memory.
+IL essentially analyzes the entire program;
+it makes decisions based on which procedure calls
+appear in the whole program.
+Yet, because of the memory restriction, it is
+not feasible to read the entire program
+in main memory.
+To solve this problem, the IL phase has been
+split up into three subphases that are executed sequentially:
+.IP 1.
+analyze every procedure; see how it accesses its parameters;
+simultaneously collect all calls
+appearing in the whole program and put them
+in a \fIcall-list\fR.
+.IP 2.
+use the call-list and decide which calls will be substituted
+in line.
+.IP 3.
+take the decisions of subphase 2 and modify the
+program accordingly.
+.LP
+Subphases 1 and 3 scan the input program; only
+subphase 3 modifies it.
+It is essential that the decisions can be made
+in subphase 2
+without using the input program,
+provided that subphase 1 puts enough information
+in the call-list.
+Subphase 2 keeps the entire call-list in main memory
+and repeatedly scans it, to
+find the next best candidate for expansion.
+.PP
+We will specify the
+data structures used by IL before 
+describing the subphases.
+.NH 3
+Data structures
+.NH 4
+The procedure table
+.PP
+In subphase 1 information is gathered about every procedure
+and added to the procedure table.
+This information is used by the heuristic rules.
+A proctable entry for procedure p has
+the following extra information:
+.IP -
+is it allowed to substitute an invocation of p in line?
+.IP -
+is it allowed to put any parameter of such a call in line?
+.IP -
+the size of p (number of EM instructions)
+.IP -
+does p 'fall through'?
+.IP -
+a description of the formal parameters that p accesses; this information
+is obtained by looking at the code of p. For every parameter f,
+we record:
+.RS
+.IP -
+the offset of f
+.IP -
+the type of f (word, double word, pointer)
+.IP -
+may the corresponding actual parameter be put in line?
+.IP -
+is f ever accessed indirectly?
+.IP -
+is f used: never, once or more than once?
+.RE
+.IP -
+the number of times p is called (see below)
+.IP -
+the file address of its call-count information (see below).
+.LP
+.NH 4
+Call-count information
+.PP
+As a result of Inline Substitution, some procedures may
+become useless, because all their invocations have been
+substituted in line.
+One of the tasks of IL is to keep track of which
+procedures are no longer called.
+Note that IL is especially keen on procedures that are
+called only once
+(possibly as a result of expanding all other calls to it).
+So we want to know how many times a procedure
+is called \fIduring\fR Inline Substitution.
+It is not good enough to compute this
+information afterwards.
+The task is rather complex, because
+the number of times a procedure is called
+varies during the entire process:
+.IP 1.
+If a call to p is substituted in line,
+the number of calls to p gets decremented by 1.
+.IP 2.
+If a call to p is substituted in line,
+and p contains n calls to q, then the number of calls to q
+gets incremented by n.
+.IP 3.
+If a procedure p is removed (because it is no
+longer called) and p contains n calls to q,
+then the number of calls to q gets decremented by n.
+.LP
+(Note that p may be the same as q, if p is recursive).
+.sp 0
+So we actually want to have the following information:
+.DS
+NRCALL(p,q) = number of calls to q appearing in p,
+
+for all procedures p and q that may be put in line.
+.DE
+This information, called \fIcall-count information\fR is
+computed by the first subphase.
+It is stored in a file.
+It is represented as a number of lists, rather than as
+a (very sparse) matrix.
+Every procedure has a list of (proc,count) pairs,
+telling which procedures it calls, and how many times.
+The file address of its call-count list is stored
+in its proctable entry.
+Whenever this information is needed, it is fetched from
+the file, using direct access.
+The proctable entry also contains the number of times
+a procedure is called, at any moment.
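+.PP
+The bookkeeping implied by these rules can be sketched as follows;
+the array representation and all names are inventions of the sketch
+(the real information is kept on file as lists).
+.DS
+#define NPROC 128                      /* arbitrary upper bound        */
+
+static int ncalled[NPROC];             /* current # calls to each proc */
+static int nrcall[NPROC][NPROC];       /* NRCALL(p,q)                  */
+
+/* A call to q has just been substituted in line. */
+static void call_expanded(int q)
+{
+    int r;
+
+    ncalled[q]--;                      /* rule 1 */
+    for (r = 0; r < NPROC; r++)
+        ncalled[r] += nrcall[q][r];    /* rule 2 */
+}
+
+/* Procedure p is no longer called and is removed. */
+static void proc_removed(int p)
+{
+    int r;
+
+    for (r = 0; r < NPROC; r++)
+        ncalled[r] -= nrcall[p][r];    /* rule 3 */
+}
+.DE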
+.NH 4
+The call-list
+.PP
+The call-list is the major data structure used by IL.
+Every item of the list describes one procedure call.
+It contains the following attributes:
+.IP -
+the calling procedure (caller)
+.IP -
+the called procedure (callee)
+.IP -
+identification of the CAL instruction (sequence number)
+.IP -
+the loop nesting level; our heuristic rules appreciate
+calls inside a loop (or even inside a loop nested inside
+another loop, etc.) more than other calls
+.IP -
+the actual parameter expressions involved in the call;
+for every actual, we record:
+.RS
+.IP -
+the EM code of the expression
+.IP -
+the number of bytes of its result (size)
+.IP -
+an indication if the actual may be put in line
+.RE
+.LP
+The structure of the call-list is rather complex.
+Whenever a call is expanded in line, new calls
+will suddenly appear in the program,
+that were not contained in the original body
+of the calling subroutine.
+These calls are inherited from the called procedure.
+We will refer to these invocations as \fInested calls\fR
+(see Fig. 5.1).
+.DS
+.TS
+lw(2.5i) l.
+procedure p is
+begin	.
+     a();	.
+     b();	.
+end;
+.TE
+
+.TS
+lw(2.5i) l.
+procedure r is	procedure r is
+begin	begin
+     x();	    x();
+     p();  -- in line	    a();  -- nested call
+     y();	    b();  -- nested call
+end;	    y();
+	end;
+.TE
+
+Fig. 5.1 Example of nested procedure calls
+.DE
+Nested calls may subsequently be put in line too
+(probably resulting in a yet deeper nesting level, etc.).
+So the call-list does not always reflect the source program,
+but changes dynamically, as decisions are made.
+If a call to p is expanded, all calls appearing in p
+will be added to the call-list.
+.sp 0
+A convenient and elegant way to represent
+the call-list is to use a LISP-like list.
+.[
+poel lisp trac
+.]
+Calls that appear at the same level
+are linked in the CDR direction. If a call C
+to a procedure p is expanded,
+all calls appearing in p are put in a sub-list
+of C, i.e. in its CAR.
+In the example above, before the decision
+to expand the call to p is made, the
+call-list of procedure r looks like:
+.DS
+(call-to-x, call-to-p, call-to-y)
+.DE
+After the decision, it looks like:
+.DS
+(call-to-x, (call-to-p*, call-to-a, call-to-b), call-to-y)
+.DE
+The call to p is marked, because it has been
+substituted.
+Whenever IL wants to traverse the call-list of some procedure,
+it uses the well-known LISP technique of
+recursion in the CAR direction and
+iteration in the CDR direction
+(see page 1.19-2 of
+.[
+poel lisp trac
+.]
+).
+All list traversals look like:
+.DS
+traverse(list)
+{
+    /* walk the calls at this level in the CDR direction */
+    for (c = first(list); c != 0; c = CDR(c)) {
+        if (c is marked) {
+            /* an expanded call: visit its nested calls */
+            traverse(CAR(c));
+        } else {
+            do something with c
+        }
+    }
+}
+.DE
+The entire call-list consists of a number of LISP-like lists,
+one for every procedure.
+The proctable entry of a procedure contains a pointer
+to the beginning of the list.
+.NH 3
+The first subphase: procedure analysis
+.PP
+The tasks of the first subphase are to determine
+several attributes of every procedure
+and to construct the basic call-list,
+i.e. without nested calls.
+The size of a procedure is determined
+by simply counting its EM instructions.
+Pseudo instructions are skipped.
+A procedure does not 'fall through' if its CFG
+contains a basic block
+that is not the last block of the CFG and
+that ends on a RET instruction.
+The formal parameters of a procedure are determined
+by inspection of
+its code.
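+.PP
+The 'fall through' test just described amounts to the following sketch,
+with invented field names.
+.DS
+struct bblock {
+    struct bblock *b_next;        /* next block in the CFG, 0 for last */
+    int            b_ends_on_ret; /* does the block end on a RET?      */
+};
+
+static int falls_through(struct bblock *first)
+{
+    struct bblock *b;
+
+    for (b = first; b != 0; b = b->b_next)
+        if (b->b_ends_on_ret && b->b_next != 0)
+            return 0;             /* RET before the last block */
+    return 1;
+}
+.DE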
+.PP
+The call-list is constructed by looking at all CAL instructions
+appearing in the program.
+The call-list should only contain calls to procedures
+that may be put in line.
+This fact is only known if the procedure was
+analyzed earlier.
+If a call to a procedure p appears in the program
+before the body of p,
+the call will always be put in the call-list.
+If p is later found to be unsuitable,
+the call will be removed from the list by the
+second subphase.
+.PP
+An important issue is the recognition
+of the actual parameter expressions of the call.
+The front ends produce messages telling how many
+bytes of formal parameters every procedure accesses.
+(If there is no such message for a procedure, it
+cannot be put in line).
+The actual parameters together must account for
+the same number of bytes.
+A recursive descent parser is used
+to parse side-effect free EM expressions.
+It uses a table and some
+auxiliary routines to determine
+how many bytes every EM instruction pops from the stack
+and how many bytes it pushes onto the stack.
+These numbers depend on the EM instruction, its argument,
+and the wordsize and pointersize of the target machine.
+Initially, the parser has to recognize the
+number of bytes specified in the formals-message,
+say N.
+Assume the first instruction before the CAL pops S bytes
+and pushes R bytes.
+If R > N, too many bytes are recognized
+and the parser fails.
+Else, it calls itself recursively to recognize the
+S bytes used as operand of the instruction.
+If it succeeds in doing so, it continues with the next instruction,
+i.e. the first instruction before the code recognized by
+the recursive call, to recognize N-R more bytes.
+The result is a number of EM instructions that collectively push N bytes.
+If an instruction is encountered that has side-effects
+(e.g. a store or a procedure call), or whose R and S cannot
+be computed statically (e.g. a LOS), the parser fails.
+.sp 0
+Note that the parser traverses the code backwards.
+As EM code is essentially postfix code, the parser works top down.
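+.PP
+In outline, the parser looks as follows.
+The sketch assumes helper routines that give, for one instruction, the
+number of bytes it pops (S) and pushes (R), or -1 if this cannot be
+determined statically; these helpers and the instruction type are not
+the real ACK routines.
+.DS
+struct em_instr;                                  /* opaque here      */
+extern long bytes_popped(struct em_instr *);      /* S, -1 if unknown */
+extern long bytes_pushed(struct em_instr *);      /* R, -1 if unknown */
+extern int  has_side_effect(struct em_instr *);
+
+/* Recognize, backwards, a sequence of instructions that together
+ * push nbytes bytes.  instr[0..*i] are the candidate instructions;
+ * on success, *i is left just below the recognized code.
+ */
+static int recognize(struct em_instr *instr[], int *i, long nbytes)
+{
+    while (nbytes > 0) {
+        struct em_instr *ip;
+        long r, s;
+
+        if (*i < 0)
+            return 0;                  /* ran out of instructions */
+        ip = instr[*i];
+        if (has_side_effect(ip))
+            return 0;
+        r = bytes_pushed(ip);          /* R */
+        s = bytes_popped(ip);          /* S */
+        if (r < 0 || s < 0 || r > nbytes)
+            return 0;                  /* unknown, or too many bytes */
+        (*i)--;
+        if (!recognize(instr, i, s))   /* recognize ip's own operands */
+            return 0;
+        nbytes -= r;                   /* N-R bytes still to go */
+    }
+    return 1;
+}
+.DE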
+.PP
+If the parser fails to recognize the parameters, the call will not
+be substituted in line.
+If the parameters can be determined, they still have to
+match the formal parameters of the called procedure.
+This check is performed by the second subphase; it cannot be
+done here, because it is possible that the called
+procedure has not been analyzed yet.
+.PP
+The entire call-list is written to a file,
+to be processed by the second subphase.
+.NH 3
+The second subphase: making decisions
+.PP
+The task of the second subphase is quite easy
+to understand.
+It reads the call-list file,
+builds an incore call-list and deletes every
+call that may not be expanded in line (either because the called
+procedure may not be put in line, or because the actual parameters
+of the call do not match the formal parameters of the called procedure).
+It assigns a \fIpay-off\fR to every call,
+indicating how desirable it is to expand it.
+.PP
+The subphase repeatedly scans the call-list and takes
+the call with the highest ratio.
+The chosen one gets marked,
+and the call-list is extended with the nested calls,
+as described above.
+These nested calls are also assigned a ratio,
+and will be considered too during the next scans.
+.sp 0
+After every decision the number of times
+every procedure is called is updated, using
+the call-count information.
+Meanwhile, the subphase keeps track of the amount of space left
+available.
+If all space is used, or if there are no more calls left to
+be expanded, it exits this loop.
+Finally, calls to procedures that are called only
+once are also chosen.
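+.PP
+In outline, the selection loop looks as sketched below; the routine
+names are invented and do not correspond to the actual sources.
+.DS
+struct call;                                /* one call-list item       */
+extern struct call *best_call(void);        /* unmarked, highest pay-off */
+extern long space_needed(struct call *);
+extern void mark(struct call *);            /* decide to expand it      */
+extern void add_nested_calls(struct call *);
+extern void update_call_counts(struct call *);
+
+static void decide(long space_left)
+{
+    struct call *c;
+
+    while ((c = best_call()) != 0) {
+        if (space_needed(c) > space_left)
+            break;                          /* all space is used        */
+        mark(c);
+        space_left -= space_needed(c);
+        add_nested_calls(c);                /* extend the call-list     */
+        update_call_counts(c);
+    }
+    /* calls to procedures that are called only once are chosen last */
+}
+.DE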
+.PP
+The actual parameters of a call are only needed by
+this subphase to assign a ratio to a call.
+To save some space, these actuals are not kept in main memory.
+They are removed after the call has been read and a ratio
+has been assigned to it.
+So this subphase works with \fIabstracts\fR of calls.
+After all work has been done,
+the actual parameters of the chosen calls are retrieved
+from a file,
+as they are needed by the transformation subphase.
+.NH 3
+The third subphase: doing transformations
+.PP
+The third subphase makes the actual modifications to
+the EM text.
+It is directed by the decisions made in the previous subphase,
+as expressed via the call-list.
+The call-list read by this subphase contains
+only calls that were selected for expansion.
+The list is ordered in the same way as the EM text,
+i.e. if a call C1 appears before a call C2 in the call-list,
+C1 also appears before C2 in the EM text.
+So the EM text is traversed linearly,
+the calls that have to be substituted are determined
+and the modifications are made.
+If a procedure is encountered that is no longer needed,
+it is simply not written to the output EM file.
+The substitution of a call takes place in distinct steps:
+.IP "change the calling sequence" 7
+.sp 0
+The actual parameter expressions are changed.
+Parameters that are put in line are removed.
+All remaining ones must store their result in a
+temporary local variable, rather than
+push it on the stack.
+The CAL instruction and any ASP (to pop actual parameters)
+or LFR (to fetch the result of a function)
+are deleted.
+.IP "fetch the text of the called procedure"
+.sp 0
+Direct disk access is used to read the text of the
+called procedure.
+The file offset is obtained from the proctable entry.
+.IP "allocate bytes for locals and temporaries"
+.sp 0
+The local variables of the called procedure will be put in the
+stack frame of the calling procedure.
+The same applies to any temporary variables
+that hold the result of parameters
+that were not put in line.
+The proctable entry of the caller is updated.
+.IP "put a label after the CAL"
+.sp 0
+If the called procedure contains a RET (return) instruction
+somewhere in the middle of its text (i.e. it does
+not fall through), the RET must be changed into
+a BRA (branch), to jump over the
+remainder of the text.
+This label is not needed if the called
+procedure falls through.
+.IP "copy the text of the called procedure and modify it"
+.sp 0
+References to local variables of the called routine
+and to parameters that are not put in line
+are changed to refer to the
+new local of the caller.
+References to in line parameters are replaced
+by the actual parameter expression.
+Returns (RETs) are either deleted or
+replaced by a BRA.
+Messages containing information about local
+variables or parameters are changed.
+Global data declarations and the PRO and END pseudos
+are removed.
+Instruction labels and references to them are
+changed to make sure they do not have the
+same identifying number as
+labels in the calling procedure.
+.IP "insert the modified text"
+.sp 0
+The pseudos of the called procedure are put after the pseudos
+of the calling procedure.
+The real text of the callee is put at
+the place where the CAL was.
+.IP "take care of nested substitutions"
+.sp 0
+The expanded procedure may contain calls that
+have to be expanded too (nested calls).
+If the descriptor of this call contains actual
+parameter expressions,
+the code of the expressions has to be changed
+in the same way as the code of the callee was changed.
+Next, the entire process of finding CALs and doing
+the substitutions is repeated recursively.
+.LP

+ 27 - 0
doc/ego/il/il6

@@ -0,0 +1,27 @@
+.NH 2
+Source files of IL
+.PP
+The sources of IL are in the following files
+and packages (the prefixes 1_, 2_ and 3_ refer to the three subphases):
+.IP il.h: 14
+declarations of global variables and
+data structures
+.IP il.c:
+the routine main; the driving routines of the three subphases
+.IP 1_anal:
+contains a subroutine that analyzes a procedure
+.IP 1_cal:
+contains a subroutine that analyzes a call
+.IP 1_aux:
+implements auxiliary procedures used by subphase 1
+.IP 2_aux:
+implements auxiliary procedures used by subphase 2
+.IP 3_subst:
+the driving routine for doing the substitution
+.IP 3_change:
+lower level routines that do certain modifications
+.IP 3_aux:
+implements auxiliary procedures used by subphase 3
+.IP aux:
+implements auxiliary procedures used by several subphases.
+.LP

+ 3 - 0
doc/ego/intro/.distr

@@ -0,0 +1,3 @@
+head
+intro1
+tail

+ 10 - 0
doc/ego/intro/head

@@ -0,0 +1,10 @@
+.ND
+.\".ll 80m
+.\".nr LL 80m
+.\".nr tl 78m
+.tr ~ 
+.ds >. .
+.ds >, ,
+.ds [. " [
+.ds .] ]
+.cs 5 22

+ 79 - 0
doc/ego/intro/intro1

@@ -0,0 +1,79 @@
+.TL
+The design and implementation of
+the EM Global Optimizer
+.AU
+H.E. Bal
+.AI
+Vrije Universiteit
+Wiskundig Seminarium, Amsterdam
+.AB
+The EM Global Optimizer is part of the Amsterdam Compiler Kit,
+a toolkit for making retargetable compilers.
+It optimizes the intermediate code common to all compilers of
+the toolkit (EM),
+so it can be used for all programming languages and
+all processors supported by the kit.
+.PP
+The optimizer is based on well-understood concepts like
+control flow analysis and data flow analysis.
+It performs the following optimizations:
+Inline Substitution, Strength Reduction, Common Subexpression Elimination,
+Stack Pollution, Cross Jumping, Branch Optimization, Copy Propagation,
+Constant Propagation, Dead Code Elimination and Register Allocation.
+.PP
+This report describes the design of the optimizer and several
+of its implementation issues.
+.AE
+.bp
+.NH 1
+Introduction
+.PP
+.FS
+This work was supported by the
+Stichting Technische Wetenschappen (STW)
+under grant VWI00.0001.
+.FE
+The EM Global Optimizer is part of a software toolkit
+for making production-quality retargetable compilers.
+This toolkit,
+called the Amsterdam Compiler Kit
+.[
+tanenbaum toolkit rapport
+.]
+.[
+tanenbaum toolkit cacm
+.]
+runs under the Unix*
+.FS
+*Unix is a Trademark of Bell Laboratories
+.FE
+operating system.
+.sp 0
+The main design philosophy of the toolkit is to use
+a language- and machine-independent
+intermediate code, called EM.
+.[
+keizer architecture
+.]
+The basic compilation process can be split up into
+two parts.
+A language-specific front end translates the source program into EM.
+A machine-specific back end transforms EM to assembly code
+of the target machine.
+.PP
+The global optimizer is an optional phase of the
+compilation process, and can be used to obtain
+machine code of a higher quality.
+The optimizer transforms EM-code to better EM-code,
+so it comes between the front end and the back end.
+It can be used with any combination of languages
+and machines, as far as they are supported by
+the compiler kit.
+.PP
+This report describes the design of the
+global optimizer and several of its
+implementation issues.
+Measurements can be found in.
+.[
+bal tanenbaum global
+.]

+ 17 - 0
doc/ego/intro/tail

@@ -0,0 +1,17 @@
+.SH
+Acknowledgements
+.PP
+The author would like to thank Andy Tanenbaum for his guidance,
+Duk Bekema for implementing the Common Subexpression Elimination phase
+and writing the initial documentation of that phase,
+Dick Grune for reading the manuscript of this report
+and Ceriel Jacobs, Ed Keizer, Martin Kersten, Hans van Staveren
+and the members of the S.T.W. user's group for their
+interest and assistance.
+.bp
+.SH
+References
+.LP
+.[
+$LIST$
+.]
