/*
 * GTools C compiler
 * =================
 * source file : gen68k.c
 * code generator for the 68000
 *
 * Copyright 2001-2004 Paul Froissart.
 * Credits to Christoph van Wuellen and Matthew Brandt.
 * All commercial rights reserved.
 *
 * This compiler may be redistributed as long as there is no
 * commercial interest. The compiler must not be redistributed
 * without its full sources. This notice must stay intact.
 */
#include "define.h"
_FILE(__FILE__)
#include "c.h"
#include "expr.h"
#include "gen.h"
#include "cglbdec.h"
#ifndef NOFLOAT
#include "ffplib.h"
#endif
#ifdef VCG
extern int vcg_lvl;
#endif
#ifdef MC680X0
readonly struct amode push = {am_adec, 0, init_reg_t(STACKPTR - AREGBASE), nil_reg_t, 0},
                      pop  = {am_ainc, 0, init_reg_t(STACKPTR - AREGBASE), nil_reg_t, 0};
#endif
XLST_TYPE act_scratch CGLOB;
#ifdef FLINE_RC
int fline_rc CGLOB;
#endif
/*
 * this module contains all of the code generation routines for evaluating
 * expressions and conditions.
 */
#ifdef MC680X0
void swap_nodes(struct enode *node);
void opt_compare(struct enode *node);
struct amode *mk_scratch(long size) {
    /*
     * returns addressing mode of form offset(FRAMEPTR)
     * size is rounded up to AL_DEFAULT
     */
    struct amode *ap;
    /* round up the request */
    if (size % AL_DEFAULT)
        size += AL_DEFAULT - (size % AL_DEFAULT);
    /* allocate the storage */
    act_scratch += size;
    /*
     * The next statement could be deferred and put into the
     * routine checkstack(), but this is just safer.
     */
    if (act_scratch > max_scratch)
        max_scratch = act_scratch;
    ap = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+MK_SCRATCH);
    ap->mode = am_indx;
    ap->preg = FRAMEPTR - AREGBASE;
    ap->offset = mk_icon((long) -(lc_auto+act_scratch));
    return ap;
}
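/*
 * Illustrative example (assuming AL_DEFAULT == 4): a request for 6 bytes is
 * rounded up to 8, act_scratch grows by 8, and the returned operand is
 * -(lc_auto+act_scratch)(FRAMEPTR), i.e. an am_indx access just below the
 * function's named locals.
 */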
struct amode *mk_label(unsigned int lab) {
    /*
     * construct a reference node for an internal label number.
     */
    struct enode *lnode;
    struct amode *ap;
    lnode = mk_node(en_labcon, NIL_ENODE, NIL_ENODE);
    lnode->v.enlab = lab;
    ap = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+MK_LABEL);
    ap->mode = am_direct;
    ap->offset = lnode;
    return ap;
}
struct amode *mk_immed(long i) {
    /*
     * make a node to reference an immediate value i.
     */
    struct amode *ap;
    struct enode *ep;
    ep = mk_icon(i);
    /* ep = mk_icon(0l);
    ep->v.i = i;*/
    ap = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+MK_IMMED);
    ap->mode = am_immed;
    ap->offset = ep;
    return ap;
}
struct amode *mk_offset(struct enode *node) {
    /*
     * make a direct reference to a node.
     */
    struct amode *ap;
    ap = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+MK_OFFSET);
    ap->mode = am_direct;
    ap->offset = node;
    return ap;
}
struct amode *mk_legal(struct amode *ap, int flags, long size) {
    /*
     * mk_legal will coerce the addressing mode in ap into a mode that is
     * satisfactory for the flag word.
     */
    struct amode *ap2;
    if (flags & F_NOVALUE) {
        freeop(ap);
        return 0;
    }
    if (size == 1)
        flags &= ~F_AREG;
    switch (ap->mode) {
    case am_immed:
        if (!(flags & F_DEREF) && ap->offset
#ifdef EXE_OUT
            //&& !exestub_mode -> because it remains profitable to do this
            //   even with very efficient relocations...
#endif
            && (ap->offset->nodetype == en_nacon
                || ap->offset->nodetype == en_labcon)
#ifdef AS
            && !external(ap->offset->v.enlab)
#else
#if defined(PC) && !defined(DISABLE_OPTS)
            && ((long)ap->offset->v.ensp > 0x1000 ? internal(ap->offset->v.ensp) : 1)
#endif
#endif
            ) {
            ap->mode = am_direct;
            ap2 = temp_addr(); /* allocate to areg */
            g_code(op_lea, 0, ap, ap2);
            if (flags & F_AREG)
                return ap2;
            freeop(ap2);
            ap = temp_data();
            g_code(op_move, (int) (size+(size&1)), ap2, ap); /* if we want a byte, do a move.w, not a move.b */
            if (flags & F_DREG)
                return ap;
            ierr(MK_LEGAL, 1);
        }
        if (flags & F_IMMED) {
            return ap; /* mode ok */
        }
        break;
    case am_areg:
        if (flags & F_AREG && (!(flags & F_VOL) || ap->preg <= MAX_ADDR))
            return ap;
        break;
    case am_dreg:
        if (flags & F_DREG && (!(flags & F_VOL) || ap->preg <= MAX_DATA))
            return ap;
        break;
    case am_direct:
        /* infunc("DrawColorLine")
            bkpt();*/
        if (!(flags & F_SRCOP) && !(flags & F_DEREF) && ap->offset
#ifdef EXE_OUT
            //&& !exestub_mode -> because it remains profitable to do this
            //   even with very efficient relocations...
#endif
            && (ap->offset->nodetype == en_nacon
                || ap->offset->nodetype == en_labcon)
#ifdef AS
            && !external(ap->offset->v.enlab)
#else
#if defined(PC) && !defined(DISABLE_OPTS)
            && ((long)ap->offset->v.ensp > 0x1000 ? internal(ap->offset->v.ensp) : 1)
#endif
#endif
            && (flags & F_MEM)
            ) {
            ap2 = temp_addr(); // allocate to areg
            g_code(op_lea, 0, ap, ap2);
            ap2 = copy_addr(ap2);
            ap2->mode = am_ind;
            // if (flags & F_MEM)
            return ap2;
            /* freeop(ap2);
            if ((flags&(F_AREG|F_DEREF))==(F_AREG|F_DEREF) || !(flags&F_DREG))
                ap=temp_addr();
            else ap=temp_data();
            // ap = temp_data();
            g_code(op_move, (int) size, ap2, ap);
            // if (flags & F_DREG)
            return ap;
            ierr(MK_LEGAL,2);*/
        }
        /* FALL THROUGH */
    case am_ind:
    case am_indx:
    case am_indx2:
    case am_indx3:
    case am_ainc:
        if (flags & F_MEM)
            return ap;
        break;
    }
    if ((flags & F_DREG) && (flags & F_AREG)) {
        /* decide which mode is better */
        if (ap->mode == am_immed) {
            if (isbyte(ap->offset))
                flags &= F_DREG;
            else if (isshort(ap->offset) && size == 4)
                flags &= F_AREG;
        }
    }
    if (flags & F_DREG) {
        freeop(ap); /* maybe we can use it... */
        ap2 = temp_data(); /* allocate to dreg */
        g_code(op_move, (int) size, ap, ap2);
        return ap2;
    }
    if (!(flags & F_AREG))
        ierr(MK_LEGAL, 3);
    if (size < 2)
        ierr(MK_LEGAL, 4);
    freeop(ap);
    ap2 = temp_addr();
    g_code(op_move, (int) size, ap, ap2);
    return ap2;
}
int isshort(struct enode *node) {
    /*
     * return true if the node passed can be generated as a short offset.
     */
    return node->nodetype == en_icon &&
        (node->v.i >= -32768 && node->v.i <= 32767);
}
int isbyte(struct enode *node) {
    return node->nodetype == en_icon &&
        (node->v.i >= -128 && node->v.i <= 127);
}
struct amode *copy_addr(struct amode *ap) {
    /*
     * copy an address mode structure.
     */
    struct amode *newap;
    if (ap == 0)
        ierr(COPY_ADDR, 1);
    newap = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+COPY_ADDR);
    *newap = *ap;
    return newap;
}
struct amode *g_index(struct enode *node) {
    /*
     * generate code to evaluate an index node and return the addressing
     * mode of the result.
     */
    struct amode *ap1, *ap2;
    char size = 4;
#ifndef FORBID_TABLES
    if (node->v.p[0]->nodetype == en_tempref &&
        node->v.p[1]->nodetype == en_tempref &&
        (node->v.p[0]->v.i >= AREGBASE || node->v.p[1]->v.i >= AREGBASE)) {
        /* both nodes are registers, one is address */
        if (node->v.p[0]->v.i < AREGBASE) { /* first node is data register */
            ap1 = g_expr(node->v.p[1], F_AREG);
            ap1 = copy_addr(ap1);
            ap1->sreg = (reg_t)node->v.p[0]->v.i;
            ap1->mode = am_indx2; /* 0(Ax,Dx) */
            ap1->slen = 4;
            ap1->offset = mk_icon(0l);
            return ap1;
        } else { /* first node is address register */
            ap1 = g_expr(node->v.p[0], F_AREG);
            ap2 = g_expr(node->v.p[1], F_AREG | F_DREG);
            validate(ap1);
            ap1 = copy_addr(ap1);
            if (ap2->mode == am_dreg) {
                /* 0(Ax,Dx) */
                ap1->mode = am_indx2;
                ap1->sreg = ap2->preg;
                ap1->slen = 4;
            } else {
                /* 0(Ax,Ay) */
                ap1->mode = am_indx3;
                ap1->sreg = ap2->preg;
                ap1->slen = 4;
            }
            ap1->offset = mk_icon(0l);
            return ap1;
        }
    }
#endif
    /* The general case (no tempref) */
    /* put address temprefs first place, data temprefs second place */
    if (node->v.p[1]->nodetype == en_tempref && node->v.p[1]->v.i >= AREGBASE)
        swap_nodes(node);
    else if (node->v.p[0]->nodetype == en_tempref && node->v.p[0]->v.i < AREGBASE)
        swap_nodes(node);
    else if (node->v.p[1]->etype == bt_pointer && node->v.p[0]->etype != bt_pointer)
        swap_nodes(node);
    ap1 = g_expr(node->v.p[0], F_AREG | F_IMMED);
    if (ap1->mode == am_areg) {
        struct enode *ep = node->v.p[1], *tempep = ep;
        while (tempep->nodetype == en_cast) /* avoid useless op_ext's */
            if ((tempep = tempep->v.p[0])->esize == 2)
                size = 2, ep = tempep;
        ap2 = g_expr(ep, F_AREG | F_DREG | F_IMMED);
        validate(ap1);
    } else {
        ap2 = ap1;
        ap1 = g_expr(node->v.p[1], F_AREG/* | F_IMMED*/);
        validate(ap2);
    }
    /*
     * possible combinations:
     *
     * F_AREG + F_IMMED, F_AREG + F_DREG and F_AREG + F_AREG
     * (F_IMMED + F_IMMED is now impossible, since it would mean a bug
     * in the optimizer, and it does no harm except for optimization
     * if such a bug does exist)
     */
    /* watch out for: tempref(addr) + temp_addr, tempref(addr) + temp_data */
    if (/*ap1->mode == am_areg && */ap1->preg > MAX_ADDR) {
        /* ap1 = tempref address register */
        ap1 = copy_addr(ap1);
#ifndef FORBID_TABLES
        if (ap2->mode == am_dreg) {
            /* 0(Ax,Dy) */
            ap1->mode = am_indx2;
            ap1->sreg = ap2->preg;
            ap1->slen = size;
            ap1->deep = ap2->deep;
            ap1->offset = mk_icon(0l);
            return ap1;
        }
        if (ap2->mode == am_areg) {
            /* 0(Ax,Ay) */
            ap1->mode = am_indx3;
            ap1->sreg = ap2->preg;
            ap1->slen = size;
            ap1->deep = ap2->deep;
            ap1->offset = mk_icon(0l);
            return ap1;
        }
#endif
#ifndef FORBID_TABLES
        if (ap2->mode == am_immed &&   /* if FORBID_TABLES, then we */
            (!isshort(ap2->offset)))   /* always want ap1 to be F_VOL */
#endif
            /* we want to add to ap1 later... */
            ap1 = mk_legal(ap1, F_AREG | F_VOL, 4l);
    }
    /* watch out for: temp_addr + tempref(data) */
#ifndef FORBID_TABLES
    if (/*ap1->mode == am_areg && */ap2->mode == am_dreg && ap2->preg > MAX_DATA) {
        ap1 = copy_addr(ap1);
        ap1->mode = am_indx2;
        ap1->sreg = ap2->preg;
        ap1->slen = size;
        ap1->offset = mk_icon(0l);
        return ap1;
    }
#endif
    /* if (ap1->mode == am_immed && ap2->mode == am_immed) {
        ap1 = copy_addr(ap1);
        ap1->offset = mk_node(en_add, ap1->offset, ap2->offset);
        ap1->mode = am_direct;
        return ap1;
    }*/
    if (ap2->mode == am_immed && isshort(ap2->offset)) {
        ap1 = mk_legal(ap1, F_AREG, 4l);
        ap1 = copy_addr(ap1);
        ap1->mode = am_indx;
        ap1->offset = ap2->offset;
        return ap1;
    }
    /* ap1 is volatile ... */
    g_code(op_add, size, ap2, ap1); /* add left to address reg */
    ap1 = copy_addr(ap1);
    ap1->mode = am_ind; /* mk_ indirect */
    freeop(ap2);        /* release any temps in ap2 */
    return ap1;         /* return indirect */
}
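/*
 * Sketch of the operand shapes produced above (illustrative, with the base
 * pointer in A2 and the index in D3): two registers yield 0(A2,D3.L)
 * (am_indx2) or 0(A2,A3.L) (am_indx3), a short constant index yields
 * 42(A2) (am_indx), and anything else is added into a volatile address
 * register and returned as (A2) (am_ind).
 */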
struct amode *g_deref(struct enode *node, enum(e_bt) type, int flags, long size) {
    /*
     * return the addressing mode of a dereferenced node.
     */
    struct amode *ap1;
    /*
     * If a reference to an aggregate is required, return a pointer to the
     * struct instead
     */
    if (bt_aggregate(type)) {
        return g_expr(node, /*F_ALL*/flags); // possibly BUGGY
    }
#ifdef PREFER_POS_VALUES
    if (node->nodetype == en_sub && node->v.p[1]->nodetype == en_icon)
        node->nodetype = en_add, node->v.p[1]->v.i = -node->v.p[1]->v.i;
#endif
    if (node->nodetype == en_add) {
        return g_index(node);
    }
    if (node->nodetype == en_autocon) {
#ifdef BIGSTACK
        if (node->v.i >= -32768 && node->v.i <= 32767) {
#endif
            ap1 = (struct amode *) xalloc((int) sizeof(struct amode),
                                          AMODE+G_DEREF);
            ap1->mode = am_indx;
            ap1->preg = FRAMEPTR-AREGBASE;
            ap1->offset = mk_icon((long) node->v.i);
#ifdef BIGSTACK
        } else {
            ap1 = temp_addr();
            g_code(op_move, 4, mk_immed((long) node->v.i), ap1);
            g_code(op_add, 4, mk_reg(FRAMEPTR), ap1);
            ap1 = copy_addr(ap1);
            ap1->mode = am_ind;
        }
#endif
        return ap1;
    }
    /* if (lineid==309)
        bkpt();*/
    /* special 68000 instructions */
    if (node->nodetype == en_ainc
        && (size == 1 || size == 2 || size == 4)
        && node->v.p[1]->v.i == size
        && node->v.p[0]->nodetype == en_tempref
        && node->v.p[0]->v.i >= AREGBASE
        && !(flags & F_USES)) {
        /* (An)+ */
        ap1 = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+G_DEREF);
        ap1->mode = am_ainc;
        ap1->preg = (reg_t)(node->v.p[0]->v.i - AREGBASE);
        return ap1;
    }
    if (node->nodetype == en_assub
        && (size == 1 || size == 2 || size == 4)
        && node->v.p[1]->v.i == -size
        && node->v.p[0]->nodetype == en_tempref
        && node->v.p[0]->v.i >= AREGBASE
        && !(flags & F_USES)) {
        /* -(An) */
        ap1 = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+G_DEREF);
        ap1->mode = am_adec;
        ap1->preg = (reg_t)(node->v.p[0]->v.i - AREGBASE);
        return ap1;
    }
    ap1 = g_expr(node, F_AREG | F_IMMED | F_DEREF); /* generate address */
    ap1 = copy_addr(ap1);
#ifdef PC
    if (ap1->mode != am_areg && ap1->mode != am_immed)
        ierr(G_DEREF, 1);
#endif
    am_doderef(ap1->mode);
    return mk_legal(ap1, flags, size); // mk_legal was missing here: F_VOL wasn't
                                       // taken into account...
}
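/*
 * The two special cases above map C idioms straight onto the 68000
 * auto-modification modes; illustratively, "*p++" with p held in A3 and a
 * matching operand size becomes the (A3)+ operand, and "*--p" becomes
 * -(A3). F_USES suppresses this when the operand will be referenced twice.
 */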
struct amode *g_fderef(struct enode *node, struct amode *ap0, int flags) {
    /*
     * get a bitfield value
     */
    struct amode *ap, *ap1;
    long mask;
    int width = node->bit_width;
    int offs = (node->esize<<3) - node->bit_offset - width;
    if (!ap0)
        ap = g_deref(node->v.p[0], node->etype, flags, node->esize);
    else
        ap = ap0;
    ap = mk_legal(ap, F_DREG|F_VOL, node->esize);
    if (offs > 0) {
        if (offs <= 8) {
            /* can shift with quick constant */
            g_code(op_lsr, (int) node->esize,
                   mk_immed((long) offs), ap);
        } else {
            /* must load constant first */
            ap1 = temp_data();
            g_code(op_moveq, 0, mk_immed((long) offs), ap1);
            g_code(op_lsr, (int) node->esize, ap1, ap);
            freeop(ap1);
        }
    }
    /*mask = 0;
    while (width--)
        mask = mask + mask + 1;*/
    mask = (1<<width)-1;
    g_code(op_and, (int) node->esize, mk_immed(mask), ap);
    return mk_legal(ap, flags, node->esize);
}
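/*
 * Worked example (illustrative): a 3-bit field at bit_offset 2 in a 16-bit
 * cell gives offs = 16-2-3 = 11; since 11 > 8 this emits
 * moveq #11,Dn / lsr.w Dn,Dm, then and.w #7,Dm (mask = (1<<3)-1 = 7).
 * Shift counts of 1..8 use the quick-immediate form and skip the moveq.
 */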
struct amode *g_unary(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate code to evaluate a unary minus or complement. float: unary minus
     * calls a library function
     */
    struct amode *ap;
#ifdef DOUBLE
    long i;
#endif
    if (flags & F_NOVALUE) {
        return g_expr(node->v.p[0], flags);
    }
    switch (node->etype) {
    case bt_uchar:
    case bt_char:
    case bt_short:
    case bt_ushort:
    case bt_long:
    case bt_ulong:
    case bt_pointer:
        ap = g_expr(node->v.p[0], F_DREG | F_VOL);
        g_code(op, (int) node->esize, ap, NIL_AMODE);
        return mk_legal(ap, flags, node->esize);
#ifndef NOFLOAT
    case bt_float:
#ifdef DOUBLE
    case bt_double:
#endif
        if (op == op_neg) {
#ifdef DOUBLE
            temp_inv();
            i = push_param(node->v.p[0]);
            call_library(str(ffpneg));
            return func_result(flags, i);
#else
            int null_lab = nxtlabel();
            struct amode *ap = g_expr(node->v.p[0], F_DREG | F_VOL);
            g_code(op_tst, 4, ap, NIL_AMODE);
            g_code(op_beq, 0, mk_label(null_lab), NIL_AMODE);
            g_code(op_neg, 4, ap, NIL_AMODE);
            g_label(null_lab);
            return mk_legal(ap, flags, node->esize);
#endif
        }
#endif
    }
    ierr(G_UNARY, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
struct amode *g_addsub(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate code to evaluate a binary node and return the addressing mode of
     * the result.
     */
    struct amode *ap1, *ap2;
    long i;
    if (flags & F_NOVALUE) {
        g_expr(node->v.p[0], flags);
        return g_expr(node->v.p[1], flags);
    }
    switch (node->etype) {
    case bt_uchar:
    case bt_char:
    case bt_short:
    case bt_ushort:
    case bt_long:
    case bt_ulong:
    case bt_pointer:
        flags &= (F_DREG | F_AREG);
#ifdef LONG_ADDS_IN_AREG
        if (node->v.p[1]->nodetype == en_icon && node->esize == 4
#if 1
            && !((unsigned long)(i = node->v.p[1]->v.i)+8 <= 16)
            && (long)(short)i == i
#else
            && (i = node->v.p[1]->v.i, (long)(short)i == i)
#endif
            && (flags&F_AREG))
            flags = F_AREG;
#endif
#ifdef TWOBYTE_ARRAY_EXTRA_OPT
        if (node->v.p[1]->nodetype == en_lsh && node->v.p[1]->v.p[1]->nodetype == en_icon
            && node->v.p[1]->v.p[1]->v.i == 1) {
            ap1 = g_expr(node->v.p[0], F_VOL | flags);
            ap2 = g_expr(node->v.p[1]->v.p[0], F_ALL | F_SRCOP | F_USES);
            validate(ap1); /* in case push occurred */
            g_code(op, (int) node->esize, ap2, ap1);
            g_code(op, (int) node->esize, ap2, ap1);
        } else {
#endif
            ap1 = g_expr(node->v.p[0], F_VOL | flags);
            ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
            validate(ap1); /* in case push occurred */
            g_code(op, (int) node->esize, ap2, ap1);
#ifdef TWOBYTE_ARRAY_EXTRA_OPT
        }
#endif
        freeop(ap2);
        return mk_legal(ap1, flags, node->esize);
#ifndef NOFLOAT
    case bt_double:
        temp_inv();
        i = push_param(node->v.p[1]);
        i += push_param(node->v.p[0]);
        switch (op) {
        case op_add:
            call_library(str(ffpadd));
            break;
        case op_sub:
            call_library(str(ffpsub));
            break;
        }
        return func_result(flags, i);
#endif
    }
    ierr(G_ADDSUB, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
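/*
 * Sketch of the TWOBYTE_ARRAY_EXTRA_OPT case (illustrative): for
 * "p + (i<<1)" the shift by one is folded away and the index is simply
 * added twice, e.g. add.l d1,a0 / add.l d1,a0, avoiding the volatile
 * scratch copy of i that a separate shift would require.
 */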
struct amode *g_xbin(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate code to evaluate a restricted binary node and return the
     * addressing mode of the result.
     * these are bitwise operators so don't care about the type.
     * This needs to be revised with scalar types longer than 32 bit
     */
    struct amode *ap1, *ap2;
    ap1 = g_expr(node->v.p[0], F_VOL | F_DREG);
    ap2 = g_expr(node->v.p[1], F_DREG);
    validate(ap1); /* in case push occurred */
    g_code(op, (int) node->esize, ap2, ap1);
    freeop(ap2);
    return mk_legal(ap1, flags, node->esize);
}
struct amode *g_shift(struct enode *node, int flags, enum(e_op) op);
int equalnode(struct enode *node1, struct enode *node2);
struct amode *g_ybin(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate code to evaluate a restricted binary node and return the
     * addressing mode of the result.
     */
    struct amode *ap1, *ap2;
#ifdef GENERATE_ROL_ROR
    if (
        (op == op_or || op == op_eor) &&
        node->v.p[0]->nodetype == en_lsh && node->v.p[1]->nodetype == en_rsh && bt_uns(node->v.p[1]->etype) &&
        node->v.p[0]->v.p[1]->nodetype == en_icon && node->v.p[1]->v.p[1]->nodetype == en_icon &&
        equalnode(node->v.p[0]->v.p[0], node->v.p[1]->v.p[0])
        ) {
        long lsh = node->v.p[0]->v.p[1]->v.i;
        long rsh = node->v.p[1]->v.p[1]->v.i;
        long sz = lsh+rsh;
        if (lsh >= 0 && rsh >= 0 && sz == 8*node->esize) {
            int need_swap = 0;
            if (lsh > 8 && rsh > 8) {
                need_swap = 1;
                if (lsh >= 16)
                    lsh -= 16, rsh += 16;
                else
                    rsh -= 16, lsh += 16;
            }
            if (!lsh || !rsh) {
                ap1 = g_expr(node->v.p[0]->v.p[0], F_DREG | F_VOL);
            } else {
                int shift_left = lsh <= 8;
                struct enode *ep1, *ep2;
                ep1 = mk_icon(shift_left ? lsh : rsh);
                ep1->etype = bt_uchar;
                ep1->esize = 1;
                ep2 = mk_node(en_lsh, node->v.p[0]->v.p[0], ep1);
                ep2->etype = node->etype;
                ep2->esize = node->esize;
                ap1 = g_shift(ep2, F_DREG|F_VOL, shift_left ? op_rol : op_ror);
            }
            if (need_swap)
                g_code(op_swap, (int)node->esize, ap1, NIL_AMODE);
            return mk_legal(ap1, flags, node->esize);
        } else {
            // we could do better, but hey, we're lazy, aren't we?
        }
    }
#endif
    ap1 = g_expr(node->v.p[0], F_VOL | F_DREG);
    ap2 = g_expr(node->v.p[1], (F_ALL & ~F_AREG) | F_SRCOP);
    validate(ap1); /* in case push occurred */
    g_code(op, (int) node->esize, ap2, ap1);
    freeop(ap2);
    return mk_legal(ap1, flags, node->esize);
}
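/*
 * Example of the GENERATE_ROL_ROR rewrite (illustrative, unsigned 32-bit x):
 * "(x<<8)|(x>>24)" matches lsh+rsh == 32 and becomes rol.l #8,Dn, while
 * "(x<<24)|(x>>8)" becomes ror.l #8,Dn. Pairs where both counts exceed 8
 * are adjusted by 16 and finished with a swap, so the residual rotate fits
 * the 1..8 quick-immediate range.
 */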
#ifdef VCG
typedef struct amode *(*COMMUTATIVE_G)(struct enode *node, int flags, enum(e_op) op);
typedef struct amode *(*REVERSAL_G)(struct amode *ap, struct enode *ep, int flags);
//struct amode *symmetric(struct amode *ap, struct enode *ep,int flags) {
//    if (ap==NULL) return (struct amode *)(void *)flags;
//    return ap;
//}
//struct amode *antisymmetric(struct amode *ap, struct enode *ep,int flags) {
//    if (ap==NULL) return (struct amode *)(void *)(F_DREG|F_VOL);
//    g_code(op_neg, ep->esize, ap, NIL_AMODE);
//    return mk_legal(ap,flags,ep->esize);
//}
enum { symmetric = 0 }; // antisymmetric is not supported
struct amode *g_commute(void *func, struct enode *node, int flags, enum(e_op) op, int dummy/*void *reversal*/) {
    if (vcg_init()) {
        struct amode *ap = NULL;
        struct enode *n0 = node->v.p[0], *n1 = node->v.p[1];
        int o1, o2;
        //struct amode *ap;
        ((COMMUTATIVE_G)func)(node, flags, op);
        o1 = vcg_done();
        node->v.p[0] = n1, node->v.p[1] = n0;
        vcg_init();
        ((COMMUTATIVE_G)func)(node, flags, op);
        //((REVERSAL_G)reversal)(((COMMUTATIVE_G)func)
        //    (node,(int)((REVERSAL_G)reversal)(NULL,node,flags),op),node,flags);
        o2 = vcg_done();
        if (o2 <= o1)
            ap = ((COMMUTATIVE_G)func)(node, flags, op);
        //ap = ((REVERSAL_G)reversal)(((COMMUTATIVE_G)func)
        //    (node,(int)((REVERSAL_G)reversal)(NULL,node,flags),op),node,flags);
        node->v.p[0] = n0, node->v.p[1] = n1; /* always restore the original node, even when reverse is better! */
        if (ap)
            return ap;
    }
    return ((COMMUTATIVE_G)func)(node, flags, op);
}
#endif
struct amode *g_shift(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate code to evaluate a shift node and return the address mode of the
     * result.
     */
    struct amode *ap1, *ap2;
    if (op == op_lsl && node->v.p[1]->nodetype == en_icon
        && (node->v.p[1]->v.i == 1
#ifndef HARDCORE_FOR_UNPACKED_SIZE
#ifdef SPEED_OPT
            || ((speed_opt_value >= 0
#ifdef EXE_OUT
                 || exestub_mode
#endif
                ) && node->v.p[1]->v.i == 2)
#else
            || node->v.p[1]->v.i == 2
#endif
#endif
            )) {
        int i;
        ap1 = g_expr(node->v.p[0], F_VOL | (flags & (F_DREG | F_AREG)));
        i = node->v.p[1]->v.i-1;
        do
            g_code(op_add, (int) node->esize, ap1, ap1);
        while (i--);
    } else {
        ap1 = g_expr(node->v.p[0], F_DREG | F_VOL);
        ap2 = g_expr(node->v.p[1], F_DREG | F_IMMED);
        validate(ap1);
        /* quick constant only legal if 1<=const<=8 */
        if (ap2->mode == am_immed && ap2->offset->nodetype == en_icon
            && (ap2->offset->v.i > 8 || ap2->offset->v.i < 1)) {
            if (ap2->offset->v.i <= 0 || ap2->offset->v.i > 32)
                uwarn("shift constant out of range");
#ifdef USE_SWAP_FOR_SHIFT16
            if ((unsigned int)ap2->offset->v.i <= 16 && node->esize == 4) {
                /* validate(ap1); -> useless since ap2 is am_immed :) */
                g_code(op_swap, 0, ap1, NIL_AMODE);
                if ((ap2->offset->v.i -= 16)) {
                    ap2 = mk_immed(-ap2->offset->v.i);
                    op = (op == op_lsl ? op_ror : op_rol); /* since op might be op_asr too */
                    g_code(op, (int) node->esize, ap2, ap1);
                }
#error The following bugs... ((long)x)<<16 => ext/swap/ext #roll#
                /* to be improved since it normally results from 'long' shifts and surrounding
                   g_cast does not recurse down to the shift node...
                   The current optimization is nevertheless useful with respect to speed. */
                if (node->esize == 4) g_code(op_ext, (int) node->esize, ap1, NIL_AMODE);
                goto ok_shift;
            } else
#endif
                ap2 = mk_legal(ap2, F_DREG, 1l);
        }
        g_code(op, (int) node->esize, ap2, ap1);
#ifdef USE_SWAP_FOR_SHIFT16
ok_shift:
#endif
        freeop(ap2);
    }
    return mk_legal(ap1, flags, node->esize);
}
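/*
 * Cost notes behind the special cases above (illustrative): "x << 1" is
 * emitted as add.l d0,d0 rather than a shift, counts of 1..8 use the
 * quick-immediate form (e.g. lsl.l #3,d0), larger constants are loaded
 * into a data register first, and USE_SWAP_FOR_SHIFT16 trades a 16-step
 * shift for a swap plus a small residual rotate.
 */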
struct amode *g_div(struct enode *node, int flags) {
    /*
     * generate code to evaluate a divide operator
     */
    struct amode *ap1, *ap2;
    long i;
    switch (node->etype) {
    case bt_short:
    case bt_ushort:
        ap1 = g_expr(node->v.p[0], F_DREG | F_VOL);
        ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
        validate(ap1);
        if (node->etype == bt_short) {
            g_code(op_ext, 4, ap1, NIL_AMODE);
            g_code(op_divs, 0, ap2, ap1);
        } else {
            g_code(op_and, 4, mk_immed(65535l), ap1);
            g_code(op_divu, 0, ap2, ap1);
        }
        freeop(ap2);
        return mk_legal(ap1, flags, 2l);
    case bt_long:
    case bt_ulong:
    case bt_double:
        temp_inv();
        i = push_param(node->v.p[1]);
        i += push_param(node->v.p[0]);
        switch (node->etype) {
        case bt_long:
            call_library("__divsi3");
            break;
        case bt_ulong:
        case bt_pointer:
            call_library("__udivsi3");
            break;
#ifndef NOFLOAT
        case bt_double:
            call_library(str(ffpdiv));
            break;
#endif
        }
        return func_result(flags, i);
    }
    ierr(G_DIV, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
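/*
 * Background (68000 semantics): divs/divu divide a 32-bit dividend by a
 * 16-bit divisor and leave the quotient in the low word and the remainder
 * in the high word of the destination; hence the ext.l/and.l above to
 * widen the dividend, and the swap in g_mod below to fetch the remainder.
 * 32-bit division has no hardware form and calls __divsi3/__udivsi3.
 */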
struct amode *g_mod(struct enode *node, int flags) {
    /*
     * generate code to evaluate a mod operator
     */
    struct amode *ap1, *ap2;
    long i;
    switch (node->etype) {
    case bt_short:
    case bt_ushort:
        ap1 = g_expr(node->v.p[0], F_DREG | F_VOL);
        ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
        validate(ap1);
        if (node->etype == bt_short) {
            g_code(op_ext, 4, ap1, NIL_AMODE);
            g_code(op_divs, 0, ap2, ap1);
        } else {
            g_code(op_and, 4, mk_immed(65535l), ap1);
            g_code(op_divu, 0, ap2, ap1);
        }
        g_code(op_swap, 0, ap1, NIL_AMODE);
        freeop(ap2);
        return mk_legal(ap1, flags, 2l);
    case bt_long:
    case bt_ulong:
        temp_inv();
        i = push_param(node->v.p[1]);
        i += push_param(node->v.p[0]);
        if (node->etype == bt_long)
            call_library("__modsi3");
        else
            call_library("__umodsi3");
        return func_result(flags, i);
    }
    ierr(G_MOD, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
void swap_nodes(struct enode *node) {
    /*
     * exchange the two operands in a node.
     */
    struct enode *temp;
    temp = node->v.p[0];
    node->v.p[0] = node->v.p[1];
    node->v.p[1] = temp;
}
struct amode *g_mul(struct enode *node, int flags) {
    struct amode *ap1, *ap2;
#ifdef OLD_GCCLIKE_MULSI3
    long i;
#endif
    /* switch (node->etype) {
    case bt_long:
    case bt_ulong:
        if (node->v.p[0]->nodetype == en_icon)
            swap_nodes(node);
        ap1 = g_expr(node->v.p[0], F_DREG | F_VOL);
        ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
        validate(ap1);
        if (node->etype == bt_long)
            g_code(op_muls, 0, ap2, ap1);
        else
            g_code(op_mulu, 0, ap2, ap1);
        freeop(ap2);
        return mk_legal(ap1, flags, 2l);
    case bt_pointer:
    case bt_double:
        temp_inv();
        i = push_param(node->v.p[1]);
        i += push_param(node->v.p[0]);
        if (node->etype == bt_ulong)
            call_library(".ulmul"); // obsolete
        else if (node->etype == bt_double)
            call_library(".fpmult"); // obsolete
        else
            call_library(".lmul"); // obsolete
        return func_result(flags, i);
    }*/
    switch (node->etype) {
    case bt_short:
    case bt_ushort:
        if (node->v.p[0]->nodetype == en_icon)
            swap_nodes(node);
        ap1 = g_expr(node->v.p[0], F_DREG | F_VOL);
        ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
        validate(ap1);
        if (node->v.p[0]->etype == bt_short)
            g_code(op_muls, 0, ap2, ap1);
        else
            g_code(op_mulu, 0, ap2, ap1);
        freeop(ap2);
        return mk_legal(ap1, flags, 2l);
    case bt_ulong:
    case bt_long:
    case bt_pointer:
    case bt_double:
        temp_inv();
#ifdef OLD_GCCLIKE_MULSI3
        i = push_param(node->v.p[1]);
        i += push_param(node->v.p[0]);
#ifndef NOFLOAT
        if (node->etype == bt_double)
            call_library(str(ffpmul));
        else
#endif
            call_library("__mulsi3"); /* it's the same function for ulong & long :) */
        return func_result(flags, i);
#else
        { char *libfunc = "__mulsi3_rp";
        if (tst_ushort(node->v.p[0]))
            swap_nodes(node);
        if (tst_ushort(node->v.p[1])) { /* favour ushort rather than short! */
            node->v.p[1]->etype = bt_ushort;
            node->v.p[1]->esize = 2;
            libfunc = "__mulsi3ui2_rp";
        } else {
            if (tst_short(node->v.p[0]))
                swap_nodes(node);
            if (tst_short(node->v.p[1])) {
                node->v.p[1]->etype = bt_short;
                node->v.p[1]->esize = 2;
                libfunc = "__mulsi3si2_rp";
            }
        }
        ap1 = g_expr(node->v.p[0], F_DREG | F_VOL); /* put it in D0 */
        ap2 = g_expr(node->v.p[1], F_DREG | F_VOL); /* put it in D1 */
        validate(ap1);
        call_library(libfunc);
        freeop(ap2);
        return mk_legal(ap1, flags, 4l);
        }
#endif
    }
    ierr(G_MUL, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
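/*
 * Background (68000 semantics): muls/mulu are 16x16->32 multiplies, so
 * short operands are handled in line while a full 32x32 product goes
 * through a library routine. The __mulsi3*_rp helpers above are assumed
 * here to be register-passing variants taking their operands in D0/D1,
 * matching the F_DREG | F_VOL evaluation of both sides.
 */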
/*
 * Returns a complexity measure (in terms of time) of the given node, if it's constant.
 * If it's not, returns -128.
 */
int complexity(struct enode *ep) {
    int comp = 1;
    if (!ep) return 0;
restart:
    switch (ep->nodetype) {
    case en_icon:
        if (ep->v.i) comp++;
    case en_tempref:
        comp--;
    case en_fcon:
    case en_nacon:
    case en_labcon:
    case en_autocon:
        return comp;
    case en_fcall:
    case en_assign:
    case en_asadd: case en_assub:
    case en_asmul: case en_asmod: case en_asdiv:
    case en_aslsh: case en_asrsh:
    case en_asand: case en_asor: case en_asxor:
    case en_ainc: case en_adec:
    case en_alloca:
        return -128;
    case en_cond:
        // msg("DEBUG: complexity\n");
        return -128;
    case en_compound:
        // msg("DEBUG: complexity\n");
        return -128;
    case en_mod: case en_div:
        comp += 2;
    case en_mul:
        comp += 2;
    case en_void:
    case en_add: case en_sub:
    case en_lsh: case en_rsh:
    case en_and: case en_or: case en_xor:
    case en_eq: case en_ne:
    case en_lt: case en_le: case en_gt: case en_ge:
    case en_land: case en_lor:
        comp += complexity(ep->v.p[0]);
        if (comp < 0) return -128;
        {
            int comp2 = complexity(ep->v.p[1]);
            if (comp2 < 0) return -128;
            return comp+comp2;
        }
    case en_ref:
    case en_fieldref:
    case en_deref:
    case en_uminus: case en_not:
    case en_compl:
    case en_cast:
        comp++;
        ep = ep->v.p[0];
        goto restart;
    default:
        fatal("COMPLEXITY");
        return 0; // make the compiler happy
    }
}
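/*
 * Note on the deliberate fall-throughs above (illustrative): en_div adds 2,
 * falls into en_mul for 2 more, then into the generic binary case, which
 * adds in the complexity of both operands; any node with side effects
 * (calls, assignments, ++/--, en_cond) makes the whole tree non-constant
 * by returning -128.
 */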
struct amode *g_hook(struct enode *node, int flags) {
    /*
     * generate code to evaluate a condition operator node (?:)
     */
    struct amode *ap1, *ap2;
    unsigned int false_label, end_label;
    int flagx;
    int result_is_void;
    long size = node->esize;
    struct enode *vnode = node->v.p[1];
#ifdef VCG
    int alternatives = complexity(node->v.p[0]);
    int best_flagx;
    unsigned int cost1 = -1, cost2 = -1;
#endif
    end_label = nxtlabel();
    result_is_void = (node->etype == bt_void);
    if (bt_aggregate(node->etype)) {
        size = 4;
    }
    if (!result_is_void) {
        flagx = (flags & (F_DREG | F_AREG)) == F_AREG ?
                F_AREG | F_VOL : F_DREG | F_VOL;
#ifdef VCG
        best_flagx = vcg_lvl ? -1 : flagx;
#endif
    } else {
#ifdef VCG
        best_flagx =
#endif
        flagx = F_ALL | F_SRCOP | F_NOVALUE;
    }
    // temp_inv(); /* I do not think I can avoid that */
#ifdef VCG
    best_flagx = flagx;
    while (1) {
    if (best_flagx < 0)
        vcg_init(); // always succeeds, otherwise best_flagx would be flagx
#endif
    if (vnode->v.p[0]) {
        /* all scratch registers are void */
#ifdef OPTIMIZED_HOOK
        struct ocode *old_peep_tail = peep_tail;
        int old_max_reg = max_reg;
        max_reg = 0;
#endif
#ifdef VCG
        if (vcg_lvl && alternatives IS_VALID) {
            int alt1, alt2, norm;
            if (complexity(vnode->v.p[1]) >= 0) {
                vcg_init();
                ap1 = g_expr(vnode->v.p[1], flagx);
                falsejp(node->v.p[0], end_label);
                if (ap_hasbeenpushed(ap1)) {
                    validate(ap1);
                    freeop(ap1);
                    vcg_done();
                    best_flagx = 0;
                    goto classic_fashion;
                }
                freeop(ap1);
                alt1 = vcg_cost()>>3;
                ap2 = g_expr(vnode->v.p[0], flagx);
                alt1 += vcg_done();
            } else alt1 = 32767;
            swap_nodes(vnode);
            if (complexity(vnode->v.p[1]) >= 0) {
                vcg_init();
                ap1 = g_expr(vnode->v.p[1], flagx);
                truejp(node->v.p[0], end_label);
                if (ap_hasbeenpushed(ap1))
                    ierr(G_HOOK, 3);
                freeop(ap1);
                alt2 = vcg_cost()>>3;
                ap2 = g_expr(vnode->v.p[0], flagx);
                alt2 += vcg_done();
            } else alt2 = 32767;
            swap_nodes(vnode);
            vcg_init();
            false_label = nxtlabel();
            falsejp(node->v.p[0], false_label);
            norm = vcg_cost()>>3;
            ap1 = g_expr(vnode->v.p[0], flagx);
            freeop(ap1);
            g_code(op_bra, 0, mk_label(end_label), NIL_AMODE);
            g_label(false_label);
            ap2 = g_expr(vnode->v.p[1], flagx);
            norm += vcg_done();
            if (norm > alt1 && norm > alt2) {
                if (alt2 < alt1) swap_nodes(vnode);
                ap1 = g_expr(vnode->v.p[1], flagx);
                (alt2 >= alt1 ? falsejp : truejp)
                    (node->v.p[0], end_label);
                freeop(ap1);
                ap2 = g_expr(vnode->v.p[0], flagx);
                if (alt2 < alt1) swap_nodes(vnode); // we absolutely need this!!!
                goto done; // (hardcore TI-Chess+VTILog debug)
            }
        }
#endif
#if defined(ALTERNATE_HOOK) || defined(OPTIMIZED_HOOK)
        int c1 = complexity(vnode->v.p[0]), c2 = complexity(vnode->v.p[1]);
        if (c1 >= 0 || c2 >= 0) {
            if ((unsigned)c1 < (unsigned)c2)
                swap_nodes(vnode);
            ap1 = g_expr(vnode->v.p[1], flagx);
            ((unsigned)c1 < (unsigned)c2 ? truejp : falsejp)
                (node->v.p[0], end_label);
            freeop(ap1);
            ap2 = g_expr(vnode->v.p[0], flagx);
            if ((unsigned)c1 < (unsigned)c2)
                swap_nodes(vnode);
        } else {
#endif
classic_fashion:
            false_label = nxtlabel();
            falsejp(node->v.p[0], false_label);
            /* all scratch registers are void */
            ap1 = g_expr(vnode->v.p[0], flagx);
            freeop(ap1);
            /* all scratch registers are void */
            g_code(op_bra, 0, mk_label(end_label), NIL_AMODE);
            g_label(false_label);
            ap2 = g_expr(vnode->v.p[1], flagx);
#if defined(ALTERNATE_HOOK) || defined(OPTIMIZED_HOOK)
        }
#endif
done:
        if (!result_is_void && !equal_address(ap1, ap2))
            ierr(G_HOOK, 1);
    } else {
        ap1 = g_expr(node->v.p[0], flagx);
        /* all scratch registers are void */
        g_code(op_tst, (int)node->v.p[0]->esize, ap1, NIL_AMODE);
        freeop(ap1);
        g_code(op_bne, 0, mk_label(end_label), NIL_AMODE);
        ap2 = g_expr(vnode->v.p[1], flagx);
        if (!result_is_void && !equal_address(ap1, ap2))
            ierr(G_HOOK, 2);
    }
    g_label(end_label);
#ifdef VCG
    if (best_flagx >= 0)
#endif
        return mk_legal(ap2, flags, size);
#ifdef VCG
    if (best_flagx < 0) {
        cost1 = cost2;
        cost2 = vcg_done();
        if (!(flags&F_AREG) || size == 1 || flagx == (F_AREG|F_VOL))
            best_flagx = cost1 < cost2 ? (F_DREG|F_VOL) : flagx;
        else flagx = F_AREG|F_VOL;
    }
    }
#endif
}
struct amode *g_asadd(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate a plus equal or a minus equal node.
     */
    int f;
    struct amode *ap1, *ap2;
    switch (node->etype) {
    case bt_char:
    case bt_uchar:
    case bt_short:
    case bt_ushort:
    case bt_long:
    case bt_ulong:
    case bt_pointer:
        if (flags & F_NOVALUE)
            f = F_ALL;
        else
            f = F_ALL | F_USES;
        ap1 = g_expr(node->v.p[0], f);
        if (ap1->mode == am_dreg || ap1->mode == am_areg)
            f = F_ALL | F_SRCOP;
        else f = F_DREG | F_IMMED;
        ap2 = g_expr(node->v.p[1], f);
        validate(ap1);
        g_code(op, (int) node->esize, ap2, ap1);
        freeop(ap2);
        return mk_legal(ap1, flags, node->esize);
#ifndef NOFLOAT
    case bt_float:
#ifdef DOUBLE
    case bt_double:
#endif
        if (op == op_add)
            return as_fcall(node, flags, str(ffpadd));
        else
            return as_fcall(node, flags, str(ffpsub));
#endif
    }
    ierr(G_ASADD, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
readonly xstatic int bits_for_size[4] = {7, 15, 0, 31};
void g_bitmancode(enum(e_op) op, int size, struct amode *ap1, struct amode *ap2) {
    struct amode *ap3 = 0;
    if (ap2->mode != am_dreg && size && size != 1) {
        int bits_to_skip;
        if (ap1->mode != am_immed || ap1->offset->nodetype != en_icon)
            uerrc("illegal address mode");
        bits_to_skip = bits_for_size[size-1]-ap1->offset->v.i;
        while (bits_to_skip >= 8) {
            if (!ap3) ap3 = copy_addr(ap2), ap3->offset = copynode(ap3->offset);
            if (ap3->mode == am_ind)
                ap3->mode = am_indx, ap3->offset = mk_icon(0);
            if (ap3->offset->nodetype == en_icon)
                ap3->offset->v.i++;
            else ap3->offset = mk_node(en_add, ap3->offset, mk_icon(1)), opt4(&ap3->offset);
            bits_to_skip -= 8;
        }
        ap1 = mk_immed(7-bits_to_skip);
    }
    if (!ap3) ap3 = ap2;
    g_code(op, ap2->mode == am_dreg ? 4 : 1, ap1, ap3);
}
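/*
 * Rationale (illustrative): bit instructions on memory operands are
 * byte-sized on the 68000 (bit numbers 0..7), so a bit operation on a
 * wider big-endian cell is retargeted here: the effective address is
 * advanced one byte per 8 bits to skip and the bit number reduced to
 * 7-bits_to_skip within that byte, while a data register destination
 * keeps the full 0..31 range untouched.
 */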
#ifdef BITWISE_REDUCE
/* the latter relies on the big-endianness of the 68k */
int bitwise_reduction(unsigned long x, int *size) {
    if (!x) return 0; // anyway the constant folder will already have taken care :)
    int offs = 0, end_offs = 0;
    unsigned long v = 0xFF<<((*size-1)*8);
    while (!(x&v)) offs++, x <<= 8;
    if (!(x&(v>>8))) {
        if (!(x&(v>>16)) && !(x&(v>>24)))
            *size = 4;
        else *size = 2;
    } else *size = 1;
    if (*size == 1) {
        if (offs == 1 && v != 0xFF) /* avoid such a case... (unless *size _was_ already 1) */
            offs = 0, *size = 2;
    } else if (offs&1) /* too bad... we can do nothing */
        offs = 0, *size = 4;
    return offs;
}
readonly int deflt_types[5] = {0, bt_uchar, bt_ushort, 0, bt_ulong};
void bitwise_optimize(struct enode *ep, long mode) {
    long *ref = 0;
    /* we may not call swap_nodes here! VCG needs to preserve the order */
    if (ep->v.p[0]->nodetype == en_icon && ep->v.p[1]->nodetype == en_ref)
        ref = &ep->v.p[0]->v.i;
    else if (ep->v.p[1]->nodetype == en_icon && ep->v.p[0]->nodetype == en_ref)
        ref = &ep->v.p[1]->v.i;
    if (ref) {
        int offs = bitwise_reduction((*ref)^mode, &ep->esize);
        ep->etype = deflt_types[ep->esize];
        ep->v.p[0]->etype =
#endif
struct amode *g_asxor(struct enode *node, int flags) {
    /*
     * generate an ^= node
     */
    int f;
    struct amode *ap1, *ap2;
    switch (node->etype) {
    case bt_char:
    case bt_uchar:
    case bt_short:
    case bt_ushort:
    case bt_long:
    case bt_ulong:
    case bt_pointer:
        if (flags & F_NOVALUE)
            f = F_ALL;
        else
            f = F_ALL | F_USES;
        ap1 = g_expr(node->v.p[0], f);
        if (ap1->mode == am_dreg || ap1->mode == am_areg)
            f = F_ALL | F_SRCOP;
        else f = F_DREG | F_IMMED;
        ap2 = g_expr(node->v.p[1], f);
        validate(ap1);
        if (ap2->mode == am_immed && ap1->mode != am_dreg && ap1->mode != am_areg) {
            int i, n = 0, j = 0; long z = ap2->offset->v.i;
            for (i = 0; i < 8*node->esize; i++)
                if (z&(1<<i)) { n++; j = i; }
            if (n == 1) {
                g_bitmancode(op_bchg, (int) node->esize, mk_immed(j), ap1);
            } else g_code(op_eor, (int) node->esize, ap2, ap1);
        } else g_code(op_eor, (int) node->esize, ap2, ap1);
        freeop(ap2);
        return mk_legal(ap1, flags, node->esize);
    }
    ierr(G_ASXOR, 1);
    /* NOTREACHED */
    return 0; // make the compiler happy
}
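/*
 * Example (illustrative): for "flags ^= 0x20" on a memory operand the
 * immediate has a single set bit, so the scan above finds n == 1 with
 * j == 5 and a bchg is emitted through g_bitmancode instead of a
 * read-modify-write eor; multi-bit masks still use eor.
 */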
struct amode *g_aslogic(struct enode *node, int flags, enum(e_op) op) {
    /*
     * generate a&= or a|=
     */
    int f;
    struct amode *ap1, *ap2, *ap3;
    if (flags & F_NOVALUE)
        f = F_ALL;
    else
        f = F_ALL | F_USES;
    ap1 = g_expr(node->v.p[0], f);
    if (ap1->mode == am_dreg || ap1->mode == am_areg)
        f = (F_ALL & ~F_AREG) | F_SRCOP;
    else f = F_DREG | F_IMMED;
    ap2 = g_expr(node->v.p[1], f);
    validate(ap1);
    if (ap1->mode != am_areg) {
        if (/*(op==op_and || op==op_or || op==op_eor) && */ap2->mode == am_immed
            && ap1->mode != am_dreg && ap1->mode != am_areg
            && (node->esize == 1/* || (ap1->mode!=am_ainc && ap1->mode!=am_adec)*/)) {
            int i, n = -1, j = 0, and = (op == op_and); long z = ap2->offset->v.i;
            for (i = 0; i < 8*node->esize; i++)
                if ((!!(z&(1<<i)))^and) { n++; j = i; }
            if (!n) {
                ap3 = ap1;
                /* if (node->esize!=1) {
                    ap3 = copy_addr(ap1);
                    switch (ap1->mode) {
                    case am_ind:
                        ap3->mode=am_indx;
                        ap3->offset=mk_immed(
                }*/
                g_bitmancode(and ? op_bclr : (op == op_or ? op_bset : op_bchg),
                             0/* ALWAYS .B !!! */, mk_immed(j), ap3);
                goto asdone;
            }
        }
        g_code(op, (int) node->esize, ap2, ap1);
    } else {
        ap3 = temp_data();
        g_code(op_move, 4, ap1, ap3);
        g_code(op, (int) node->esize, ap2, ap3);
        g_code(op_move, (int) node->esize, ap3, ap1);
        freeop(ap3);
    }
asdone:
    freeop(ap2);
    return mk_legal(ap1, flags, node->esize);
}
  1365. struct amode *g_asshift(struct enode *node, int flags, enum(e_op) op) {
  1366. /*
  1367. * generate shift equals operators.
  1368. */
  1369. int f;
  1370. struct amode *ap1, *ap2, *ap3;
  1371. switch (node->etype) {
  1372. case bt_uchar:
  1373. case bt_char:
  1374. case bt_ushort:
  1375. case bt_short:
  1376. case bt_ulong:
  1377. case bt_long:
  1378. case bt_pointer:
  1379. if (flags & F_NOVALUE)
  1380. f = F_ALL;
  1381. else
  1382. f = F_ALL | F_USES;
  1383. ap1 = g_expr(node->v.p[0], f);
  1384. if (ap1->mode != am_dreg) {
  1385. ap3 = temp_data();
  1386. g_code(op_move, (int) node->esize, ap1, ap3);
  1387. } else
  1388. ap3 = ap1;
  1389. ap2 = g_expr(node->v.p[1], F_DREG | F_IMMED);
  1390. /* add if const=1 and op is << */
  1391. if (op==op_lsl && ap2->mode == am_immed && ap2->offset->nodetype == en_icon
  1392. && ap2->offset->v.i == 1) {
  1393. op=op_add; ap2=ap3;
  1394. }
  1395. /* quick constant if 2<=const<=8 */
  1396. if (ap2->mode == am_immed && ap2->offset->nodetype == en_icon
  1397. && (ap2->offset->v.i > 8 || ap2->offset->v.i < 1)) {
  1398. /*if (ap2->offset->v.i <= 0)
  1399. uwarn("negative shift constant");*/
  1400. ap2 = mk_legal(ap2, F_DREG, 1l);
  1401. }
  1402. validate(ap3);
  1403. g_code(op, (int) node->esize, ap2, ap3);
  1404. if (ap2 != ap3)
  1405. freeop(ap2);
  1406. if (ap3 != ap1) {
  1407. g_code(op_move, (int) node->esize, ap3, ap1);
  1408. freeop(ap3);
  1409. }
  1410. return mk_legal(ap1, flags, node->esize);
  1411. }
  1412. ierr(G_ASSHIFT,1);
  1413. /* NOTREACHED */
  1414. return 0; // make the compiler happy
  1415. }
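/*
 * Sketch of the shift-by-one rewrite above (hypothetical helper, unused):
 * a left shift by 1 equals adding the value to itself, and add dn,dn is
 * shorter and faster than lsl #1,dn on the 68000; counts of 2..8 use the
 * quick-immediate form, anything larger goes through a data register.
 */
static unsigned long example_shift_left_one(unsigned long x) {
	return x + x;		/* identical to x << 1 */
}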
  1416. struct amode *g_asmul(struct enode *node, int flags) {
  1417. /*
  1418. * generate a *= node.
  1419. */
  1420. struct amode *ap1, *ap2, *ap3;
  1421. enum(e_op) op = op_mulu;
  1422. switch (node->etype) {
  1423. case bt_char:
  1424. ap1 = g_expr(node->v.p[0], F_ALL | F_USES);
  1425. if (ap1->mode != am_dreg) {
  1426. ap2 = temp_data();
  1427. g_code(op_move, 1, ap1, ap2);
  1428. } else
  1429. ap2 = ap1;
  1430. g_code(op_ext, 2, ap2, NIL_AMODE);
  1431. ap3 = g_expr(node->v.p[1], F_DREG | F_IMMED);
  1432. if (ap3->mode == am_dreg)
  1433. g_code(op_ext, 2, ap3, NIL_AMODE);
  1434. validate(ap2);
  1435. g_code(op_muls, 0, ap3, ap2);
  1436. freeop(ap3);
  1437. if (ap2 != ap1) {
  1438. validate(ap1);
  1439. g_code(op_move, 1, ap2, ap1);
  1440. freeop(ap2);
  1441. }
  1442. return mk_legal(ap1, flags, node->esize);
  1443. case bt_uchar:
  1444. ap1 = g_expr(node->v.p[0], F_ALL | F_USES);
  1445. if (ap1->mode != am_dreg) {
  1446. ap2 = temp_data();
  1447. g_code(op_move, 1, ap1, ap2);
  1448. } else
  1449. ap2 = ap1;
  1450. g_code(op_and, 2, mk_immed(255l), ap2);
  1451. ap3 = g_expr(node->v.p[1], F_DREG | F_IMMED);
  1452. if (ap3->mode == am_dreg)
  1453. g_code(op_and, 2, mk_immed(255l), ap3);
  1454. validate(ap2);
  1455. g_code(op_mulu, 0, ap3, ap2);
  1456. freeop(ap3);
  1457. if (ap2 != ap1) {
  1458. validate(ap1);
  1459. g_code(op_move, 1, ap2, ap1);
  1460. freeop(ap2);
  1461. }
  1462. return mk_legal(ap1, flags, node->esize);
  1463. case bt_short:
  1464. op = op_muls;
  1465. case bt_ushort:
  1466. ap1 = g_expr(node->v.p[0], F_ALL | F_USES);
  1467. ap2 = g_expr(node->v.p[1], F_ALL);
  1468. validate(ap1);
  1469. if (ap1->mode != am_dreg) {
  1470. ap3 = temp_data();
  1471. g_code(op_move, 2, ap1, ap3);
  1472. g_code(op, 0, ap2, ap3);
  1473. freeop(ap2);
  1474. freeop(ap3);
  1475. g_code(op_move, 2, ap3, ap1);
  1476. } else {
g_code(op, 0, ap2, ap1); /* op is op_muls for short, op_mulu for ushort */
  1478. freeop(ap2);
  1479. }
  1480. return mk_legal(ap1, flags, node->esize);
  1481. case bt_long:
  1482. case bt_ulong:
  1483. case bt_pointer:
  1484. return as_fcall(node, flags, "__mulsi3");
  1485. #ifndef NOFLOAT
  1486. case bt_float:
  1487. #ifdef DOUBLE
  1488. case bt_double:
  1489. #endif
  1490. return as_fcall(node, flags, str(ffpmul));
  1491. #endif
  1492. }
  1493. ierr(G_ASMUL,1);
  1494. /* NOTREACHED */
  1495. return 0; // make the compiler happy
  1496. }
  1497. struct amode *g_asdiv(struct enode *node, int flags) {
  1498. /*
* generate /= nodes.
  1500. */
  1501. struct amode *ap1, *ap2, *ap3;
  1502. switch (node->etype) {
  1503. case bt_char:
  1504. ap1 = g_expr(node->v.p[0], F_ALL | F_USES);
  1505. if (ap1->mode != am_dreg) {
  1506. ap2 = temp_data();
  1507. g_code(op_move, 1, ap1, ap2);
  1508. } else
  1509. ap2 = ap1;
  1510. g_code(op_ext, 2, ap2, NIL_AMODE);
  1511. g_code(op_ext, 4, ap2, NIL_AMODE);
  1512. ap3 = g_expr(node->v.p[1], F_DREG | F_IMMED);
  1513. if (ap3->mode == am_dreg)
  1514. g_code(op_ext, 2, ap3, NIL_AMODE);
  1515. validate(ap2);
  1516. g_code(op_divs, 0, ap3, ap2);
  1517. freeop(ap3);
  1518. if (ap2 != ap1) {
  1519. validate(ap1);
  1520. g_code(op_move, 1, ap2, ap1);
  1521. freeop(ap2);
  1522. }
  1523. return mk_legal(ap1, flags, node->esize);
  1524. case bt_uchar:
  1525. ap1 = g_expr(node->v.p[0], F_ALL | F_USES);
  1526. if (ap1->mode != am_dreg) {
  1527. ap2 = temp_data();
  1528. g_code(op_move, 1, ap1, ap2);
  1529. } else
  1530. ap2 = ap1;
  1531. g_code(op_and, 4, mk_immed(255l), ap2);
  1532. ap3 = g_expr(node->v.p[1], F_DREG | F_IMMED);
  1533. if (ap3->mode == am_dreg)
  1534. g_code(op_and, 2, mk_immed(255l), ap3);
  1535. validate(ap2);
  1536. g_code(op_divu, 0, ap3, ap2);
  1537. freeop(ap3);
  1538. if (ap2 != ap1) {
  1539. validate(ap1);
  1540. g_code(op_move, 1, ap2, ap1);
  1541. freeop(ap2);
  1542. }
  1543. return mk_legal(ap1, flags, node->esize);
  1544. case bt_short:
  1545. case bt_ushort:
  1546. ap1 = temp_data();
  1547. ap2 = g_expr(node->v.p[0], F_ALL | F_USES);
  1548. validate(ap1);
  1549. g_code(op_move, 2, ap2, ap1);
  1550. ap3 = g_expr(node->v.p[1], F_ALL & ~F_AREG);
  1551. validate(ap2);
  1552. validate(ap1);
  1553. if (node->etype == bt_short) {
  1554. g_code(op_ext, 4, ap1, NIL_AMODE);
  1555. g_code(op_divs, 0, ap3, ap1);
  1556. } else {
  1557. g_code(op_and, 4, mk_immed(65535l), ap1);
  1558. g_code(op_divu, 0, ap3, ap1);
  1559. }
  1560. freeop(ap3);
  1561. g_code(op_move, 2, ap1, ap2);
  1562. freeop(ap2);
  1563. return mk_legal(ap1, flags, 2l);
  1564. case bt_long:
  1565. return as_fcall(node, flags, "__divsi3");
  1566. case bt_ulong:
  1567. case bt_pointer:
  1568. return as_fcall(node, flags, "__udivsi3");
  1569. #ifndef NOFLOAT
  1570. #ifdef DOUBLE
  1571. case bt_double:
  1572. #endif
  1573. case bt_float:
  1574. return as_fcall(node, flags, str(ffpdiv));
  1575. #endif
  1576. }
  1577. ierr(G_ASDIV,1);
  1578. /* NOTREACHED */
  1579. return 0; // make the compiler happy
  1580. }
  1581. struct amode *g_asmod(struct enode *node, int flags) {
  1582. /*
* generate %= nodes.
  1584. */
  1585. struct amode *ap1, *ap2, *ap3;
  1586. switch (node->etype) {
  1587. case bt_short:
  1588. case bt_ushort:
  1589. ap1 = temp_data();
  1590. ap2 = g_expr(node->v.p[0], F_ALL | F_USES);
  1591. validate(ap1);
  1592. g_code(op_move, 2, ap2, ap1);
  1593. ap3 = g_expr(node->v.p[1], F_ALL & ~F_AREG);
  1594. validate(ap2);
  1595. validate(ap1);
  1596. if (node->etype == bt_short) {
  1597. g_code(op_ext, 4, ap1, NIL_AMODE);
  1598. g_code(op_divs, 0, ap3, ap1);
  1599. } else {
  1600. g_code(op_and, 4, mk_immed(65535l), ap1);
  1601. g_code(op_divu, 0, ap3, ap1);
  1602. }
  1603. g_code(op_swap, 0, ap1, NIL_AMODE);
  1604. freeop(ap3);
  1605. g_code(op_move, 2, ap1, ap2);
  1606. freeop(ap2);
  1607. return mk_legal(ap1, flags, 2l);
  1608. case bt_long:
  1609. return as_fcall(node, flags, ".lrem");
  1610. case bt_ulong:
  1611. case bt_pointer:
  1612. return as_fcall(node, flags, ".ulrem");
  1613. }
  1614. ierr(G_ASMOD,1);
  1615. /* NOTREACHED */
  1616. return 0; // make the compiler happy
  1617. }
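/*
 * Sketch of the divs/swap idiom above (hypothetical model, unused): the
 * 68000 word divide leaves the quotient in the low word of the register
 * and the remainder in the high word, so op_swap exposes the remainder
 * for the store back.  Assumes a nonzero divisor and a quotient that
 * fits in 16 bits, as the real instruction does.
 */
static unsigned long example_divs_register(long dividend, short divisor) {
	long q = dividend / divisor, r = dividend % divisor;
	return ((unsigned long) (r & 0xFFFFl) << 16)	/* high word: remainder */
	     | ((unsigned long) (q & 0xFFFFl));		/* low word: quotient  */
}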
  1618. void structassign(struct amode *ap1, struct amode *ap2, long size, int mode) {
  1619. /*
  1620. * assign structure from ap1 to ap2
  1621. * ap1, ap2 are scratch address registers
  1622. */
  1623. #ifdef BIGMEM
  1624. long loop_c;
  1625. #else
  1626. int loop_c;
  1627. #endif
  1628. int rest;
  1629. struct amode *ap3;
  1630. unsigned int label;
  1631. ap1 = copy_addr(ap1);
  1632. ap2 = copy_addr(ap2);
  1633. ap1->mode = mode;
  1634. ap2->mode = mode;
  1635. loop_c = size >> 2;
  1636. rest = (int) (size & 3);
  1637. if (loop_c <= 5) /* loop-unrolling */
  1638. while (loop_c--)
  1639. g_code(op_move, 4, ap1, ap2);
  1640. else {
  1641. loop_c--; /* for dbra */
  1642. ap3 = temp_data();
  1643. freeop(ap3);
  1644. label = nxtlabel();
  1645. #ifdef BIGMEM
  1646. if (loop_c <= 65535) { // single loop
  1647. #endif
  1648. g_code(op_move, 2, mk_immed(loop_c), ap3);
  1649. g_label(label);
  1650. g_code(op_move, 4, ap1, ap2);
  1651. g_code(op_dbra, 0, ap3, mk_label(label));
  1652. #ifdef BIGMEM
  1653. } else { // extended loop
g_code(op_move, 4, mk_immed(loop_c), ap3);
  1655. g_label(label);
  1656. g_code(op_move, 4, ap1, ap2);
  1657. g_code(op_dbra, 0, ap3, mk_label(label));
  1658. g_code(op_sub, 4, mk_immed(65536l), ap3);
  1659. g_code(op_bhs, 0, mk_label(label), NIL_AMODE);
  1660. }
  1661. #endif
  1662. }
  1663. #if AL_DEFAULT!=2
  1664. if (rest >= 2) {
  1665. rest -= 2;
  1666. g_code(op_move, 2, ap1, ap2);
  1667. }
  1668. /* This cannot happen if the size of structures is always even */
  1669. if (rest)
  1670. g_code(op_move, 1, ap1, ap2);
  1671. #ifdef SHORT_STRUCT_PASSING
  1672. #error "Much of the short struct stuff assumes that structs whose size is under 4 \
  1673. really are short structs (while they aren't, since to do an assignment of a \
  1674. 3-byte struct, you have to do 2 moves, if not 3...)"
  1675. #error "So let AL_DEFAULT be 2."
  1676. #endif
  1677. #else
  1678. if (rest)
  1679. g_code(op_move, 2, ap1, ap2);
  1680. #endif
  1681. }
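/*
 * Byte-level sketch of the copy strategy above (hypothetical helper,
 * unused): size>>2 longword moves plus a 0..3 byte tail.  In the real
 * code dbra executes its body count+1 times, hence the loop_c-- before
 * the loop is emitted.
 */
static void example_block_copy(char *dst, const char *src, long size) {
	long longs = size >> 2;		/* number of move.l's */
	int tail = (int) (size & 3);	/* leftover bytes */
	while (longs--)
		*dst++ = *src++, *dst++ = *src++, *dst++ = *src++, *dst++ = *src++;
	while (tail--)
		*dst++ = *src++;
}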
  1682. struct amode *g_assign(struct enode *node, int flags) {
  1683. /*
  1684. * generate code for an assignment node.
  1685. */
  1686. int f;
  1687. struct amode *ap1, *ap2, *ap3;
  1688. struct enode *ep;
  1689. long size = node->esize;
  1690. if (flags & F_NOVALUE)
  1691. f = F_ALL;
  1692. else
  1693. f = F_ALL | F_USES;
  1694. if (bt_aggregate(node->etype)) {
  1695. #ifdef SHORT_STRUCT_PASSING
  1696. if (node->esize<=4) {
  1697. ap1 = g_expr(node->v.p[1], F_AREG);
  1698. ap2 = g_expr(node->v.p[0], F_AREG);
  1699. validate(ap1);
  1700. structassign(ap1, ap2, (long)node->esize, am_ind);
  1701. freeop(ap2);
  1702. return mk_legal(ap1, flags, 4l);
  1703. }
  1704. #endif
  1705. /*
  1706. * Other parts of this module return a pointer to a struct in a register,
  1707. * not the struct itself
  1708. */
  1709. ap1 = g_expr(node->v.p[1], F_AREG | F_VOL);
  1710. ap2 = g_expr(node->v.p[0], F_AREG | F_VOL);
  1711. validate(ap1);
  1712. /* hacky: save ap1 if needed later, structassign destroys it */
  1713. if (!(flags & F_NOVALUE)) {
  1714. ap3 = temp_addr();
  1715. /* BTW, this code gets eliminated with MAX_ADDR = 1 */
  1716. g_code(op_move, 4, ap1, ap3);
  1717. structassign(ap3, ap2, (long)size, am_ainc);
  1718. freeop(ap3);
  1719. freeop(ap2);
  1720. validate(ap1);
  1721. return mk_legal(ap1, flags, 4l);
  1722. } else { /* no need to save any registers */
  1723. structassign(ap1, ap2, (long)node->esize, am_ainc);
  1724. freeop(ap2);
  1725. /* mk_legal is a no-op here */
  1726. return mk_legal(ap1, flags, 4l);
  1727. }
  1728. }
  1729. if (node->v.p[0]->nodetype == en_fieldref) {
  1730. long mask;
  1731. int i;
  1732. /*
  1733. * Field assignment
  1734. */
  1735. #ifdef OLD_FIELD_ASSIGN
  1736. /* get the value */
  1737. ap1 = g_expr(node->v.p[1], F_DREG | F_VOL);
  1738. i = node->v.p[0]->bit_width;
  1739. mask = 0;
  1740. while (i--)
  1741. mask = mask + mask + 1;
  1742. g_code(op_and, (int) size, mk_immed(mask), ap1);
  1743. i = (node->v.p[0]->esize<<3)-node->v.p[0]->bit_offset-node->v.p[0]->bit_width;
  1744. mask <<= i;
  1745. if (!(flags & F_NOVALUE)) {
  1746. /*
  1747. * result value needed
  1748. */
  1749. ap3 = temp_data();
  1750. g_code(op_move, 4, ap1, ap3);
  1751. } else
  1752. ap3 = ap1;
  1753. if (i > 0) {
  1754. if (i == 1) {
  1755. /* add dn,dn */
  1756. g_code(op_add, (int) node->esize, ap3, ap3);
  1757. } else if (i <= 8) {
  1758. g_code(op_lsl, (int) size,
  1759. mk_immed(i), ap3);
  1760. } else {
  1761. ap2 = temp_data();
  1762. g_code(op_moveq, 0,
  1763. mk_immed(i), ap2);
  1764. g_code(op_lsl, (int) size, ap2, ap3);
  1765. freeop(ap2);
  1766. }
  1767. }
  1768. ep = mk_node(en_ref, node->v.p[0]->v.p[0], NIL_ENODE);
  1769. ep->esize = 1;
  1770. ap2 = g_expr(ep, F_MEM);
  1771. validate(ap3);
  1772. g_code(op_and, (int) size, mk_immed(~mask), ap2);
  1773. g_code(op_or, (int) size, ap3, ap2);
  1774. freeop(ap2);
  1775. if (!(flags & F_NOVALUE)) {
  1776. freeop(ap3);
  1777. validate(ap1);
  1778. }
  1779. #else
  1780. {
  1781. ep = mk_node(en_ref, node->v.p[0]->v.p[0], NIL_ENODE);
  1782. ep->esize = node->v.p[1]->esize;
  1783. ep->etype = node->v.p[1]->etype;
  1784. ap2 = g_expr(ep, F_MEM);
  1785. ep=node->v.p[1];
  1786. /* get the value */
  1787. i = node->v.p[0]->bit_width;
  1788. mask = 0;
  1789. while (i--)
  1790. mask = mask + mask + 1;
  1791. ep = mk_node(en_and, ep, mk_icon(mask));
  1792. ep->esize=ep->v.p[0]->esize;
  1793. ep->etype=ep->v.p[0]->etype;
  1794. ep->v.p[1]->esize=ep->esize;
  1795. ep->v.p[1]->etype=ep->etype;
  1796. i = (node->v.p[0]->esize<<3)-node->v.p[0]->bit_offset-node->v.p[0]->bit_width;
  1797. mask <<= i;
  1798. ep = mk_node(en_lsh, ep, mk_icon(i));
  1799. ep->esize=ep->v.p[0]->esize;
  1800. ep->etype=ep->v.p[0]->etype;
  1801. ep->v.p[1]->esize=1;
  1802. ep->v.p[1]->etype=bt_char;
  1803. opt0(&ep);
  1804. ap1 = g_expr(ep, F_DREG | F_IMMED);
  1805. validate(ap2);
  1806. if (ap1->mode==am_immed && node->v.p[0]->bit_width==1) {
  1807. // note that thus, we need not free/validate anything...
  1808. if (!((~ap1->offset->v.i)&mask)) {
  1809. ap1->offset->v.i = i;
  1810. g_bitmancode(op_bset, (int)size, ap1, ap2);
  1811. freeop(ap2);
  1812. if (!(flags & F_NOVALUE))
  1813. return mk_legal(mk_immed(1L),flags,node->esize);
  1814. return NIL_AMODE;
  1815. } else if (!(ap1->offset->v.i&mask)) {
  1816. ap1->offset->v.i = i;
  1817. g_bitmancode(op_bclr, (int)size, ap1, ap2);
  1818. freeop(ap2);
  1819. if (!(flags & F_NOVALUE))
  1820. return mk_legal(mk_immed(0L),flags,node->esize);
  1821. return NIL_AMODE;
  1822. }
  1823. } else {
  1824. g_code(op_and, (int) size, mk_immed(~mask), ap2);
  1825. g_code(op_or, (int) size, ap1, ap2);
  1826. freeop(ap1);
  1827. if (!(flags & F_NOVALUE))
  1828. return g_fderef(node->v.p[0], ap2, flags);
  1829. freeop(ap2);
  1830. return NIL_AMODE;
  1831. }
  1832. }
  1833. #endif
  1834. return mk_legal(ap1, flags, size);
  1835. }
  1836. /*
  1837. * (uns.) char, (uns.) short, (uns.) long, float
  1838. *
  1839. * we want to pass the right hand side as the expression value. This can't
  1840. * be done if the left side is a register variable on which the right
  1841. * hand side addressing mode depends. But if the left side IS a register
  1842. * variable, it is desirable to pass the left side, so no problem.
  1843. */
  1844. if (node->v.p[0]->nodetype == en_tempref) {
  1845. /* pass the left side as expr. value */
  1846. ap1 = g_expr(node->v.p[0], f);
  1847. ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
  1848. validate(ap1);
  1849. g_code(op_move, (int) size, ap2, ap1);
  1850. freeop(ap2);
  1851. return mk_legal(ap1, flags, size);
  1852. } else {
  1853. /* pass the right side as expr. value */
  1854. /* normally, this is more efficient */
  1855. ap1 = g_expr(node->v.p[1], f | F_SRCOP);
  1856. ap2 = g_expr(node->v.p[0], F_ALL);
  1857. validate(ap1);
  1858. g_code(op_move, (int) size, ap1, ap2);
  1859. freeop(ap2);
  1860. return mk_legal(ap1, flags, size);
  1861. }
  1862. }
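/*
 * Sketch of the read-modify-write performed for field assignment above
 * (hypothetical helper, unused): clear the field in the container word,
 * then or in the width-masked, shifted value.  `shift' counts from the
 * low end, matching (esize<<3)-bit_offset-bit_width; width < 32.
 */
static unsigned long example_field_assign(unsigned long word,
					  unsigned long value,
					  int shift, int width) {
	unsigned long mask = ((1ul << width) - 1) << shift;
	return (word & ~mask) | ((value << shift) & mask);
}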
  1863. struct amode *g_aincdec(struct enode *node, int flags, enum(e_op) op) {
  1864. /*
  1865. * generate an auto increment or decrement node. op should be either op_add
  1866. * (for increment) or op_sub (for decrement).
  1867. */
  1868. struct amode *ap1, *ap2;
  1869. switch (node->etype) {
  1870. case bt_uchar:
  1871. case bt_char:
  1872. case bt_short:
  1873. case bt_ushort:
  1874. case bt_long:
  1875. case bt_ulong:
  1876. case bt_pointer:
  1877. if (flags & F_NOVALUE) {/* dont need result */
  1878. ap1 = g_expr(node->v.p[0], F_ALL);
  1879. g_code(op, (int) node->esize, mk_immed((long) node->v.p[1]->v.i),
  1880. ap1);
  1881. return mk_legal(ap1, flags, node->esize);
  1882. }
  1883. if (flags & F_DREG)
  1884. ap1 = temp_data();
  1885. else
  1886. ap1 = temp_addr();
  1887. ap2 = g_expr(node->v.p[0], F_ALL | F_USES);
  1888. validate(ap1);
  1889. g_code(op_move, (int) node->esize, ap2, ap1);
  1890. g_code(op, (int) node->esize, mk_immed((long) node->v.p[1]->v.i), ap2);
  1891. freeop(ap2);
  1892. return mk_legal(ap1, flags, node->esize);
  1893. }
  1894. ierr(G_AINCDEC,1);
  1895. /* NOTREACHED */
  1896. return 0; // make the compiler happy
  1897. }
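/*
 * Sketch of the post-increment protocol above (hypothetical helper,
 * unused): when the value is wanted, the old value is copied to a fresh
 * register before the add/sub modifies the operand, so the expression
 * yields the pre-modification value.
 */
static long example_post_inc(long *p, long step) {
	long old = *p;		/* move <ea>,dn    */
	*p += step;		/* add #step,<ea>  */
	return old;
}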
  1898. long push_param(struct enode *ep) {
  1899. /*
  1900. * push the operand expression onto the stack. return the number of bytes
  1901. * pushed
  1902. */
  1903. struct amode *ap;
  1904. #ifdef OLD_STRUCT_PUSH
  1905. struct amode *ap1;
  1906. #endif
  1907. long size = ep->esize;
  1908. int old_pushed;
  1909. /* pushing of aggregates: (short) structures and unions, as well as BCD floats */
  1910. if (bt_aggregate(ep->etype)) {
  1911. if (ep->nodetype==en_ref)
  1912. ep = ep->v.p[0];
  1913. /* all other cases return a pointer to the struct anyway */
  1914. #ifdef OLD_STRUCT_PUSH
  1915. /* allocate stack space */
  1916. g_code(op_sub, 4, mk_immed(size), mk_reg(STACKPTR));
  1917. /*
  1918. * F_VOL was missing in the following line --
  1919. * it took a hard-core debugging session to find this error
  1920. */
  1921. ap = g_expr(ep, F_AREG | F_VOL);
  1922. ap1 = temp_addr();
  1923. validate(ap);
  1924. g_code(op_move, 4, mk_reg(STACKPTR), ap1);
  1925. /* now, copy it on stack - the same as structassign */
  1926. structassign(ap, ap1, size, am_ainc);
  1927. freeop(ap1);
  1928. freeop(ap);
  1929. #else
if ((size&1)) size++; /* keep the pushed size even: the 68000 stack pointer must stay word-aligned */
  1931. {
  1932. struct enode *ep2=mk_icon(size);
  1933. ep2->etype=bt_long; ep2->esize=4;
  1934. ep = mk_node(en_add,ep,ep2);
  1935. ep->etype=bt_pointer; ep->esize=4;
  1936. opt0(&ep);
  1937. ap = g_expr(ep, F_AREG | F_VOL);
  1938. /* now, copy it on stack - the same as structassign */
  1939. structassign(ap, mk_reg(STACKPTR), size, am_adec);
  1940. freeop(ap);
  1941. }
  1942. #endif
  1943. return size;
  1944. }
  1945. old_pushed = pushed;
  1946. pushed = 0;
  1947. ap = g_expr(ep, F_ALL | F_DEREF);
  1948. /*
  1949. * This is a hook for the peephole optimizer, which will convert lea
  1950. * <ea>,An + pea (An) ==> pea <ea>
  1951. */
  1952. #ifdef POP_OPT
  1953. if (old_pushed && pushed)
  1954. g_code(_op_adj, 0, NIL_AMODE, NIL_AMODE);
  1955. #endif
  1956. pushed = 1;
  1957. if ((ap->mode == am_areg || ap->mode == am_immed)
  1958. && size == 4 && ap->preg <= MAX_ADDR) {
  1959. ap = copy_addr(ap);
  1960. am_doderef(ap->mode);
  1961. g_code(op_pea, 0, ap, NIL_AMODE);
  1962. } else
  1963. g_code(op_move, (int) size, ap, push_am);
  1964. freeop(ap);
  1965. return size+(size&1);
  1966. }
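/*
 * Sketch (hypothetical helper, unused): the byte count reported for a
 * pushed parameter is rounded up to the next even value, since the 68000
 * stack pointer must stay word-aligned -- the size+(size&1) above.
 */
static long example_round_to_even(long size) {
	return size + (size & 1);
}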
  1967. int req_all_aregs(struct enode *plist,int rp_dn,int rp_an) {
  1968. /* requires all regs if and only if (num_params>=regs_num || num_pointers>=aregs_num) */
  1969. rp_dn+=rp_an;
  1970. while (plist) {
  1971. if (!--rp_dn) return 1;
  1972. if (plist->v.p[0]->etype==bt_pointer && !--rp_an) return 1;
  1973. plist=plist->v.p[1];
  1974. }
  1975. return 0;
  1976. }
  1977. long g_parms(struct enode *plist
  1978. #ifdef REGPARM
  1979. , int rp_dn, int rp_an, struct amode **a1ap
  1980. #endif
  1981. ) {
  1982. /*
  1983. * push a list of parameters onto the stack and return the number of
  1984. * parameters pushed.
  1985. */
  1986. #ifdef BIGSTACK
  1987. long i=0;
  1988. #else
  1989. int i=0;
  1990. #endif
  1991. pushed = 0;
  1992. #ifdef REGPARM
  1993. #ifdef PC
  1994. if (rp_dn IS_INVALID || rp_an IS_INVALID)
  1995. ierr(G_PARMS,1);
  1996. #endif
  1997. if (rp_dn || rp_an) {
  1998. int nr=rp_dn+rp_an,np=0,n;
  1999. struct enode *ep=plist,**p,**dp,**ap,*allocbase[16],*list[16];
  2000. struct amode deep[CONVENTION_MAX_DATA+1+CONVENTION_MAX_ADDR+1],*deepp;
  2001. while (ep) np++, ep=ep->v.p[1];
  2002. /* first, push stack params while all the temp registers are free */
  2003. while (np>nr) {
  2004. i += push_param(plist->v.p[0]);
  2005. plist = plist->v.p[1];
  2006. np--;
  2007. }
/* store the last params so we can examine them in the correct order */
  2009. n=np; while (n--) list[n]=plist->v.p[0], plist=plist->v.p[1];
  2010. /* now, fill in 'allocbase' */
  2011. p=list; dp=&allocbase[0]; ap=&allocbase[8];
  2012. n=np; while (n--) {
  2013. ep=*p++;
  2014. if ((ep->etype==bt_pointer && rp_an) || !rp_dn)
  2015. *ap++=ep, rp_an--;
  2016. else *dp++=ep, rp_dn--;
  2017. }
  2018. *ap=NULL; *dp=NULL;
  2019. /* finally, load all the parameters into the correct registers */
  2020. dp=&allocbase[0]; ap=&allocbase[8];
  2021. deepp=deep;
  2022. n=8; while (n--) { /* push d0/a0/d1/a1/... */
  2023. if (*dp) {
  2024. //#ifdef PC
  2025. struct amode *amp=
  2026. //#endif
  2027. g_expr(*dp++,F_DREG|F_VOL);
  2028. #ifdef INFINITE_REGISTERS
  2029. struct amode *ap2 = (struct amode *) xalloc((int) sizeof(struct amode), 0);
  2030. ap2->mode = am_dreg;
  2031. ap2->preg = TDREGBASE+dp-allocbase-1;
  2032. g_code(op_move, dp[-1]->esize, amp, ap2);
  2033. freeop(amp);
  2034. amp=ap2;
  2035. #endif
  2036. #ifdef PC
  2037. if (amp->mode!=am_dreg || amp->preg!=TDREGBASE+dp-allocbase-1)
  2038. ierr(REGPARM,1);
  2039. #endif
  2040. *deepp++=*amp;
  2041. }
  2042. if (*ap) {
  2043. //#ifdef PC
  2044. struct amode *amp=
  2045. //#endif
  2046. *a1ap = g_expr(*ap++,F_AREG|F_VOL);
  2047. #ifdef INFINITE_REGISTERS
  2048. struct amode *ap2 = (struct amode *) xalloc((int) sizeof(struct amode), 0);
  2049. ap2->mode = am_areg;
  2050. ap2->preg = TDREGBASE+ap-allocbase-1-8;
  2051. g_code(op_move, ap[-1]->esize, amp, ap2);
  2052. freeop(amp);
  2053. amp=ap2;
  2054. #endif
  2055. #ifdef PC
  2056. if (amp->mode!=am_areg || amp->preg!=TDREGBASE+ap-allocbase-1-8)
  2057. ierr(REGPARM,2);
  2058. #endif
  2059. *deepp++=*amp;
  2060. }
  2061. }
  2062. while (deepp>deep)
  2063. validate(--deepp);
  2064. } else {
  2065. #endif
  2066. while (plist != 0) {
  2067. i += push_param(plist->v.p[0]);
  2068. plist = plist->v.p[1];
  2069. }
  2070. #ifdef REGPARM
  2071. }
  2072. #endif
  2073. return i;
  2074. }
  2075. struct amode *func_result(int flags, long bytes) {
  2076. /*
* saves a function call result in D0. It is assumed that flags contain
* either F_DREG or F_AREG. Return value is the addressing mode of the
* result; bytes is the number of bytes to pop off the stack.
  2080. *
  2081. * This routine does not use mk_legal and takes care of the stuff itself.
  2082. */
  2083. struct amode *ap;
  2084. if (bytes != 0)
  2085. /* adjust stack pointer */
  2086. g_code(op_add, 4, mk_immed(bytes), mk_reg(STACKPTR));
  2087. if (flags & F_NOVALUE)
  2088. return 0;
  2089. if (flags & F_DREG) {
  2090. ap = temp_data();
  2091. g_code(op_move, 4, mk_reg(RESULT), ap);
  2092. } else if (flags & F_AREG) {
  2093. ap = temp_addr();
  2094. g_code(op_move, 4, mk_reg(RESULT), ap);
  2095. #ifdef PC
  2096. } else {
  2097. ierr(FUNC_RESULT,1);
  2098. return 0; // make the compiler happy
  2099. #endif
  2100. }
  2101. return ap;
  2102. }
  2103. struct amode *func_result2(int flags, long bytes, int reg) {
  2104. /*
  2105. * Saves a function call result in REG. It is assumed that flags contain
  2106. * either F_DREG or F_AREG. Return value is the addressing mode of the
  2107. * result; bytes is the number of bytes to pop off the stack
  2108. *
  2109. * This routine does not use mk_legal and takes care of the stuff itself.
  2110. */
  2111. struct amode *ap;
  2112. if (bytes != 0) /* adjust stack pointer */
  2113. g_code(op_add, 4, mk_immed(bytes), mk_reg(STACKPTR));
  2114. if (flags & F_NOVALUE)
  2115. return 0;
if ((flags & F_DREG) && reg==RESULT) { // avoids the pair move.l a0,d0 /
ap = temp_data(); // move.l d0,rn #roll#
  2118. g_code(op_move, 4, mk_reg(reg), ap);
  2119. } else if (flags & F_AREG) {
  2120. ap = temp_addr();
  2121. g_code(op_move, 4, mk_reg(reg), ap);
  2122. } else {
  2123. #ifdef PC
  2124. if (flags & F_DREG) {
  2125. #endif
  2126. ap = temp_data();
  2127. g_code(op_move, 4, mk_reg(reg), ap);
  2128. #ifdef PC
  2129. } else {
  2130. ierr(FUNC_RESULT,2);
  2131. return 0; // make the compiler happy
  2132. }
  2133. #endif
  2134. }
  2135. return ap;
  2136. }
  2137. struct amode *as_fcall(struct enode *node, int flags, char *libname) {
  2138. /* assignment operations with library calls */
  2139. long i;
  2140. struct amode *ap1;
  2141. long size;
  2142. size = node->esize;
  2143. temp_inv();
  2144. i = push_param(node->v.p[1]);
  2145. if (node->v.p[0]->nodetype == en_tempref) {
  2146. /* ap1 cannot be destroyed, no problem */
  2147. ap1 = g_expr(node->v.p[0], F_DREG | F_AREG);
  2148. g_code(op_move, (int) size, ap1, push_am);
  2149. i += size;
  2150. call_library(libname);
  2151. /* ap1 is always valid and not equal to RESULT */
  2152. g_code(op_move, (int) size, mk_reg(RESULT), ap1);
  2153. } else {
  2154. uwarn("possible flaw in lib call");
  2155. ap1 = g_expr(node->v.p[0], F_DREG | F_AREG);
  2156. g_code(op_move, (int) size, ap1, push_am);
  2157. i += size;
  2158. call_library(libname);
  2159. /* ap1 is always valid and not equal to RESULT */
  2160. g_code(op_move, (int) size, mk_reg(RESULT), ap1);
  2161. }
  2162. g_code(op_add, 4, mk_immed((long) i), mk_reg(STACKPTR));
  2163. if (!(flags & F_NOVALUE)) {
  2164. if (flags & F_AREG)
  2165. ap1 = temp_addr();
  2166. else
  2167. ap1 = temp_data();
  2168. g_code(op_move, 4, mk_reg(RESULT), ap1);
  2169. return mk_legal(ap1, flags, size);
  2170. } else
  2171. return 0;
  2172. }
  2173. #ifndef __HAVE_REGS_IMAGE
  2174. #define __HAVE_REGS_IMAGE
  2175. typedef struct _regsimg {
  2176. #ifndef INFINITE_REGISTERS
  2177. int reg_alloc_ptr,reg_stack_ptr;
  2178. int next_data,next_addr;
  2179. #endif
  2180. } REGS_IMAGE;
  2181. #endif
  2182. readonly struct amode am_a1={am_areg,0,1,0,0,0};
  2183. readonly struct amode am_a2={am_areg,0,2,0,0,0};
  2184. readonly struct amode am_a2ind={am_ind,0,2,0,0,0};
  2185. extern int hexatoi(char *s);
  2186. struct amode *g_fcall(struct enode *node, int flags) {
  2187. /*
  2188. * generate a function call node and return the address mode of the result.
  2189. */
struct amode *ap; struct enode *dep; enum(e_node) nt;
  2191. long i;
  2192. #ifdef SHORT_STRUCT_PASSING
  2193. int short_struct_return=0;
  2194. #endif
  2195. #ifdef PC
  2196. // avoid a compiler warning...
  2197. struct amode *struct_ap = 0;
  2198. #else
  2199. struct amode *struct_ap = struct_ap;
  2200. #endif
  2201. #ifdef REGPARM
  2202. // struct amode *regap[(MAX_DATA+1+MAX_ADDR+1)+1],**rapp;
struct amode *a1ap; /* dep is declared above: it is also needed without REGPARM */
  2204. REGS_IMAGE regs_img;
  2205. int allaregs_patch=0;
  2206. #endif
  2207. /* push any used addr&data temps */
  2208. dep = node->v.p[0];
  2209. while (dep->nodetype==en_cast && dep->esize==4) dep=dep->v.p[0];
  2210. nt = dep->nodetype;
  2211. // nt = node->v.p[0]->nodetype;
  2212. /* if (nt==en_nacon && !strcmp(node->v.p[0]->v.ensp,"rand"))
  2213. printf("jdfio");*/
  2214. temp_inv();
  2215. #ifdef REGPARM
  2216. useregs(&regs_img);
  2217. #ifndef INFINITE_REGISTERS
  2218. if (node->rp_an>MAX_ADDR && !(nt==en_nacon || nt==en_labcon
  2219. || (nt==en_tempref && node->v.p[0]->v.i>=AREGBASE))
  2220. && req_all_aregs(node->v.p[1],node->rp_dn,node->rp_an))
  2221. allaregs_patch=1;
  2222. #endif
  2223. i = g_parms(node->v.p[1],node->rp_dn,
  2224. node->rp_an/*-allaregs_patch*/,&a1ap); /* generate parameters */
  2225. if (allaregs_patch) freeop(a1ap);
  2226. #else
  2227. i = g_parms(node->v.p[1]); /* generate parameters */
  2228. #endif
  2229. /*
  2230. * for functions returning a structure or a union, push a pointer to the
  2231. * return value as additional argument The scratch space will be
  2232. * allocated in the stack frame of the calling function.
  2233. */
  2234. if (bt_aggregate(node->etype)) {
  2235. struct_ap = mk_scratch(node->esize);
  2236. #ifdef SHORT_STRUCT_PASSING
  2237. if (node->esize>4)
  2238. #endif
  2239. g_code(op_pea, 0, struct_ap, NIL_AMODE), i += 4l;
  2240. #ifdef SHORT_STRUCT_PASSING
  2241. else short_struct_return=1;
  2242. #endif
// freeop(ap); it is useless, as scratch amodes need not be freed
  2244. }
  2245. /* call the function */
  2246. if (nt == en_nacon || nt == en_labcon) {
  2247. /*if (!strcmp(node->v.p[0]->v.ensp,"rand"))
  2248. printf("jdfio");*/
  2249. #ifdef FLINE_RC
  2250. if (fline_rc && nt==en_nacon && dep->v.ensp && !strncmp(dep->v.ensp,"_ROM_CALL_",10))
  2251. g_code(op_dc, 2, mk_offset(mk_icon(0xF800+hexatoi(dep->v.ensp+10))), NIL_AMODE);
  2252. else
  2253. #endif
  2254. g_code(op_jsr, 0, mk_offset(dep), NIL_AMODE);
  2255. // g_code(op_jsr, 0, mk_offset(node->v.p[0]), NIL_AMODE);
  2256. } else {
  2257. #ifdef REGPARM
  2258. if (allaregs_patch) {
  2259. g_code(op_move, 4, (struct amode *)&am_a2, push_am);
  2260. g_code(op_move, 4, (struct amode *)&am_a1, (struct amode *)&am_a2);
  2261. }
  2262. #endif
  2263. ap = g_expr(node->v.p[0], F_AREG);
  2264. ap = copy_addr(ap);
  2265. ap->mode = am_ind;
  2266. freeop(ap);
  2267. #ifdef REGPARM
  2268. if (allaregs_patch) {
  2269. #ifdef PC
  2270. if (ap->preg!=1)
  2271. ierr(REGPARM,3);
  2272. #endif
  2273. /* struct amode *ap2 =
  2274. (struct amode *) xalloc((int) sizeof(struct amode), AMODE);*/
  2275. g_code(op_exg, 4, (struct amode *)&am_a1, (struct amode *)&am_a2);
  2276. ap=(struct amode *)&am_a2ind;
  2277. /* ap2->mode = am_areg;
  2278. ap2->preg = node->rp_an-1;
  2279. g_code(op_move, 4, pop_am, ap2); // always long since it's 'bt_pointer'
  2280. i-=4;*/
  2281. }
  2282. #endif
  2283. g_code(op_jsr, 0, ap, NIL_AMODE);
  2284. #ifdef REGPARM
  2285. if (allaregs_patch)
  2286. g_code(op_move, 4, pop_am, (struct amode *)&am_a2);
  2287. #endif
  2288. }
  2289. #ifdef REGPARM
  2290. /* free register params */
  2291. /* rapp=regap;
  2292. while (*rapp)
  2293. freeop(*rapp++);*/
  2294. freeregs(&regs_img);
  2295. #endif
  2296. #ifdef SHORT_STRUCT_PASSING
  2297. if (short_struct_return) {
  2298. if (flags & F_NOVALUE)
  2299. return func_result2(F_NOVALUE,i,0);
  2300. g_code(op_lea,0,struct_ap,mk_reg(PRESULT)); /* note : this *is* commutative with
  2301. * popping args off as we use a virtual
  2302. * a6-like register for struct_ap...
  2303. * But we had better postpone this lea
  2304. * as late as possible to take advantage
  2305. * of peephole optimizations.
  2306. */
  2307. ap = mk_reg(PRESULT); ap->mode=am_ind;
  2308. g_code(op_move,node->esize,mk_reg(RESULT),ap);
  2309. }
  2310. #endif
  2311. return func_result2(flags, i,
  2312. (node->etype>=bt_pointer&&node->etype<=bt_union)?PRESULT:RESULT);
  2313. }
  2314. struct amode *g_alloca(struct enode *node) {
  2315. struct enode *ep=mk_node(en_add,node->v.p[0],mk_icon(1));
  2316. struct amode *ap1, *ap2;
  2317. ep->etype=node->v.p[0]->etype;
  2318. ep->esize=node->v.p[0]->esize;
  2319. ep->v.p[1]->etype=ep->etype;
  2320. ep->v.p[1]->esize=ep->esize;
  2321. ep=mk_node(en_and,mk_icon(-2),ep);
  2322. ep->etype=ep->v.p[1]->etype;
  2323. ep->esize=ep->v.p[1]->esize;
  2324. ep->v.p[0]->etype=ep->etype;
  2325. ep->v.p[0]->esize=ep->esize;
  2326. ap1 = mk_reg(STACKPTR);
  2327. opt0(&ep);
  2328. ap2 = g_expr(ep, F_DREG | F_IMMED);
  2329. g_code(op_sub, 2, ap2, ap1);
  2330. freeop(ap2);
  2331. return ap1;
  2332. }
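/*
 * Sketch of the size adjustment above (hypothetical helper, unused): the
 * requested byte count is rounded up to an even value, built in the tree
 * as an en_add of 1 followed by an en_and with -2, before being
 * subtracted from the stack pointer.
 */
static long example_alloca_round(long n) {
	return (n + 1) & ~1l;	/* same as (n + 1) & -2 */
}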
  2333. #define F_GCAST 0
  2334. //#define F_GCAST F_USES => seems completely useless...
  2335. #ifdef G_CAST2
  2336. struct amode *g_cast2(struct enode *ep, enum(e_bt) typ2, int flags) {
  2337. /*
  2338. * generates code for a en_cast node
  2339. *
  2340. */
  2341. struct amode *ap;
  2342. enum(e_bt) typ1=ep->etype;
  2343. if (typ1==bt_long && typ2==bt_short) { /* useful when using short mul's */
  2344. if (ep->nodetype==en_cast && ep->v.p[0]->etype==bt_short)
  2345. return g_expr(ep->v.p[0], flags);
  2346. }
  2347. ap=g_expr(ep, F_ALL | F_SRCOP | F_GCAST);
  2348. return g_cast(ap, typ1, typ2, flags);
  2349. }
  2350. #endif
  2351. struct amode *g_cast(struct amode *ap, enum(e_bt) typ1, enum(e_bt) typ2, int flags) {
  2352. /*
  2353. * generates code for an en_cast node
  2354. *
  2355. */
  2356. struct amode *ap1;
  2357. int f;
  2358. if (flags & F_NOVALUE) {
  2359. freeop(ap);
  2360. return 0;
  2361. }
  2362. /* the following code from now on is meaningless :
  2363. * 'useless' casts are sometimes generated to keep track of
  2364. * the previous TYP structure */
  2365. #if 0
  2366. if (typ1 == typ2)
  2367. /*
* this can happen with the g_xmul stuff, where a cast from
  2369. * (u)short to long now casts from (u)short to (u)short for an 68000
  2370. * mulu or muls instruction.
  2371. * It is safe to cut things short then.
  2372. * It should not happen with types other than (u)short, but
  2373. * it does not harm either.
  2374. */
  2375. if (typ1 == bt_short || typ1 == bt_ushort)
  2376. return mk_legal(ap, flags, 2l);
  2377. //else
  2378. // msg("DEBUG: g_cast: typ1 == typ2\n");
  2379. #endif
  2380. switch (typ2) {
  2381. /* switch: type to cast to */
  2382. case bt_char:
  2383. case bt_uchar:
  2384. switch (typ1) {
  2385. case bt_uchar:
  2386. case bt_char:
  2387. return mk_legal(ap, flags, 1l);
  2388. case bt_ushort:
  2389. case bt_short:
  2390. if ((ap1 = g_offset(ap, 1)) == 0)
  2391. ap1 = mk_legal(ap, F_DREG, 2l);
  2392. return mk_legal(ap1, flags, 1l);
  2393. case bt_ulong:
  2394. case bt_long:
  2395. case bt_pointer:
  2396. if ((ap1 = g_offset(ap, 3)) == 0)
  2397. ap1 = mk_legal(ap, F_DREG, 4l);
  2398. return mk_legal(ap1, flags, 1l);
  2399. case bt_float:
  2400. #ifdef DOUBLE
  2401. case bt_double:
  2402. #endif
  2403. return g_cast(g_cast(ap, bt_double, bt_long, F_DREG),
  2404. bt_long, typ2, F_DREG);
  2405. }
  2406. break;
  2407. case bt_ushort:
  2408. case bt_short:
  2409. switch (typ1) {
  2410. case bt_uchar:
  2411. ap = mk_legal(ap, F_DREG | F_VOL, 1l);
  2412. g_code(op_and, 2, mk_immed(255l), ap);
  2413. return mk_legal(ap, flags, 2l);
  2414. case bt_char:
  2415. ap = mk_legal(ap, F_DREG | F_VOL, 1l); // F_VOL is important here!
  2416. g_code(op_ext, 2, ap, NIL_AMODE); // (otherwise (short)(char)my_short fails...)
  2417. return mk_legal(ap, flags, 2l);
  2418. case bt_short:
  2419. case bt_ushort:
  2420. return mk_legal(ap, flags, 2l);
  2421. case bt_long:
  2422. case bt_ulong:
  2423. case bt_pointer:
  2424. if ((ap1 = g_offset(ap, 2)) == 0)
  2425. ap1 = mk_legal(ap, F_DREG, 4l);
  2426. return mk_legal(ap1, flags, 2l);
  2427. case bt_float:
  2428. #ifdef DOUBLE
  2429. case bt_double:
  2430. #endif
  2431. return g_cast(g_cast(ap, bt_double, bt_long, F_DREG),
  2432. bt_long, typ2, F_DREG);
  2433. }
  2434. break;
  2435. case bt_long:
  2436. case bt_ulong:
  2437. case bt_pointer:
  2438. switch (typ1) {
  2439. case bt_uchar:
  2440. ap = mk_legal(ap, F_DREG | F_VOL, 1l);
  2441. g_code(op_and, 4, mk_immed(255l), ap);
  2442. return mk_legal(ap, flags, 4l);
  2443. case bt_char:
  2444. ap = mk_legal(ap, F_DREG | F_VOL, 1l); // F_VOL is important here!
  2445. g_code(op_ext, 2, ap, NIL_AMODE); // (otherwise (short)(char)my_short fails...)
  2446. g_code(op_ext, 4, ap, NIL_AMODE);
  2447. return mk_legal(ap, flags, 4l);
  2448. case bt_ushort:
  2449. ap = mk_legal(ap, F_DREG | F_VOL, 2l);
  2450. g_code(op_and, 4, mk_immed(65535l), ap);
  2451. return mk_legal(ap, flags, 4l);
  2452. case bt_short:
  2453. f = flags & (F_DREG | F_AREG);
  2454. if (f == 0) f = F_DREG | F_AREG;
  2455. ap = mk_legal(ap, f | F_VOL, 2l); // F_VOL is important here!
  2456. if (ap->mode == am_dreg) // (otherwise (short)(char)my_short fails...)
  2457. g_code(op_ext, 4, ap, NIL_AMODE);
  2458. return mk_legal(ap, flags, 4l);
  2459. case bt_long:
  2460. case bt_ulong:
  2461. case bt_pointer:
  2462. return mk_legal(ap, flags, 4l);
  2463. #ifndef NOFLOAT
  2464. case bt_float:
  2465. #ifdef DOUBLE
  2466. case bt_double:
  2467. #endif
  2468. /* library call */
  2469. #ifndef BCDFLT
  2470. freeop(ap);
  2471. temp_inv();
  2472. g_code(op_move, 4, ap, push_am);
  2473. if (typ2 == bt_long)
  2474. call_library(str(ffpftol));
  2475. else
  2476. call_library(str(ffpftou));
  2477. return func_result(flags, 4l);
  2478. #else
  2479. fatal(
  2480. "__floatsibf"); // !!!STUDY ME!!!
  2481. #endif
  2482. #endif
  2483. }
  2484. break;
  2485. #ifndef NOFLOAT
  2486. case bt_float:
  2487. #ifdef DOUBLE
  2488. case bt_double:
  2489. #endif
  2490. switch (typ1) {
  2491. case bt_char:
  2492. case bt_uchar:
  2493. case bt_short:
  2494. case bt_ushort:
  2495. ap = g_cast(ap, typ1, bt_long, F_ALL);
  2496. case bt_long:
  2497. case bt_ulong:
  2498. case bt_pointer:
  2499. /* library call */
  2500. #ifndef BCDFLT
  2501. freeop(ap);
  2502. temp_inv();
  2503. g_code(op_move, 4, ap, push_am);
  2504. if (typ1 == bt_ulong || typ1 == bt_pointer)
  2505. call_library(str(ffputof));
  2506. else
  2507. call_library(str(ffpltof));
  2508. return func_result(flags, 4l);
  2509. #else
  2510. fatal(
  2511. "__fixbfsi"); // !!!STUDY ME!!!
  2512. #endif
  2513. case bt_float:
  2514. #ifdef DOUBLE
  2515. case bt_double:
  2516. #endif
  2517. return mk_legal(ap, flags, (long)float_size);
  2518. }
  2519. #endif
  2520. break;
  2521. }
  2522. ierr(G_CAST,1);
  2523. /* NOTREACHED */
  2524. return 0; // make the compiler happy
  2525. }
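/*
 * Sketch of the widening rules above (hypothetical helper, unused):
 * unsigned sources are widened by masking (and #255 / and #65535), while
 * signed sources are sign-extended (ext.w, then ext.l for char -> long).
 */
static long example_widen_char(long raw, int was_signed) {
	if (was_signed)
		return (long) (signed char) raw;	/* ext.w + ext.l */
	return raw & 0xFFl;				/* and.l #255    */
}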
  2526. struct amode *g_offset(struct amode *ap, int off) {
  2527. /*
* return the adjusted amode if ap can be switched to address a location
* with a short offset, or 0 if the cast must be done by hand.
* typical application: cast long -> short: 8(a6) --> 10(a6). offset is a small number (1, 2 or 3)
  2531. */
  2532. switch (ap->mode) {
  2533. case am_ind:
  2534. ap = copy_addr(ap);
  2535. ap->mode = am_indx;
  2536. ap->offset = mk_icon((long) off);
  2537. return ap;
  2538. case am_indx:
  2539. if (ap->offset->nodetype == en_icon &&
  2540. off + ap->offset->v.i <= 32767) {
  2541. ap = copy_addr(ap);
  2542. ap->offset->v.i += off;
  2543. return ap;
  2544. }
  2545. break;
  2546. case am_indx2:
  2547. case am_indx3:
  2548. if (ap->offset->nodetype == en_icon &&
  2549. off + ap->offset->v.i <= 127) {
  2550. ap = copy_addr(ap);
  2551. ap->offset->v.i += off;
  2552. return ap;
  2553. }
  2554. break;
  2555. case am_direct:
  2556. ap = copy_addr(ap);
  2557. ap->offset = mk_node(en_add, ap->offset,
  2558. mk_icon((long) off));
  2559. return ap;
  2560. }
  2561. /* special value indicating that it must be done by hand */
  2562. return 0;
  2563. }
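/*
 * Sketch (hypothetical helper, unused) of why a long -> short cast of a
 * memory operand is just an offset bump on the big-endian 68k: the low
 * word of a 32-bit value at address a lives at a+2, so 8(a6) becomes
 * 10(a6) with no data movement.
 */
static unsigned short example_low_word_big_endian(const unsigned char *a) {
	return (unsigned short) ((a[2] << 8) | a[3]);	/* bytes at offsets 2..3 */
}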
  2564. struct amode *g_xmul(struct enode *node, int flags, enum(e_op) op) {
  2565. /*
  2566. * performs a mixed-mode multiplication
  2567. */
  2568. struct amode *ap1, *ap2;
  2569. /* if (lineid==139)
  2570. bkpt();*/
  2571. ap1 = g_expr(node->v.p[1], F_DREG | F_VOL);
  2572. ap2 = g_expr(node->v.p[0], F_ALL & ~F_AREG);
  2573. validate(ap1);
  2574. g_code(op, 0, ap2, ap1);
  2575. freeop(ap2);
  2576. return mk_legal(ap1, flags, node->esize);
  2577. }
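/*
 * Sketch of the mixed-mode multiply above (hypothetical helper, unused):
 * mulu/muls take two 16-bit operands and deliver a full 32-bit product,
 * so a long multiply whose operands provably fit in 16 bits costs one
 * instruction instead of a __mulsi3 library call.
 */
static unsigned long example_mulu(unsigned short a, unsigned short b) {
	return (unsigned long) a * b;	/* one mulu on the 68000 */
}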
  2578. #ifndef __HAVE_STACK_IMAGE
  2579. #define __HAVE_STACK_IMAGE
  2580. typedef struct _stackimg {
  2581. int next_data,next_addr;
  2582. #ifndef INFINITE_REGISTERS
  2583. int reg_alloc_ptr,reg_stack_ptr;
  2584. char dreg_in_use[MAX_DATA+1];
  2585. char areg_in_use[MAX_ADDR+1];
  2586. struct reg_struct reg_stack[MAX_REG_STACK+1],reg_alloc[MAX_REG_STACK+1];
  2587. int act_scratch;
  2588. #endif
  2589. } STACK_IMAGE;
  2590. #endif
  2591. struct amode *g_compound(struct snode *st, int flags) {
  2592. STACK_IMAGE img;
  2593. struct amode *ap1,*ap2;
  2594. int old=need_res;
  2595. need_res=(~flags)&F_NOVALUE;
  2596. temp_inv(); /* we are forced to do so :'( */
  2597. usestack(&img);
  2598. genstmt(st);
  2599. need_res=old;
  2600. if (!(ap1=lastexpr_am)) {
  2601. if (!(flags&F_NOVALUE))
  2602. err_force_line=st->line, uerrc("no value returned in compound expression");
  2603. }
  2604. if (flags&F_NOVALUE) {
  2605. freeop(lastexpr_am);
  2606. return NIL_AMODE;
  2607. }
  2608. freestack(&img);
  2609. /* always one of F_DREG or F_AREG is set */
  2610. if ((flags&(F_AREG|F_DEREF))==(F_AREG|F_DEREF) || !(flags&F_DREG))
  2611. ap2=temp_addr();
  2612. else ap2=temp_data();
  2613. g_code(op_move, 4, ap1, ap2);
  2614. return ap2; /* we needn't call mk_legal :) */
  2615. }
  2616. struct amode *g_expr(struct enode *node, int flags) {
  2617. /*
  2618. * general expression evaluation. returns the addressing mode of the result.
  2619. *
  2620. * notice how most of the code actually lies in other functions: this is to
  2621. * reduce the stack footprint, which is necessary because calls to g_expr may
  2622. * be deeply nested
  2623. */
  2624. struct amode *ap1, *ap2;
  2625. unsigned int lab0
  2626. #ifndef ALTERNATE_HOOK
  2627. , lab1
  2628. #endif
  2629. ;
  2630. long size;
  2631. enum(e_bt) type;
  2632. if (node == 0)
  2633. ierr(G_EXPR,1);
  2634. /* if (node==0x7e1b50)
  2635. bkpt();*/
  2636. if (tst_const(node)) {
  2637. #ifndef NOBCDFLT
  2638. if (node->nodetype==en_fcon) {
  2639. extern int glblabel;
  2640. int lab = nxtglabel();
  2641. int i;
  2642. dseg();
  2643. put_align(2);
  2644. put_label(lab);
  2645. genfloat(node->v.f);
  2646. //char s[2+BCDLEN];
  2647. //int i;
  2648. //s[0] = (node->v.f.exponent>>8)&255;
  2649. //s[1] = (node->v.f.exponent>>0)&255;
  2650. //for (i=0;i<BCDLEN;i++)
  2651. // s[i+2] = node->v.f.mantissa[i];
  2652. //lab = stringlit(s,sizeof(s)-1);
  2653. node = mk_node(en_labcon, NIL_ENODE, NIL_ENODE);
  2654. node->v.enlab = lab;
  2655. node->etype = bt_pointer;
  2656. node->esize = 4;
  2657. }
  2658. #endif
  2659. ap1 = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+G_EXPR);
  2660. ap1->mode = am_immed;
  2661. ap1->offset = node;
  2662. return mk_legal(ap1, flags, node->esize);
  2663. }
  2664. type = node->etype;
  2665. size = node->esize;
  2666. switch (node->nodetype) {
  2667. case en_autocon:
  2668. ap1 = temp_addr();
  2669. #ifdef BIGSTACK
  2670. if (node->v.i >= -32768 && node->v.i <= 32767) {
  2671. #endif
  2672. ap2 = (struct amode *) xalloc((int) sizeof(struct amode),
  2673. AMODE+G_EXPR);
  2674. ap2->mode = am_indx;
  2675. ap2->preg = FRAMEPTR - AREGBASE; /* frame pointer */
  2676. ap2->offset = node; /* use as constant node */
  2677. g_code(op_lea, 0, ap2, ap1);
  2678. #ifdef BIGSTACK
  2679. } else {
g_code(op_move, 4, mk_immed((long) node->v.i), ap1); /* same offset as tested in the range check above */
  2681. g_code(op_add, 4, mk_reg(FRAMEPTR), ap1);
  2682. ap1 = copy_addr(ap1);
  2683. ap1->mode = am_ind;
  2684. }
  2685. #endif
  2686. return mk_legal(ap1, flags, size);
  2687. case en_ref:
  2688. /*
  2689. * g_deref uses flags and size only to test F_USES
  2690. */
  2691. ap1 = g_deref(node->v.p[0], type, flags, node->esize);
  2692. if (bt_aggregate(type))
  2693. return mk_legal(ap1, flags, 4l);
  2694. else
  2695. return mk_legal(ap1, flags, size);
  2696. case en_fieldref:
  2697. return g_fderef(node, NULL, flags);
  2698. case en_tempref:
  2699. ap1 = (struct amode *) xalloc((int) sizeof(struct amode), AMODE+G_EXPR);
  2700. if (node->v.i < AREGBASE) {
  2701. ap1->mode = am_dreg;
  2702. ap1->preg = (reg_t)node->v.i;
  2703. } else {
  2704. ap1->mode = am_areg;
  2705. ap1->preg = (reg_t)(node->v.i - AREGBASE);
  2706. }
  2707. return mk_legal(ap1, flags, size);
  2708. case en_uminus:
  2709. return g_unary(node, flags, op_neg);
  2710. case en_compl:
  2711. return g_unary(node, flags, op_not);
  2712. case en_add:
  2713. #ifdef VCG
  2714. return g_commute(g_addsub, node, flags, op_add, symmetric);
  2715. #else
  2716. return g_addsub(node, flags, op_add);
  2717. #endif
  2718. case en_sub:
  2719. //#ifdef VCG
  2720. // return g_commute(g_addsub, node, flags, op_sub, antisymmetric);
  2721. //#else
  2722. return g_addsub(node, flags, op_sub);
  2723. //#endif
  2724. case en_and:
  2725. #ifdef BITWISE_REDUCE
  2726. bitwise_optimize(node, -1L);
  2727. #endif
  2728. #ifdef VCG
  2729. return g_commute(g_ybin, node, flags, op_and, symmetric);
  2730. #else
  2731. return g_ybin(node, flags, op_and);
  2732. #endif
  2733. case en_or:
  2734. #ifdef BITWISE_REDUCE
  2735. bitwise_optimize(node, 0L);
  2736. #endif
  2737. #ifdef VCG
  2738. return g_commute(g_ybin, node, flags, op_or, symmetric);
  2739. #else
  2740. return g_ybin(node, flags, op_or);
  2741. #endif
  2742. case en_xor:
  2743. #ifdef BITWISE_REDUCE
  2744. bitwise_optimize(node, 0L);
  2745. #endif
  2746. #ifdef VCG
  2747. return g_commute(g_xbin, node, flags, op_eor, symmetric);
  2748. #else
  2749. return g_xbin(node, flags, op_eor);
  2750. #endif
  2751. case en_mul:
  2752. /*
  2753. * special optimization possible if there are patterns matching the
  2754. * 68000 mulu, muls instructions. ugly, but it gives a big
  2755. * performance increase
  2756. */
  2757. if (type == bt_long || type == bt_ulong || type == bt_pointer) {
  2758. /* TODO : (char/uchar) * (icon both short & ushort) would be more
  2759. * efficient with muls instead of mulu (ext.w instead of and.w
  2760. * #255)
  2761. */
  2762. if (tst_ushort(node->v.p[0]) && tst_ushort(node->v.p[1])) {
  2763. /*if (node->v.p[0]->esize>2) {*/
  2764. node->v.p[0]->etype = bt_ushort;
  2765. node->v.p[0]->esize = 2;
  2766. /*}
  2767. if (node->v.p[1]->esize>2) {*/
  2768. node->v.p[1]->etype = bt_ushort;
  2769. node->v.p[1]->esize = 2;
  2770. /*}*/
  2771. return g_xmul(node, flags, op_mulu);
  2772. } else if (tst_short(node->v.p[0]) && tst_short(node->v.p[1])) {
  2773. /*if (node->v.p[0]->esize>2) {*/
  2774. node->v.p[0]->etype = bt_short;
  2775. node->v.p[0]->esize = 2;
  2776. /*}
  2777. if (node->v.p[1]->esize>2) {*/
  2778. node->v.p[1]->etype = bt_short;
  2779. node->v.p[1]->esize = 2;
  2780. /*}*/
  2781. return g_xmul(node, flags, op_muls);
  2782. }
  2783. }
  2784. return g_mul(node, flags);
  2785. case en_div:
  2786. return g_div(node, flags);
  2787. case en_mod:
  2788. return g_mod(node, flags);
  2789. case en_lsh:
  2790. return g_shift(node, flags, op_lsl);
  2791. case en_rsh:
  2792. if (type==bt_ulong || type==bt_ushort || type==bt_uchar)
  2793. return g_shift(node, flags, op_lsr);
  2794. else
  2795. return g_shift(node, flags, op_asr);
  2796. case en_asadd:
  2797. return g_asadd(node, flags, op_add);
  2798. case en_assub:
  2799. return g_asadd(node, flags, op_sub);
  2800. case en_asand:
  2801. return g_aslogic(node, flags, op_and);
  2802. case en_asor:
  2803. return g_aslogic(node, flags, op_or);
  2804. case en_aslsh:
  2805. return g_asshift(node, flags, op_lsl);
  2806. case en_asrsh:
  2807. if (type==bt_ulong || type==bt_ushort || type==bt_uchar)
  2808. return g_asshift(node, flags, op_lsr);
  2809. else
  2810. return g_asshift(node, flags, op_asr);
  2811. case en_asmul:
  2812. return g_asmul(node, flags);
  2813. case en_asdiv:
  2814. return g_asdiv(node, flags);
  2815. case en_asmod:
  2816. return g_asmod(node, flags);
  2817. case en_asxor:
  2818. return g_asxor(node, flags);
  2819. case en_assign:
  2820. return g_assign(node, flags);
  2821. case en_ainc:
  2822. return g_aincdec(node, flags, op_add);
  2823. case en_adec:
  2824. return g_aincdec(node, flags, op_sub);
  2825. case en_land:
  2826. case en_lor:
  2827. case en_eq:
  2828. case en_ne:
  2829. case en_lt:
  2830. case en_le:
  2831. case en_gt:
  2832. case en_ge:
  2833. case en_not:
  2834. #ifndef ALTERNATE_HOOK
  2835. lab0 = nxtlabel();
  2836. lab1 = nxtlabel();
  2837. falsejp(node, lab0);
  2838. ap1 = temp_data();
  2839. g_code(op_moveq, 0, mk_immed(1l), ap1);
  2840. g_code(op_bra, 0, mk_label(lab1), NIL_AMODE);
  2841. g_label(lab0);
  2842. g_code(op_moveq, 0, mk_immed(0l), ap1);
  2843. g_label(lab1);
  2844. #else
  2845. lab0 = nxtlabel();
  2846. ap1 = temp_data();
  2847. g_code(op_moveq, 0, mk_immed(0l), ap1);
  2848. falsejp(node, lab0);
  2849. g_code(op_moveq, 0, mk_immed(1l), ap1);
  2850. g_label(lab0);
  2851. #endif
  2852. return mk_legal(ap1, flags, size);
  2853. case en_cond:
  2854. return g_hook(node, flags);
  2855. case en_void:
  2856. freeop(g_expr(node->v.p[0], F_ALL | F_SRCOP | F_NOVALUE));
  2857. return g_expr(node->v.p[1], flags);
  2858. case en_fcall:
  2859. return g_fcall(node, flags);
  2860. case en_alloca:
  2861. return mk_legal(g_alloca(node), flags, 4);
  2862. case en_cast:
  2863. /*
  2864. * On the 68000, suppress all casts between any of
  2865. * long, unsigned long, pointer
  2866. */
  2867. if (type == bt_pointer || type == bt_long || type == bt_ulong) {
  2868. type = node->v.p[0]->etype;
  2869. if (type == bt_pointer || type == bt_long || type == bt_ulong)
  2870. return g_expr(node->v.p[0], flags);
  2871. }
  2872. /*
  2873. * The cast really results in some work
  2874. */
  2875. #ifdef G_CAST2
  2876. return g_cast2(node->v.p[0], node->etype, flags);
  2877. #else
  2878. return g_cast(g_expr(node->v.p[0], F_ALL | F_SRCOP | F_GCAST),
  2879. node->v.p[0]->etype,
  2880. node->etype, flags);
  2881. #endif
  2882. case en_deref:
  2883. /*
  2884. * The cases where this node occurs are handled automatically:
  2885. * g_assign and g_fcall return a pointer to a structure rather than a
  2886. * structure.
  2887. */
  2888. return g_expr(node->v.p[0], flags);
  2889. case en_compound:
  2890. return g_compound(node->v.st, flags);
  2891. default:
  2892. uerr(ERR_OTH,"debug: node=$%lx, nodetype=%d, etype=%d, esize=%d",node,node->nodetype,node->etype,node->esize);
  2893. ierr(G_EXPR,2);
  2894. /* NOTREACHED */
  2895. return 0; // make the compiler happy
  2896. }
  2897. }
  2898. extern struct enode *regexp[REGEXP_SIZE];
  2899. int tst_ushort(struct enode *node) {
  2900. /*
* tests if node is an integer constant falling in the range of uns. short or
  2902. * if node is cast from uns. short, uns. char or char.
  2903. */
  2904. enum(e_bt) type;
  2905. if (node->nodetype == en_icon && 0 <= node->v.i && node->v.i <= 65535)
  2906. return 1;
  2907. if (node->nodetype == en_tempref) /* because it could be something
  2908. like a constant in a register */
  2909. return tst_ushort(regexp[reg_t_to_regexp(node->v.i)]);
  2910. if (node->nodetype == en_cast) {
  2911. type = node->v.p[0]->etype;
  2912. if (type == bt_ushort || type == bt_uchar || type == bt_char)
  2913. return 1;
  2914. }
  2915. return 0;
  2916. }
  2917. int tst_short(struct enode *node) {
  2918. /*
* tests if node is an integer constant falling in the range of short or if
  2920. * node is cast from signed or unsigned short.
  2921. */
  2922. enum(e_bt) type;
  2923. if (node->nodetype == en_icon && -32768 <= node->v.i && node->v.i <= 32767)
  2924. return 1;
  2925. if (node->nodetype == en_tempref) /* because it could be something
  2926. like a constant in a register */
  2927. return tst_short(regexp[reg_t_to_regexp(node->v.i)]);
  2928. if (node->nodetype == en_cast) {
  2929. type = node->v.p[0]->etype;
  2930. if (type == bt_short || type == bt_ushort
  2931. || type == bt_char || type == bt_uchar)
  2932. return 1;
  2933. }
  2934. return 0;
  2935. }
  2936. int tst_const(struct enode *node) {
  2937. /*
  2938. * tests if it is a constant node, that means either en_icon, en_nacon or
  2939. * en_labcon, or sums or differences of such nodes
  2940. */
  2941. enum(e_node) typ1 = node->nodetype;
  2942. enum(e_node) typ2;
  2943. if (typ1 == en_icon || typ1 == en_nacon || typ1 == en_labcon
  2944. || typ1 == en_fcon)
  2945. return 1;
  2946. if (typ1 == en_add || typ1 == en_sub) {
  2947. typ1 = node->v.p[0]->nodetype;
  2948. typ2 = node->v.p[1]->nodetype;
  2949. if (((typ1 == en_nacon || typ1 == en_labcon) && typ2 == en_icon) ||
  2950. ((typ2 == en_nacon || typ2 == en_labcon) && typ1 == en_icon))
  2951. return 1;
  2952. }
  2953. return 0;
  2954. }
  2955. static int g_compare(struct enode *node) {
  2956. /*
  2957. * generate code to do a comparison of the two operands of node. returns 1 if
  2958. * it was an unsigned comparison
  2959. */
  2960. struct amode *ap1, *ap2, *ap3;
  2961. #ifndef NOFLOAT
  2962. long i;
  2963. #endif
  2964. switch (node->v.p[0]->etype) {
  2965. case bt_uchar:
  2966. case bt_char:
  2967. case bt_ushort:
  2968. case bt_short:
  2969. case bt_pointer:
  2970. case bt_long:
  2971. case bt_ulong:
  2972. ap2 = g_expr(node->v.p[1], F_ALL | F_SRCOP);
  2973. /* We want to handle the special case 'tst.w pcrel_variable' smoothly,
* which is why we set F_SRCOP in the call above.
* We will unset it later on. */
  2976. if (ap2->mode == am_immed)
  2977. ap1 = g_expr(node->v.p[0], (F_ALL & ~F_IMMED) | F_SRCOP);
  2978. else
  2979. ap1 = g_expr(node->v.p[0], F_AREG | F_DREG);
  2980. validate(ap2);
  2981. /*if (ap1->mode == am_direct
  2982. && (ap->offset->nodetype == en_nacon
  2983. || ap->offset->nodetype == en_labcon)
  2984. #ifdef AS
  2985. && !external(ap->offset->v.enlab)
  2986. #else
  2987. #ifdef PC
  2988. && ((long)ap->offset->v.ensp>0x1000 ? internal(ap->offset->v.ensp) : 1)
  2989. #endif
  2990. #endif
  2991. )
  2992. t*/
  2993. /*
  2994. * sorry, no tst.l An on the 68000, but we can move to a data
  2995. * register if one is free
  2996. * As there is no tst.l myval(pc), we can do it this way for am_direct's too.
  2997. */
  2998. if ((ap1->mode == am_areg || ap1->mode==am_direct)
  2999. && node->v.p[1]->nodetype == en_icon
  3000. && node->v.p[1]->v.i == 0 && free_data()) {
  3001. ap3 = temp_data();
  3002. g_code(op_move, node->v.p[0]->esize, ap1, ap3);
  3003. /* tst.l ap3 not needed */
  3004. freeop(ap3);
  3005. } else {
  3006. /* the only case where the following != nop is when ap2->mode==am_immed and ap1->mode==am_direct */
  3007. ap1=mk_legal(ap1, F_ALL, node->v.p[0]->esize);
  3008. g_code(op_cmp, (int) node->v.p[0]->esize, ap2, ap1);
  3009. }
  3010. freeop(ap1);
  3011. freeop(ap2);
  3012. if (node->v.p[0]->etype == bt_char ||
  3013. node->v.p[0]->etype == bt_short ||
  3014. node->v.p[0]->etype == bt_long)
  3015. return 0;
  3016. return 1;
  3017. case bt_struct:
  3018. ap1 = g_expr(node->v.p[1], F_AREG | F_VOL);
  3019. ap2 = g_expr(node->v.p[0], F_AREG | F_VOL);
  3020. validate(ap1); {
  3021. int lab=nxtlabel();
  3022. ap1 = copy_addr(ap1);
  3023. ap2 = copy_addr(ap2);
  3024. ap2->mode=ap1->mode=am_ainc;
  3025. ap3 = temp_data();
  3026. freeop(ap3);
  3027. g_code(op_move,2,mk_immed((node->esize-1)>>1),ap3);
  3028. g_label(lab);
  3029. g_code(op_cmp,2,ap1,ap2);
  3030. g_code(op_dbne,0,ap3,mk_label(lab));
  3031. }
  3032. freeop(ap2);
  3033. freeop(ap1);
  3034. return 1;
#ifndef NOFLOAT
    case bt_float:
#ifdef DOUBLE
    case bt_double:
#endif
#ifndef BCDFLT
        if (node->v.p[1]->nodetype == en_fcon && node->v.p[1]->v.f == 0) {
            node->etype = bt_long;  /* no conversion func call (raw cast) */
            node = mk_node(en_cast, node, NIL_ENODE);
            node->etype = bt_char;
            node->esize = 1;
            ap1 = g_expr(node, F_DALT);
            g_code(op_tst, (int) node->esize, ap1, NIL_AMODE);
            freeop(ap1);
            return 0;  /* tst is a signed test; without this return we
                        * would fall through to ierr() below */
        } else {
#endif
            temp_inv();
            i = push_param(node->v.p[1]);
            i += push_param(node->v.p[0]);
#ifndef BCDFLT
            call_library(str(ffpcmp));
#else
            call_library("__cmpbf2");
#endif
            g_code(op_add, 4, mk_immed((long) i), mk_reg(STACKPTR));
            return 0;
#ifndef BCDFLT
        }
#endif
#endif
    }
    ierr(G_COMPARE, 1);
    /* NOTREACHED */
    return 0;  // make the compiler happy
}
#ifndef NOBCDFLT
readonly struct enode __bcd_zero__value = {
    en_labcon,
#ifdef DOUBLE
    bt_double,
#else
    bt_float,
#endif
    10
};
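/* bcd_zero patches the "__bcd_zero" symbol into the static node above and
 * then yields the node's address; the comma expressions make the macro
 * usable wherever an enode pointer is expected */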
#ifdef AS
#define bcd_zero (pchsearch("__bcd_zero",PCHS_ADD), \
                  __bcd_zero__value.v.enlab=label("__bcd_zero"), \
                  &__bcd_zero__value)
#else
#define bcd_zero (__bcd_zero__value.v.ensp="__bcd_zero", &__bcd_zero__value)
#endif
#endif
void truejp(struct enode *node, unsigned int lab) {
/*
 * generate a jump to lab if the node passed evaluates to a true condition.
 */
    struct amode *ap;
    unsigned int lab0;
    if (node == 0)
        ierr(TRUEJP, 1);
    if (node->nodetype == en_icon) {
        if (node->v.i)
            g_code(op_bra, 0, mk_label(lab), NIL_AMODE);
        return;
    }
    opt_compare(node);
    switch (node->nodetype) {
    case en_eq:
        (void) g_compare(node);
        g_code(op_beq, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_ne:
        (void) g_compare(node);
        g_code(op_bne, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_lt:
    case en_le:
    case en_gt:
    case en_ge:
    {
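        /* pick the branch opcode arithmetically; this relies on en_lt,
         * en_le, en_gt, en_ge being consecutive and on the opcode table
         * pairing each unsigned branch with its signed counterpart right
         * behind it, in the order bhs/bge, bhi/bgt, bls/ble, blo/blt
         * (cf. the commented-out per-case version below) */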
        int n = (en_ge - node->nodetype) * 2 + op_bhs;
        g_code(g_compare(node) ? n : n + 1, 0, mk_label(lab), NIL_AMODE);
        break;
    }
    case en_fieldref:
    {
        struct enode *ep, *ep2;
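        /* rewrite the bitfield test as "(*p & mask) != 0": the mask
         * selects bit_width bits at bit_offset (counted from the MSB)
         * within the esize-byte cell */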
        ep = mk_node(en_ref, node->v.p[0], (struct enode *) NIL_AMODE);
        ep->esize = node->esize;
        ep->etype = node->etype;
        ep2 = mk_icon(((1 << node->bit_width) - 1) <<
                      ((node->esize << 3) - node->bit_offset - node->bit_width));
        ep2->esize = node->esize;
        ep2->etype = node->etype;
        ep = mk_node(en_and, ep, ep2);
        ep->esize = node->esize;
        ep->etype = node->etype;
        truejp(ep, lab);
        break;
    }
    /* case en_lt:
        g_compare(node) ?
            g_code(op_blo, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_blt, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_le:
        g_compare(node) ?
            g_code(op_bls, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_ble, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_gt:
        g_compare(node) ?
            g_code(op_bhi, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_bgt, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_ge:
        g_compare(node) ?
            g_code(op_bhs, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_bge, 0, mk_label(lab), NIL_AMODE);
        break; */
    case en_land:
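        /* a && b: if a is false, skip past the test of b */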
        lab0 = nxtlabel();
        falsejp(node->v.p[0], lab0);
        truejp(node->v.p[1], lab);
        g_label(lab0);
        break;
    case en_lor:
        truejp(node->v.p[0], lab);
        truejp(node->v.p[1], lab);
        break;
    case en_not:
        falsejp(node->v.p[0], lab);
        break;
#ifdef OPTIMIZED_AINCDEC_TEST
    case en_adec:
        // struct amode *lblap = mk_label(lab);
        ap = g_expr(node->v.p[0], F_ALL);
        /* if (ap->mode==am_dreg)
            freeop(ap),
            g_code(op_dbra, 0, ap, lblap);
        else {*/
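        /* sub #1 borrows (sets the carry) exactly when the old value was
         * zero, so bhs (carry clear) branches iff the value before the
         * decrement was nonzero, i.e. the condition was true */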
        g_code(op_sub, (int) node->esize, mk_immed(1), ap);
        freeop(ap);
        g_code(op_bhs, 0, /*lblap*/ mk_label(lab), NIL_AMODE);
        /* } */
        break;
#endif
    case en_and:
/*#ifdef PC*/
#define is_powerof2(x) (((x)<<1)==((x)^((x)-1))+1)
/*#else
#define is_powerof2(x) ({int __x=x;((__x)<<1)==((__x)^((__x)-1))+1;})
#endif*/
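        /* for a power of two, x^(x-1) sets all bits up to and including
         * the single one bit: e.g. 64^63 = 127, and 127+1 == 64<<1.
         * Note the macro evaluates x three times, so only pass
         * side-effect-free arguments. */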
        if (node->v.p[1]->nodetype == en_icon) {
            unsigned long v = node->v.p[1]->v.i;
            if (v >= 128 /* otherwise moveq is cool enough and faster for dregs */
                && is_powerof2(v)) {
                ap = g_expr(node->v.p[0], F_DREG | F_MEM);
                g_bitmancode(op_btst, node->v.p[0]->esize, mk_immed(pwrof2(v)), ap);
                freeop(ap);
                g_code(op_bne, 0, mk_label(lab), NIL_AMODE);
                break;
            }
        }
        /* FALL THROUGH */
    default:
#ifndef NOFLOAT
        if (node->etype == bt_float || node->etype == bt_double) {
#ifdef DOUBLE
            long i;
            temp_inv();
            i = push_param(node);
            call_library(".fptst");  // obsolete
            /* The pop-off does not change the condition codes */
            g_code(op_add, 4, mk_immed((long) i), mk_reg(STACKPTR));
        } else {
#else
#ifndef BCDFLT
            node->etype = bt_long;  /* no conversion func call (raw cast) */
            node = mk_node(en_cast, node, NIL_ENODE);
            node->etype = bt_char;
            node->esize = 1;
        }
        {
#else
            g_compare(mk_node(en_void, node, bcd_zero));
        } else {
#endif
#endif
#else
        {
#endif
            ap = g_expr(node, F_DALT | F_SRCOP);
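            /* as in g_compare(): there is no tst.l myval(pc), so go
             * through a free data register for am_direct operands */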
            if (ap->mode == am_direct && free_data()) {
                struct amode *ap2 = temp_data();
                g_code(op_move, (int) node->esize, ap, ap2);
                g_code(op_tst, (int) node->esize, ap2, NIL_AMODE);
                freeop(ap2);
            } else
                g_code(op_tst, (int) node->esize, ap, NIL_AMODE);
            freeop(ap);
        }
        g_code(op_bne, 0, mk_label(lab), NIL_AMODE);
        break;
    }
}
void falsejp(struct enode *node, unsigned int lab) {
/*
 * generate code to execute a jump to lab if the expression passed is
 * false.
 */
    struct amode *ap;
    unsigned int lab0;
    if (node == 0)
        ierr(FALSEJP, 1);
    if (node->nodetype == en_icon) {
        if (!node->v.i)
            g_code(op_bra, 0, mk_label(lab), NIL_AMODE);
        return;
    }
    opt_compare(node);
    switch (node->nodetype) {
    case en_eq:
        (void) g_compare(node);
        g_code(op_bne, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_ne:
        (void) g_compare(node);
        g_code(op_beq, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_lt:
    case en_le:
    case en_gt:
    case en_ge:
    {
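        /* inverse of the mapping in truejp(): the false jump for en_lt
         * is bhs/bge, for en_le bhi/bgt, and so on; same assumptions
         * about the enum and opcode-table ordering */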
        int n = (node->nodetype - en_lt) * 2 + op_bhs;
        g_code(g_compare(node) ? n : n + 1, 0, mk_label(lab), NIL_AMODE);
        break;
    }
    case en_fieldref:
    {
        struct enode *ep, *ep2;
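        /* same "(*p & mask)" bitfield rewrite as in truejp() */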
        ep = mk_node(en_ref, node->v.p[0], (struct enode *) NIL_AMODE);
        ep->esize = node->esize;
        ep->etype = node->etype;
        ep2 = mk_icon(((1 << node->bit_width) - 1) <<
                      ((node->esize << 3) - node->bit_offset - node->bit_width));
        ep2->esize = node->esize;
        ep2->etype = node->etype;
        ep = mk_node(en_and, ep, ep2);
        ep->esize = node->esize;
        ep->etype = node->etype;
        falsejp(ep, lab);
        break;
    }
    /* case en_lt:
        g_compare(node) ?
            g_code(op_bhs, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_bge, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_le:
        g_compare(node) ?
            g_code(op_bhi, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_bgt, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_gt:
        g_compare(node) ?
            g_code(op_bls, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_ble, 0, mk_label(lab), NIL_AMODE);
        break;
    case en_ge:
        g_compare(node) ?
            g_code(op_blo, 0, mk_label(lab), NIL_AMODE) :
            g_code(op_blt, 0, mk_label(lab), NIL_AMODE);
        break; */
    case en_land:
        falsejp(node->v.p[0], lab);
        falsejp(node->v.p[1], lab);
        break;
    case en_lor:
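        /* a || b is false only if both are: if a is true, skip past the
         * test of b */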
        lab0 = nxtlabel();
        truejp(node->v.p[0], lab0);
        falsejp(node->v.p[1], lab);
        g_label(lab0);
        break;
    case en_not:
        truejp(node->v.p[0], lab);
        break;
#ifdef OPTIMIZED_AINCDEC_TEST
    case en_adec:
        ap = g_expr(node->v.p[0], F_ALL);
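        /* blo (carry set) branches iff the sub #1 borrowed, i.e. the
         * value before the decrement was zero */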
        g_code(op_sub, (int) node->esize, mk_immed(1), ap);
        freeop(ap);
        g_code(op_blo, 0, mk_label(lab), NIL_AMODE);
        break;
#endif
    case en_and:
        if (node->v.p[1]->nodetype == en_icon) {
            unsigned long v = node->v.p[1]->v.i;
            if (v >= 128 /* otherwise moveq is cool enough and faster for dregs */
                && is_powerof2(v)) {
                ap = g_expr(node->v.p[0], F_DREG | F_MEM);
                g_bitmancode(op_btst, node->v.p[0]->esize, mk_immed(pwrof2(v)), ap);
                freeop(ap);
                g_code(op_beq, 0, mk_label(lab), NIL_AMODE);
                break;
            }
        }
        /* FALL THROUGH */
    default:
#ifndef NOFLOAT
        if (node->etype == bt_float || node->etype == bt_double) {
#ifdef DOUBLE
            long i;
            temp_inv();
            i = push_param(node);
            call_library(".fptst");  // obsolete
            /* The pop-off does not change the condition codes */
            g_code(op_add, 4, mk_immed((long) i), mk_reg(STACKPTR));
        } else {
#else
#ifndef BCDFLT
            node->etype = bt_long;  /* no conversion func call (raw cast) */
            node = mk_node(en_cast, node, NIL_ENODE);
            node->etype = bt_char;
            node->esize = 1;
        }
        {
#else
            g_compare(mk_node(en_void, node, bcd_zero));
        } else {
#endif
#endif
#else
        {
#endif
            ap = g_expr(node, F_DALT | F_SRCOP);
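            /* as in g_compare(): there is no tst.l myval(pc), so go
             * through a free data register for am_direct operands */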
            if (ap->mode == am_direct && free_data()) {
                struct amode *ap2 = temp_data();
                g_code(op_move, (int) node->esize, ap, ap2);
                g_code(op_tst, (int) node->esize, ap2, NIL_AMODE);
                freeop(ap2);
            } else
                g_code(op_tst, (int) node->esize, ap, NIL_AMODE);
            freeop(ap);
        }
        g_code(op_beq, 0, mk_label(lab), NIL_AMODE);
        break;
    }
}
void opt_compare(struct enode *node) {
/* temprefs should be the second operand to a cmp instruction */
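/* e.g. "5 < x" becomes "x > 5", so the constant ends up as the cmp
 * source operand and the register/tempref as its destination */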
    enum(e_node) t = node->nodetype;
    if ((t == en_eq || t == en_ne || t == en_le || t == en_ge
         || t == en_lt || t == en_gt)
        && (node->v.p[1]->nodetype == en_tempref ||
            node->v.p[0]->nodetype == en_icon)) {
        swap_nodes(node);
        /* when the operands are swapped, the comparison operator must be
         * mirrored as well */
        switch (t) {
        case en_le:
            node->nodetype = en_ge;
            break;
        case en_ge:
            node->nodetype = en_le;
            break;
        case en_lt:
            node->nodetype = en_gt;
            break;
        case en_gt:
            node->nodetype = en_lt;
            break;
        }
    }
}
#endif /* MC680X0 */
// vim:ts=4:sw=4