memory.c

/*
 * (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
 * See the copyright notice in the ACK home directory, in the file "Copyright".
 */
/*
 * Memory manager. Memory is divided into NMEMS pieces. There is a struct
 * for each piece telling where it is, how many bytes are used, and how many
 * are left. If a request for core doesn't fit in the left bytes, an sbrk()
 * is done and pieces after the one that requested the growth are moved up.
 *
 * Unfortunately, we cannot use sbrk to request more memory, because its
 * result cannot be trusted. More specifically, it does not work properly
 * on 2.9 BSD, and probably does not work properly on 2.8 BSD and V7 either.
 * The problem is that "sbrk" adds the increment to the current "break"
 * WITHOUT testing the carry bit. So, if your break is at 40000, and
 * you "sbrk(30000)", it will succeed, but your break will be at 4464
 * (70000 - 65536).
 */
#include <stdio.h>
#include <unistd.h>
#include <out.h>
#include "const.h"
#include "assert.h"
#include "debug.h"
#include "arch.h"
#include "memory.h"
#include "defs.h"
#include "object.h"
#include <missing_proto.h>

static ind_t move_up(int piece, ind_t incr);
static bool compact(int piece, ind_t incr, int flag);
static void copy_down(struct memory *mem, ind_t dist);
static void copy_up(struct memory *mem, ind_t dist);
static void free_saved_moduls();

struct memory mems[NMEMS];
bool incore = TRUE;             /* TRUE while everything can be kept in core. */
ind_t core_position = (ind_t)0; /* Index of current module. */

#define GRANULE 64              /* power of 2 */

static char *BASE;
static ind_t refused;
int sbreak(ind_t incr)
{
    unsigned int inc;

    incr = (incr + (GRANULE - 1)) & ~(GRANULE - 1);
    inc = incr;
    if ((refused && refused < incr) ||
        (sizeof(char *) < sizeof(long) &&
         (inc != incr || BASE + inc < BASE)) ||
        (void *)brk(BASE + incr) == (void *)-1) {
        if (!refused || refused > incr)
            refused = incr;
        return -1;
    }
    BASE += incr;
    return 0;
}
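
/*
 * Illustration only (hypothetical helper, not part of the original source):
 * what the rounding at the top of sbreak() computes. With GRANULE == 64,
 * a request of 1 byte becomes 64, 64 stays 64, and 65 becomes 128; the mask
 * works because GRANULE is a power of 2.
 */
#ifdef EXAMPLE_GRANULE_ROUNDING
static ind_t example_round_to_granule(ind_t n)
{
    /* Round n up to the next multiple of GRANULE. */
    return (n + (GRANULE - 1)) & ~(GRANULE - 1);
}
#endif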
/*
 * Initialize some pieces of core. We hope that this will be our last
 * real allocation, meaning we've made the right choices.
 */
void init_core()
{
    char *base;
    ind_t total_size;
    struct memory *mem;

#include "mach.c"

#define ALIGN 8                     /* minimum alignment for pieces */
#define AT_LEAST (ind_t)2*ALIGN     /* See comment about string areas. */

    total_size = (ind_t)0;          /* Will accumulate the sizes. */
    BASE = base = (char *)sbrk(0);  /* First free. */
    if ((int)base % ALIGN) {
        base = (char *)sbrk(ALIGN - (int)base % ALIGN);
        BASE = base = (char *)sbrk(0);
    }
    /*
     * String areas are special-cased. The first byte is unused as a way to
     * distinguish a name without string from a name which has the first
     * string in the string area.
     */
    for (mem = mems; mem < &mems[NMEMS]; mem++) {
        mem->mem_base = base;
        mem->mem_full = (ind_t)0;
        if (mem == &mems[ALLOLCHR] || mem == &mems[ALLOGCHR]) {
            if (mem->mem_left == 0) {
                mem->mem_left = ALIGN;
                total_size += ALIGN;
                base += ALIGN;
            }
            base += mem->mem_left;
            total_size += mem->mem_left;
            mem->mem_left--;
            mem->mem_full++;
        }
        else {
            base += mem->mem_left;  /* Each piece will start after prev. */
            total_size += mem->mem_left;
        }
    }
    if (sbreak(total_size) == -1) {
        incore = FALSE;             /* In core strategy failed. */
        if (sbreak(AT_LEAST) == -1)
            fatal("no core at all");
        base = BASE;
        for (mem = mems; mem < &mems[NMEMS]; mem++) {
            mem->mem_base = base;
            if (mem == &mems[ALLOLCHR] || mem == &mems[ALLOGCHR]) {
                base += ALIGN;
                mem->mem_left = ALIGN - 1;
                mem->mem_full = 1;
            }
            else {
                mem->mem_full = (ind_t)0;
                mem->mem_left = 0;
            }
        }
    }
}
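
/*
 * Editorial note on the string areas above (an inference drawn from this file
 * only): because byte 0 of ALLOLCHR and ALLOGCHR is deliberately left unused,
 * an in-core string offset of 0 can mean "no string" while the first real
 * string starts at offset 1. write_bytes() below relies on this when it
 * writes the string pieces starting at mem_base + 1.
 */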
/*
 * Allocate an extra block of `incr' bytes and move all pieces with index
 * higher than `piece' up with the size of the block.
 * Move up as much as possible, if "incr" fails.
 */
static ind_t move_up(int piece, ind_t incr)
{
    register struct memory *mem;
#ifndef NOSTATISTICS
    extern int statistics;
#endif

    debug("move_up(%d, %d)\n", piece, (int)incr);
    while (incr > 0 && sbreak(incr) == -1)
        incr -= INCRSIZE;
    if (incr <= 0) {
        incr = 0;
        return (ind_t) 0;
    }
#ifndef NOSTATISTICS
    if (statistics) fprintf(stderr, "moving up %lx\n", (long) incr);
#endif
    for (mem = &mems[NMEMS - 1]; mem > &mems[piece]; mem--)
        copy_up(mem, incr);
    mems[piece].mem_left += incr;
    return incr;
}
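
/*
 * Illustration (editorial, not from the original source): if piece 2 needs
 * more room and sbreak() succeeds, the layout changes from
 *
 *     | 0 | 1 | 2 | 3 | 4 |
 * to
 *     | 0 | 1 | 2 | gap | 3 | 4 |
 *
 * where pieces 3 and 4 have been copy_up()'d by `incr' bytes and the gap of
 * `incr' bytes is credited to mems[2].mem_left.
 */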
extern int passnumber;

/*
 * This routine is called if `piece' needs `incr' bytes and the system won't
 * give them. We first steal the free bytes of all lower pieces and move them
 * and `piece' down. If that doesn't give us enough bytes, we steal the free
 * bytes of all higher pieces and move them up. We return whether we have
 * enough bytes, the first or the second time.
 */
#define NORMAL 0
#define FREEZE 1
#define FORCED 2
static bool compact(int piece, ind_t incr, int flag)
{
    ind_t gain, size;
    struct memory *mem;
    int min = piece, max = piece;
#define SHIFT_COUNT 2   /* Let pieces only contribute if their free memory is
                           more than 1/2**SHIFT_COUNT * 100 % of their
                           occupied memory.
                         */

    debug("compact(%d, %d, %d)\n", piece, (int)incr, flag);
    for (mem = &mems[0]; mem < &mems[NMEMS - 1]; mem++) {
        assert(mem->mem_base + mem->mem_full + mem->mem_left == (mem+1)->mem_base);
    }
    mem = &mems[piece];
    if (flag == NORMAL) {
        /* try and gain a bit more than needed */
        gain = (mem->mem_full + incr) >> SHIFT_COUNT;
        if (incr < gain) incr = gain;
    }
    /*
     * First, check that moving will result in enough space
     */
    if (flag != FREEZE) {
        gain = mem->mem_left;
        for (mem = &mems[piece-1]; mem >= &mems[0]; mem--) {
            /*
             * Don't give it all away!
             * If this does not give us enough, bad luck
             */
            if (flag == FORCED)
                size = 0;
            else {
                size = mem->mem_full >> SHIFT_COUNT;
                if (size == 0) size = mem->mem_left >> 1;
            }
            if (mem->mem_left >= size)
                gain += (mem->mem_left - size) & ~(ALIGN - 1);
            if (gain >= incr) {
                min = mem - &mems[0];
                break;
            }
        }
        if (min == piece)
            for (mem = &mems[piece+1]; mem <= &mems[NMEMS - 1]; mem++) {
                /*
                 * Don't give it all away!
                 * If this does not give us enough, bad luck
                 */
                if (flag == FORCED)
                    size = 0;
                else {
                    size = mem->mem_full >> SHIFT_COUNT;
                    if (size == 0) size = mem->mem_left >> 1;
                }
                if (mem->mem_left >= size)
                    gain += (mem->mem_left - size) & ~(ALIGN - 1);
                if (gain >= incr) {
                    max = mem - &mems[0];
                    break;
                }
            }
        if (min == piece) {
            min = 0;
            if (max == piece) max = 0;
        }
        if (gain < incr) return 0;
    }
    else {
        min = 0;
        max = NMEMS - 1;
    }
    gain = 0;
    for (mem = &mems[min]; mem != &mems[piece]; mem++) {
        /* Here memory is inserted before a piece. */
        assert(passnumber == FIRST || gain == (ind_t)0);
        if (gain) copy_down(mem, gain);
        if (flag == FREEZE || gain < incr) {
            if (flag != NORMAL) size = 0;
            else {
                size = mem->mem_full >> SHIFT_COUNT;
                if (size == 0) size = mem->mem_left >> 1;
            }
            if (mem->mem_left >= size) {
                size = (mem->mem_left - size) & ~(ALIGN - 1);
                gain += size;
                mem->mem_left -= size;
            }
        }
    }
    /*
     * Now mems[piece]:
     */
    if (gain) copy_down(mem, gain);
    gain += mem->mem_left;
    mem->mem_left = 0;
    if (gain < incr) {
        ind_t up = (ind_t)0;

        for (mem = &mems[max]; mem > &mems[piece]; mem--) {
            /* Here memory is appended after a piece. */
            if (flag == FREEZE || gain + up < incr) {
                if (flag != NORMAL) size = 0;
                else {
                    size = mem->mem_full >> SHIFT_COUNT;
                    if (size == 0) size = mem->mem_left >> 1;
                }
                if (mem->mem_left >= size) {
                    size = (mem->mem_left - size) & ~(ALIGN - 1);
                    up += size;
                    mem->mem_left -= size;
                }
            }
            if (up) copy_up(mem, up);
        }
        gain += up;
    }
    mems[piece].mem_left += gain;
    assert(flag == FREEZE || gain >= incr);
    for (mem = &mems[0]; mem < &mems[NMEMS - 1]; mem++) {
        assert(mem->mem_base + mem->mem_full + mem->mem_left == (mem+1)->mem_base);
    }
    return gain >= incr;
}
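
/*
 * Illustration only (hypothetical helper, not part of the original source):
 * how many bytes a single piece donates under the NORMAL policy used above.
 * A piece keeps a quarter of its occupied size (mem_full >> SHIFT_COUNT), or
 * half of its free space when that quarter is zero, and donates the rest,
 * rounded down to a multiple of ALIGN.
 */
#ifdef EXAMPLE_CONTRIBUTION
static ind_t example_contribution(struct memory *mem)
{
    ind_t keep = mem->mem_full >> SHIFT_COUNT;

    if (keep == 0)
        keep = mem->mem_left >> 1;
    if (mem->mem_left < keep)
        return (ind_t)0;
    return (mem->mem_left - keep) & ~(ALIGN - 1);
}
#endif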
/*
 * The bytes of `mem' must be moved `dist' down in the address space.
 * We copy the bytes from low to high, because the tail of the new area may
 * overlap with the old area, but we do not want to overwrite the old bytes
 * before they are copied.
 */
static void copy_down(struct memory *mem, ind_t dist)
{
    char *old;
    char *new;
    ind_t size;

    size = mem->mem_full;
    old = mem->mem_base;
    new = old - dist;
    mem->mem_base = new;
    while (size--)
        *new++ = *old++;
}

/*
 * The bytes of `mem' must be moved `dist' up in the address space.
 * We copy the bytes from high to low, because the head of the new area may
 * overlap with the old area, but we do not want to overwrite the old bytes
 * before they are copied.
 */
static void copy_up(struct memory *mem, ind_t dist)
{
    char *old;
    char *new;
    ind_t size;

    size = mem->mem_full;
    old = mem->mem_base + size;
    new = old + dist;
    while (size--)
        *--new = *--old;
    mem->mem_base = new;
}
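
/*
 * Sketch only (editorial, assumes a hosted C library): on a modern system the
 * two copy loops above do the job of memmove(), which handles overlapping
 * source and destination in either direction.
 */
#ifdef EXAMPLE_MEMMOVE
#include <string.h>

static void copy_down_with_memmove(struct memory *mem, ind_t dist)
{
    char *new = mem->mem_base - dist;

    memmove(new, mem->mem_base, (size_t)mem->mem_full);
    mem->mem_base = new;
}
#endif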
static int alloctype = NORMAL;

/*
 * Add `size' bytes to the bytes already allocated for `piece'. If it has no
 * free bytes left, ask the system for them or, if that fails, take them from
 * the free bytes of other pieces. The offset of the new area is returned. No
 * matter how many times the area is moved because of another allocation,
 * this offset remains valid.
 */
ind_t alloc(int piece, long size)
{
    ind_t incr = 0;
    ind_t left = mems[piece].mem_left;
    ind_t full = mems[piece].mem_full;

    assert(passnumber == FIRST || (!incore && piece == ALLOMODL));
    if (size == (long)0)
        return full;
    if (size != (ind_t)size)
        return BADOFF;
    switch (piece) {
    case ALLOMODL:
    case ALLORANL:
        size = int_align(size);
    }
    if (size - left > 0)
        incr = ((size - left + (INCRSIZE - 1)) / INCRSIZE) * INCRSIZE;
    if (incr == 0 ||
        (incr < left + full && (incr -= move_up(piece, left + full)) <= 0) ||
        move_up(piece, incr) == incr ||
        compact(piece, size, alloctype)) {
        mems[piece].mem_full += size;
        mems[piece].mem_left -= size;
        return full;
    } else {
        incore = FALSE;
        return BADOFF;
    }
}
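
/*
 * Sketch of the intended calling pattern (editorial; `n' and the use of
 * struct outname are hypothetical): keep the offset, not a pointer, because
 * a later alloc() may move the whole piece, and recompute the pointer with
 * address() only when it is needed.
 *
 *      ind_t off = alloc(ALLOMODL, n * (long)sizeof(struct outname));
 *      if (off != BADOFF) {
 *              struct outname *p = (struct outname *)address(ALLOMODL, off);
 *              ...                     (p is valid until the next alloc)
 *      }
 */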
/*
 * Same as alloc() but for a piece which really needs it. If the first
 * attempt fails, release the space occupied by other pieces and try again.
 */
ind_t hard_alloc(int piece, long size)
{
    ind_t ret;
    int i;

    if (size != (ind_t)size)
        return BADOFF;
    if ((ret = alloc(piece, size)) != BADOFF) {
        return ret;
    }
    /*
     * Deallocate what we don't need.
     */
    for (i = 0; i < NMEMS; i++) {
        switch (i) {
        case ALLOGLOB:
        case ALLOGCHR:
        case ALLOSYMB:
        case ALLOARCH:
        case ALLOMODL:
        case ALLORANL:
            break;              /* Do not try to deallocate this. */
        default:
            dealloc(i);
            break;
        }
    }
    free_saved_moduls();
    if ((ret = alloc(piece, size)) != BADOFF) {
        return ret;
    }
    alloctype = FORCED;
    ret = alloc(piece, size);
    alloctype = NORMAL;
    return ret;
}
/*
 * We don't need the previous modules, so we put the current module
 * at the start of the piece allocated for module contents, thereby
 * overwriting the saved modules, and release their space.
 */
static void free_saved_moduls()
{
    ind_t size;
    char *old, *new;
    struct memory *mem = &mems[ALLOMODL];

    size = mem->mem_full - core_position;
    new = mem->mem_base;
    old = new + core_position;
    while (size--)
        *new++ = *old++;
    mem->mem_full -= core_position;
    mem->mem_left += core_position;
    core_position = (ind_t)0;
}
/*
 * The piece of memory with index `piece' is no longer needed.
 * We take care that it can be used by compact() later, if needed.
 */
void dealloc(int piece)
{
    /*
     * Some pieces need their memory throughout the program.
     */
    assert(piece != ALLOGLOB);
    assert(piece != ALLOGCHR);
    assert(piece != ALLOSYMB);
    assert(piece != ALLOARCH);
    mems[piece].mem_left += mems[piece].mem_full;
    mems[piece].mem_full = (ind_t)0;
}

char *core_alloc(int piece, long size)
{
    register ind_t off;

    if ((off = alloc(piece, size)) == BADOFF)
        return (char *)0;
    return address(piece, off);
}
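
/*
 * Editorial note: core_alloc() and core_free() form a pair. core_free() can
 * only give back the tail of a piece, from `p' up to the current top, so it
 * is a last-in, first-out release of (part of) the most recent allocation,
 * not a general-purpose free().
 */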
void core_free(int piece, char *p)
{
    char *q = address(piece, mems[piece].mem_full);

    assert(p < q);
    if (sizeof(unsigned) == sizeof(char *))
    {
        mems[piece].mem_full -= (unsigned) (q - p);
        mems[piece].mem_left += (unsigned) (q - p);
    }
    else
    {
        mems[piece].mem_full -= (ind_t) q - (ind_t) p;
        mems[piece].mem_left += (ind_t) q - (ind_t) p;
    }
}
/*
 * Reset index into piece of memory for modules and
 * take care that the allocated pieces will not be moved.
 */
void freeze_core()
{
    int i;

    core_position = (ind_t)0;
    if (incore)
        return;
    for (i = 0; i < NMEMS; i++) {
        switch (i) {
        case ALLOGLOB:
        case ALLOGCHR:
        case ALLOSYMB:
        case ALLOARCH:
            break;              /* Do not try to deallocate this. */
        default:
            dealloc(i);
            break;
        }
    }
    compact(NMEMS - 1, (ind_t)0, FREEZE);
}
/* ------------------------------------------------------------------------- */

/*
 * To transform the various pieces of the output in core to the file format,
 * we must order the bytes in the unsigned shorts and longs as ACK prescribes.
 */
void write_bytes()
{
    unsigned short nsect;
    long offchar;
    struct memory *mem;
    extern unsigned short NLocals, NGlobals;
    extern long NLChars, NGChars;
    extern int flagword;
    extern struct outhead outhead;
    extern struct outsect outsect[];
    /* extern char *outputname; */
    int sectionno = 0;

    nsect = outhead.oh_nsect;
    offchar = OFF_CHAR(outhead);
    /*
     * We allocated two areas: one for local and one for global names.
     * Also, the in-core on_foff differs from the one used in the file.
     * At the end of the global area we have put the section names.
     */
    if (!(flagword & SFLAG)) {
        do_crs((struct outname *)mems[ALLOLOCL].mem_base, NLocals);
        namecpy((struct outname *)mems[ALLOLOCL].mem_base,
            NLocals,
            offchar
        );
        namecpy((struct outname *)mems[ALLOGLOB].mem_base,
            NGlobals + nsect,
            offchar + NLChars
        );
    }
    /*
     * These pieces must always be written.
     */
    wr_ohead(&outhead);
    wr_sect(outsect, nsect);
    for (mem = &mems[ALLOEMIT]; mem < &mems[ALLORELO]; mem++)
        wrt_emit(mem->mem_base, sectionno++, mem->mem_full);
    /*
     * The rest depends on the flags.
     */
    if (flagword & (RFLAG|CFLAG))
        wr_relo((struct outrelo *) mems[ALLORELO].mem_base,
            outhead.oh_nrelo);
    if (!(flagword & SFLAG)) {
        wr_name((struct outname *) mems[ALLOLOCL].mem_base,
            NLocals);
        wr_name((struct outname *) mems[ALLOGLOB].mem_base,
            NGlobals + nsect);
        wr_string(mems[ALLOLCHR].mem_base + 1, (long)NLChars);
        wr_string(mems[ALLOGCHR].mem_base + 1, (long)NGChars);
#ifdef SYMDBUG
        wr_dbug(mems[ALLODBUG].mem_base, mems[ALLODBUG].mem_full);
#endif /* SYMDBUG */
    }
}
/*
 * Convert the in-core on_foff of each name (an offset into the string piece,
 * where 0 means "no string" and real strings start at offset 1) to the file
 * offset of its string, whose string area starts at `offchar' in the file.
 */
void namecpy(struct outname *name, unsigned int nname, long offchar)
{
    while (nname--) {
        if (name->on_foff)
            name->on_foff += offchar - 1;
        name++;
    }
}