mem.c

/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/mm.h"
#include "linux/bootmem.h"
#include "linux/swap.h"
#include "linux/highmem.h"
#include "linux/gfp.h"
#include "asm/page.h"
#include "asm/fixmap.h"
#include "asm/pgalloc.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "mem_user.h"
#include "uml_uaccess.h"
#include "os.h"
#include "linux/types.h"
#include "linux/string.h"
#include "init.h"
#include "kern_constants.h"

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
/* allocated in paging_init and unchanged thereafter */
unsigned long *empty_bad_page = NULL;
pgd_t swapper_pg_dir[PTRS_PER_PGD];
unsigned long long highmem;
int kmalloc_ok = 0;

static unsigned long brk_end;
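
/* Map and unmap the region between the end of the kernel brk and
 * uml_reserved in the host process; mem_init() hands this area over to the
 * allocator once kmalloc is turned on.
 */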
void unmap_physmem(void)
{
        os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
}

static void map_cb(void *unused)
{
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}
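
/* Hand the highmem pages over to the page allocator */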
#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        struct page *page;
        unsigned long highmem_pfn;
        int i;

        highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
                page = &mem_map[highmem_pfn + i];
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
        }
}
#endif
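
/* mem_init(): zero the zero-page, release boot memory to the page
 * allocator and turn kmalloc on.
 */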
void mem_init(void)
{
        /* clear the zero-page */
        memset((void *) empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
#endif
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
        setup_highmem(end_iomem, highmem);
#endif
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                   (unsigned long) __pa(pte)));
                if (pte != pte_offset_kernel(pmd, 0))
                        BUG();
        }
}
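
/* Allocate a pmd page and install it in the given pud entry (only needed
 * with three-level page tables).
 */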
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
        pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

        set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#endif
}
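
/* Build the page table skeleton (pgd/pud/pmd and pte pages) covering the
 * virtual range start..end; the individual ptes are filled in later by the
 * callers.
 */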
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}
  132. #ifdef CONFIG_HIGHMEM
  133. pte_t *kmap_pte;
  134. pgprot_t kmap_prot;
  135. #define kmap_get_fixmap_pte(vaddr) \
  136. pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
  137. (vaddr)), (vaddr))
  138. static void __init kmap_init(void)
  139. {
  140. unsigned long kmap_vstart;
  141. /* cache the first kmap pte */
  142. kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
  143. kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
  144. kmap_prot = PAGE_KERNEL;
  145. }
  146. static void init_highmem(void)
  147. {
  148. pgd_t *pgd;
  149. pud_t *pud;
  150. pmd_t *pmd;
  151. pte_t *pte;
  152. unsigned long vaddr;
  153. /*
  154. * Permanent kmaps:
  155. */
  156. vaddr = PKMAP_BASE;
  157. fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
  158. pgd = swapper_pg_dir + pgd_index(vaddr);
  159. pud = pud_offset(pgd, vaddr);
  160. pmd = pmd_offset(pud, vaddr);
  161. pte = pte_offset_kernel(pmd, vaddr);
  162. pkmap_page_table = pte;
  163. kmap_init();
  164. }
  165. #endif /* CONFIG_HIGHMEM */
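
/* With CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA, copy the host's fixmap area
 * between FIXADDR_USER_START and FIXADDR_USER_END into freshly allocated
 * pages and map those read-only at the same virtual addresses.
 */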
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long paddr, vaddr = FIXADDR_USER_START;

        if ( ! size )
                return;

        fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        paddr = (unsigned long)alloc_bootmem_low_pages( size);
        memcpy( (void *)paddr, (void *)FIXADDR_USER_START, size);
        paddr = __pa(paddr);
        for ( ; size > 0; size-=PAGE_SIZE, vaddr+=PAGE_SIZE, paddr+=PAGE_SIZE){
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val( (*pte), paddr, PAGE_READONLY);
        }
#endif
}

void paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for(i = 0; i < ARRAY_SIZE(zones_size); i++)
                zones_size[i] = 0;

        zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
                (uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}
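
/* Verify that the pages just allocated are actually writable by writing a
 * zero to each one; on a write fault, fail the allocation if it cannot
 * sleep, otherwise allocate a fresh set of pages and check those instead.
 */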
struct page *arch_validate(struct page *page, gfp_t mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

 again:
        if(page == NULL)
                return page;
        if(PageHighMem(page))
                return page;

        addr = (unsigned long) page_address(page);
        for(i = 0; i < (1 << order); i++){
                current->thread.fault_addr = (void *) addr;
                if(__do_copy_to_user((void __user *) addr, &zero,
                                     sizeof(zero),
                                     &current->thread.fault_addr,
                                     &current->thread.fault_catcher)){
                        if(!(mask & __GFP_WAIT))
                                return NULL;
                        else break;
                }
                addr += PAGE_SIZE;
        }

        if(i == (1 << order))
                return page;
        page = alloc_pages(mask, order);
        goto again;
}

/* This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n",
                        (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif

void show_mem(void)
{
        int pfn, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while(pfn-- > 0) {
                page = pfn_to_page(pfn);
                total++;
                if(PageHighMem(page))
                        highmem++;
                if(PageReserved(page))
                        reserved++;
                else if(PageSwapCache(page))
                        cached++;
                else if(page_count(page))
                        shared += page_count(page) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * Allocate and free page tables.
 */
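
/* Each new pgd gets a zeroed user half and the kernel half copied from
 * swapper_pg_dir, so all address spaces share the kernel mappings.
 */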
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}