physmem.c

/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/rbtree.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/bootmem.h"
#include "linux/module.h"
#include "linux/pfn.h"
#include "asm/types.h"
#include "asm/pgtable.h"
#include "kern_util.h"
#include "user_util.h"
#include "mode_kern.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "kern.h"
#include "init.h"
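
/*
 * A phys_desc describes one page of "physical" memory whose normal
 * backing has been replaced by a page of some other file (a
 * substituted mapping).  Descriptors live in the phys_mappings rbtree,
 * keyed by virtual address, and also on the owning file descriptor's
 * page list.
 */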
struct phys_desc {
        struct rb_node rb;
        int fd;
        __u64 offset;
        void *virt;
        unsigned long phys;
        struct list_head list;
};

static struct rb_root phys_mappings = RB_ROOT;
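
/*
 * Walk the rbtree looking for virt.  Returns a pointer to the link
 * slot holding the matching node or, if there is no match, the empty
 * slot where a node for virt would be inserted - callers use the same
 * helper for both lookup and insertion.
 */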
static struct rb_node **find_rb(void *virt)
{
        struct rb_node **n = &phys_mappings.rb_node;
        struct phys_desc *d;

        while(*n != NULL){
                d = rb_entry(*n, struct phys_desc, rb);
                if(d->virt == virt)
                        return n;

                if(d->virt > virt)
                        n = &(*n)->rb_left;
                else
                        n = &(*n)->rb_right;
        }

        return n;
}

static struct phys_desc *find_phys_mapping(void *virt)
{
        struct rb_node **n = find_rb(virt);

        if(*n == NULL)
                return NULL;

        return rb_entry(*n, struct phys_desc, rb);
}
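
/*
 * Link a new descriptor into the empty slot found by find_rb and
 * rebalance the tree.  A duplicate entry is a caller bug, so it
 * panics rather than failing quietly.
 */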
static void insert_phys_mapping(struct phys_desc *desc)
{
        struct rb_node **n = find_rb(desc->virt);

        if(*n != NULL)
                panic("Physical remapping for %p already present",
                      desc->virt);

        rb_link_node(&desc->rb, rb_parent(*n), n);
        rb_insert_color(&desc->rb, &phys_mappings);
}
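
/*
 * One desc_mapping exists per file descriptor that currently backs any
 * substituted pages.  Its pages list collects the phys_descs for that
 * descriptor so they can all be torn down together.
 */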
LIST_HEAD(descriptor_mappings);

struct desc_mapping {
        int fd;
        struct list_head list;
        struct list_head pages;
};

static struct desc_mapping *find_mapping(int fd)
{
        struct desc_mapping *desc;
        struct list_head *ele;

        list_for_each(ele, &descriptor_mappings){
                desc = list_entry(ele, struct desc_mapping, list);
                if(desc->fd == fd)
                        return desc;
        }

        return NULL;
}
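
/* Return the desc_mapping for fd, creating and registering it if needed. */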
static struct desc_mapping *descriptor_mapping(int fd)
{
        struct desc_mapping *desc;

        desc = find_mapping(fd);
        if(desc != NULL)
                return desc;

        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
        if(desc == NULL)
                return NULL;

        *desc = ((struct desc_mapping)
                { .fd    = fd,
                  .list  = LIST_HEAD_INIT(desc->list),
                  .pages = LIST_HEAD_INIT(desc->pages) });
        list_add(&desc->list, &descriptor_mappings);

        return desc;
}
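
/*
 * Substitute the page backing virt: record the (fd, offset) pair in a
 * new descriptor and map that file page over the physical page at
 * virt.  Judging by the panic message in remove_mapping, this is used
 * to put block device file pages directly into physical memory.
 * Returns 0 or a negative error; on a mapping failure the new
 * descriptor is unwound again.
 */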
int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
{
        struct desc_mapping *fd_maps;
        struct phys_desc *desc;
        unsigned long phys;
        int err;

        fd_maps = descriptor_mapping(fd);
        if(fd_maps == NULL)
                return -ENOMEM;

        phys = __pa(virt);
        desc = find_phys_mapping(virt);
        if(desc != NULL)
                panic("Address 0x%p is already substituted\n", virt);

        err = -ENOMEM;
        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
        if(desc == NULL)
                goto out;

        *desc = ((struct phys_desc)
                { .fd     = fd,
                  .offset = offset,
                  .virt   = virt,
                  .phys   = __pa(virt),
                  .list   = LIST_HEAD_INIT(desc->list) });
        insert_phys_mapping(desc);

        list_add(&desc->list, &fd_maps->pages);

        virt = (void *) ((unsigned long) virt & PAGE_MASK);
        err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
        if(!err)
                goto out;

        rb_erase(&desc->rb, &phys_mappings);
        kfree(desc);
 out:
        return err;
}

static int physmem_fd = -1;
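
/*
 * Drop a substituted mapping: unlink and free the descriptor, then
 * remap the page from the normal physical memory file (physmem_fd) in
 * its place.
 */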
static void remove_mapping(struct phys_desc *desc)
{
        void *virt = desc->virt;
        int err;

        rb_erase(&desc->rb, &phys_mappings);
        list_del(&desc->list);
        kfree(desc);

        err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
        if(err)
                panic("Failed to unmap block device page from physical memory, "
                      "errno = %d", -err);
}

int physmem_remove_mapping(void *virt)
{
        struct phys_desc *desc;

        virt = (void *) ((unsigned long) virt & PAGE_MASK);
        desc = find_phys_mapping(virt);
        if(desc == NULL)
                return 0;

        remove_mapping(desc);
        return 1;
}
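
/*
 * Tear down every substituted page backed by fd, re-reading the
 * current file contents into memory so the data survives, then free
 * the per-descriptor bookkeeping.  Intended for when the descriptor
 * is going away.
 */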
void physmem_forget_descriptor(int fd)
{
        struct desc_mapping *desc;
        struct phys_desc *page;
        struct list_head *ele, *next;
        __u64 offset;
        void *addr;
        int err;

        desc = find_mapping(fd);
        if(desc == NULL)
                return;

        list_for_each_safe(ele, next, &desc->pages){
                page = list_entry(ele, struct phys_desc, list);
                offset = page->offset;
                addr = page->virt;
                remove_mapping(page);
                err = os_seek_file(fd, offset);
                if(err)
                        panic("physmem_forget_descriptor - failed to seek "
                              "to %lld in fd %d, error = %d\n",
                              offset, fd, -err);
                err = os_read_file(fd, addr, PAGE_SIZE);
                if(err < 0)
                        panic("physmem_forget_descriptor - failed to read "
                              "from fd %d to 0x%p, error = %d\n",
                              fd, addr, -err);
        }

        list_del(&desc->list);
        kfree(desc);
}

EXPORT_SYMBOL(physmem_forget_descriptor);
EXPORT_SYMBOL(physmem_remove_mapping);
EXPORT_SYMBOL(physmem_subst_mapping);
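
/*
 * Arch hook run when pages are freed - any substituted mapping on a
 * freed page must revert to its normal physical memory backing.
 */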
void arch_free_page(struct page *page, int order)
{
        void *virt;
        int i;

        for(i = 0; i < (1 << order); i++){
                virt = __va(page_to_phys(page + i));
                physmem_remove_mapping(virt);
        }
}

int is_remapped(void *virt)
{
        struct phys_desc *desc = find_phys_mapping(virt);

        return desc != NULL;
}

/* Changed during early boot */
unsigned long high_physmem;

extern unsigned long long physmem_size;
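
/*
 * Allocate the struct page array covering physical memory, iomem, and
 * highmem (sizes given in bytes) and initialize each entry: zeroed,
 * marked reserved, with an empty LRU list.
 */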
int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
{
        struct page *p, *map;
        unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
        unsigned long iomem_len, iomem_pages, total_len, total_pages;
        int i;

        phys_pages = physmem >> PAGE_SHIFT;
        phys_len = phys_pages * sizeof(struct page);

        iomem_pages = iomem >> PAGE_SHIFT;
        iomem_len = iomem_pages * sizeof(struct page);

        highmem_pages = highmem >> PAGE_SHIFT;
        highmem_len = highmem_pages * sizeof(struct page);

        total_pages = phys_pages + iomem_pages + highmem_pages;
        total_len = phys_len + iomem_len + highmem_len;

        if(kmalloc_ok){
                map = kmalloc(total_len, GFP_KERNEL);
                if(map == NULL)
                        map = vmalloc(total_len);
        }
        else map = alloc_bootmem_low_pages(total_len);

        if(map == NULL)
                return -ENOMEM;

        for(i = 0; i < total_pages; i++){
                p = &map[i];
                memset(p, 0, sizeof(struct page));
                SetPageReserved(p);
                INIT_LIST_HEAD(&p->lru);
        }

        max_mapnr = total_pages;
        return 0;
}

/* Changed during early boot */
static unsigned long kmem_top = 0;

unsigned long get_kmem_end(void)
{
        if(kmem_top == 0)
                kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
        return kmem_top;
}
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
                int r, int w, int x)
{
        __u64 offset;
        int fd, err;

        fd = phys_mapping(phys, &offset);
        err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
        if(err) {
                if(err == -ENOMEM)
                        printk("try increasing the host's "
                               "/proc/sys/vm/max_map_count to <physical "
                               "memory size>/4096\n");
                panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
                      "err = %d\n", virt, fd, offset, len, r, w, x, err);
        }
}

extern int __syscall_stub_start;
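
/*
 * Create the host file that backs "physical" memory, map the
 * non-reserved part of it, and hand the free pages to the bootmem
 * allocator; reserve_end marks the end of the low region that must be
 * kept out of bootmem's hands.
 */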
void setup_physmem(unsigned long start, unsigned long reserve_end,
                   unsigned long len, unsigned long long highmem)
{
        unsigned long reserve = reserve_end - start;
        int pfn = PFN_UP(__pa(reserve_end));
        int delta = (len - reserve) >> PAGE_SHIFT;
        int err, offset, bootmap_size;

        physmem_fd = create_mem_file(len + highmem);

        offset = uml_reserved - uml_physmem;
        err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
                            len - offset, 1, 1, 0);
        if(err < 0){
                os_print_error(err, "Mapping memory");
                exit(1);
        }

        /* Special kludge - This page will be mapped in to userspace processes
         * from physmem_fd, so it needs to be written out there.
         */
        os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
        os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);

        bootmap_size = init_bootmem(pfn, pfn + delta);
        free_bootmem(__pa(reserve_end) + bootmap_size,
                     len - bootmap_size - reserve);
}
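
/*
 * Translate a physical address into the file descriptor and offset
 * backing it: a substituted page maps to its recorded (fd, offset),
 * normal RAM and highmem map into physmem_fd, and iomem addresses map
 * to their region's file.  Returns -1 if nothing backs the address.
 */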
int phys_mapping(unsigned long phys, __u64 *offset_out)
{
        struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
        int fd = -1;

        if(desc != NULL){
                fd = desc->fd;
                *offset_out = desc->offset;
        }
        else if(phys < physmem_size){
                fd = physmem_fd;
                *offset_out = phys;
        }
        else if(phys < __pa(end_iomem)){
                struct iomem_region *region = iomem_regions;

                while(region != NULL){
                        if((phys >= region->phys) &&
                           (phys < region->phys + region->size)){
                                fd = region->fd;
                                *offset_out = phys - region->phys;
                                break;
                        }
                        region = region->next;
                }
        }
        else if(phys < __pa(end_iomem) + highmem){
                fd = physmem_fd;
                *offset_out = phys - iomem_size;
        }

        return fd;
}

static int __init uml_mem_setup(char *line, int *add)
{
        char *retptr;
        physmem_size = memparse(line, &retptr);
        return 0;
}

__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the host. It can\n"
"    be more, and the excess, if it's ever used, will just be swapped out.\n"
"    Example: mem=64M\n\n"
);

extern int __init parse_iomem(char *str, int *add);

__uml_setup("iomem=", parse_iomem,
"iomem=<name>,<file>\n"
"    Configure <file> as an IO memory region named <name>.\n\n"
);

/*
 * This list is constructed in parse_iomem and addresses filled in in
 * setup_iomem, both of which run during early boot.  Afterwards, it's
 * unchanged.
 */
struct iomem_region *iomem_regions = NULL;

/* Initialized in parse_iomem */
int iomem_size = 0;
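
/*
 * Look up a registered iomem region by driver name; returns its virtual
 * address and stores its size in *len_out, or returns 0 if not found.
 */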
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
        struct iomem_region *region = iomem_regions;

        while(region != NULL){
                if(!strcmp(region->driver, driver)){
                        *len_out = region->size;
                        return region->virt;
                }
                region = region->next;
        }

        return 0;
}
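
/*
 * Map each registered iomem region just above physical memory, leaving
 * an unmapped guard page between regions, and record the resulting
 * virtual and physical addresses in the region structure.
 */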
int setup_iomem(void)
{
        struct iomem_region *region = iomem_regions;
        unsigned long iomem_start = high_physmem + PAGE_SIZE;
        int err;

        while(region != NULL){
                err = os_map_memory((void *) iomem_start, region->fd, 0,
                                    region->size, 1, 1, 0);
                if(err)
                        printk("Mapping iomem region for driver '%s' failed, "
                               "errno = %d\n", region->driver, -err);
                else {
                        region->virt = iomem_start;
                        region->phys = __pa(region->virt);
                }

                iomem_start += region->size + PAGE_SIZE;
                region = region->next;
        }

        return 0;
}

__initcall(setup_iomem);