unpopulated-alloc.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

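/*
 * Pages handed out by this allocator sit on a simple LIFO free list, singly
 * linked through each page's zone_device_data field. page_list is the list
 * head, list_count its length, and list_lock protects both.
 */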
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

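/*
 * Grow the free list by at least @nr_pages pages. The request is rounded up
 * to a whole memory section, backed by a newly allocated IOMEM range and
 * remapped as MEMORY_DEVICE_GENERIC ZONE_DEVICE memory.
 */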
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

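/*
 * Illustrative usage sketch (not part of the original file): a typical
 * in-kernel caller takes a batch of unpopulated pages to back foreign
 * mappings and hands them back when the mappings are torn down, e.g.:
 *
 *	struct page *pages[4];
 *	int rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *
 *	if (rc)
 *		return rc;
 *	... map grant references or foreign frames into the pages ...
 *	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */
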
/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

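/*
 * Note that freed pages are simply pushed back on the free list; the IOMEM
 * resources and dev_pagemaps set up by fill_list() are never torn down, so
 * the pool only ever grows.
 */
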
#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif