// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
#include <linux/moduleparam.h>

bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
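
/*
 * Editor's note on the knob above: core_param() registers a plain kernel
 * command-line parameter (no module prefix), so the
 * CONFIG_XEN_SCRUB_PAGES_DEFAULT default can be overridden at boot with,
 * e.g., "xen_scrub_pages=0". Callers consult it indirectly through
 * xenmem_reservation_scrub_page() in <xen/mem-reservation.h>, which zeroes
 * a page before it is handed back to Xen.
 */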

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
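
/*
 * A worked example, assuming the usual configurations: when Linux and Xen
 * both use 4 KiB pages, XEN_PFN_PER_PAGE == PAGE_SIZE / XEN_PAGE_SIZE == 1,
 * so EXTENT_ORDER == fls(1) - 1 == 0 and each extent covers exactly one
 * frame. On an arm64 build with 64 KiB kernel pages (Xen frames are always
 * 4 KiB), XEN_PFN_PER_PAGE == 16 and EXTENT_ORDER == 4, i.e. one 16-frame
 * extent per kernel page.
 */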

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);

		BUG_ON(!page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		set_phys_to_machine(pfn, frames[i]);

		/* Link back into the page tables if not highmem. */
		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					mfn_pte(frames[i], PAGE_KERNEL),
					0);
			BUG_ON(ret);
		}
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					__pte_ma(0), 0);
			BUG_ON(ret);
		}
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */
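
/*
 * A note on how callers reach the __xenmem_* helpers above (a sketch of
 * the callers' view, based on <xen/mem-reservation.h>): drivers call the
 * unprefixed xenmem_reservation_va_mapping_update()/_reset() wrappers,
 * static inlines that invoke these helpers only on CONFIG_XEN_HAVE_PVMMU
 * builds when the domain is not auto-translated; on HVM/PVH guests the
 * wrappers are no-ops.
 */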

/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
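
/*
 * A minimal usage sketch (not part of this file; the example_* name is
 * hypothetical). It mirrors the balloon driver's "increase reservation"
 * step: ask Xen to populate one frame and, on PV guests, wire the frame
 * Xen wrote back into @frame into the P2M and the kernel page tables.
 * Assumes <xen/page.h> for page_to_xen_pfn().
 */
#if 0
static int example_reclaim_one_page(struct page *page)
{
	xen_pfn_t frame = page_to_xen_pfn(page);
	int rc;

	/* Returns the number of extents populated; 1 means success here. */
	rc = xenmem_reservation_increase(1, &frame);
	if (rc != 1)
		return -ENOMEM;

	/* No-op on HVM/PVH; on PV, links @frame back into the P2M/VA maps. */
	xenmem_reservation_va_mapping_update(1, &page, &frame);
	return 0;
}
#endif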

/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_decrease_reservation requires a GFN */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
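
/*
 * The matching "give a page back to Xen" sketch (again a hypothetical
 * example_* helper, not part of this file). It follows the order the
 * balloon driver uses: scrub, tear down the PV mapping, then drop the
 * reservation. Assumes <xen/page.h> for xen_page_to_gfn().
 */
#if 0
static int example_release_one_page(struct page *page)
{
	xen_pfn_t frame = xen_page_to_gfn(page);
	int rc;

	/* Zeroes the page before Xen reuses it, if xen_scrub_pages is set. */
	xenmem_reservation_scrub_page(page);

	/* Unhook the PV P2M/VA mapping first; a no-op on HVM/PVH. */
	xenmem_reservation_va_mapping_reset(1, &page);

	/* Returns the number of extents released; 1 means success here. */
	rc = xenmem_reservation_decrease(1, &frame);
	return rc == 1 ? 0 : -EBUSY;
}
#endif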