- /* SPDX-License-Identifier: GPL-2.0 */
- /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
- #include <linux/device.h>
- #include <linux/io.h>
- #include <linux/kasan.h>
- #include <linux/memory_hotplug.h>
- #include <linux/mm.h>
- #include <linux/pfn_t.h>
- #include <linux/swap.h>
- #include <linux/mmzone.h>
- #include <linux/swapops.h>
- #include <linux/types.h>
- #include <linux/wait_bit.h>
- #include <linux/xarray.h>
- static DEFINE_XARRAY(pgmap_array);
- /*
- * The memremap() and memremap_pages() interfaces are alternately used
- * to map persistent memory namespaces. These interfaces place different
- * constraints on the alignment and size of the mapping (namespace).
- * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
- * only map subsections (2MB), and on at least one architecture (PowerPC)
- * the minimum mapping granularity of memremap_pages() is 16MB.
- *
- * The role of memremap_compat_align() is to communicate the minimum
- * arch supported alignment of a namespace such that it can freely
- * switch modes without violating the arch constraint. Namely, do not
- * allow a namespace to be PAGE_SIZE aligned since that namespace may be
- * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
- */
- #ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
- unsigned long memremap_compat_align(void)
- {
- return SUBSECTION_SIZE;
- }
- EXPORT_SYMBOL_GPL(memremap_compat_align);
- #endif
- #ifdef CONFIG_DEV_PAGEMAP_OPS
- DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
- EXPORT_SYMBOL(devmap_managed_key);
- static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
- {
- if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
- pgmap->type == MEMORY_DEVICE_FS_DAX)
- static_branch_dec(&devmap_managed_key);
- }
- static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
- {
- if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
- pgmap->type == MEMORY_DEVICE_FS_DAX)
- static_branch_inc(&devmap_managed_key);
- }
- #else
- static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
- {
- }
- static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
- {
- }
- #endif /* CONFIG_DEV_PAGEMAP_OPS */
- static void pgmap_array_delete(struct range *range)
- {
- xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
- NULL, GFP_KERNEL);
- synchronize_rcu();
- }
- static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
- {
- struct range *range = &pgmap->ranges[range_id];
- unsigned long pfn = PHYS_PFN(range->start);
- if (range_id)
- return pfn;
- return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
- }
- bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
- {
- int i;
- for (i = 0; i < pgmap->nr_range; i++) {
- struct range *range = &pgmap->ranges[i];
- if (pfn >= PHYS_PFN(range->start) &&
- pfn <= PHYS_PFN(range->end))
- return pfn >= pfn_first(pgmap, i);
- }
- return false;
- }
- static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
- {
- const struct range *range = &pgmap->ranges[range_id];
- return (range->start + range_len(range)) >> PAGE_SHIFT;
- }
- static unsigned long pfn_next(unsigned long pfn)
- {
- if (pfn % 1024 == 0)
- cond_resched();
- return pfn + 1;
- }
- #define for_each_device_pfn(pfn, map, i) \
- for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
- static void dev_pagemap_kill(struct dev_pagemap *pgmap)
- {
- if (pgmap->ops && pgmap->ops->kill)
- pgmap->ops->kill(pgmap);
- else
- percpu_ref_kill(pgmap->ref);
- }
- static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
- {
- if (pgmap->ops && pgmap->ops->cleanup) {
- pgmap->ops->cleanup(pgmap);
- } else {
- wait_for_completion(&pgmap->done);
- percpu_ref_exit(pgmap->ref);
- }
- /*
- * Undo the pgmap ref assignment for the internal case as the
- * caller may re-enable the same pgmap.
- */
- if (pgmap->ref == &pgmap->internal_ref)
- pgmap->ref = NULL;
- }
- static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
- {
- struct range *range = &pgmap->ranges[range_id];
- struct page *first_page;
- int nid;
- /* make sure to access a memmap that was actually initialized */
- first_page = pfn_to_page(pfn_first(pgmap, range_id));
- /* pages are dead and unused, undo the arch mapping */
- nid = page_to_nid(first_page);
- mem_hotplug_begin();
- remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
- PHYS_PFN(range_len(range)));
- if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- __remove_pages(PHYS_PFN(range->start),
- PHYS_PFN(range_len(range)), NULL);
- } else {
- arch_remove_memory(nid, range->start, range_len(range),
- pgmap_altmap(pgmap));
- kasan_remove_zero_shadow(__va(range->start), range_len(range));
- }
- mem_hotplug_done();
- untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
- pgmap_array_delete(range);
- }
- void memunmap_pages(struct dev_pagemap *pgmap)
- {
- unsigned long pfn;
- int i;
- dev_pagemap_kill(pgmap);
- for (i = 0; i < pgmap->nr_range; i++)
- for_each_device_pfn(pfn, pgmap, i)
- put_page(pfn_to_page(pfn));
- dev_pagemap_cleanup(pgmap);
- for (i = 0; i < pgmap->nr_range; i++)
- pageunmap_range(pgmap, i);
- WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
- devmap_managed_enable_put(pgmap);
- }
- EXPORT_SYMBOL_GPL(memunmap_pages);
- static void devm_memremap_pages_release(void *data)
- {
- memunmap_pages(data);
- }
- static void dev_pagemap_percpu_release(struct percpu_ref *ref)
- {
- struct dev_pagemap *pgmap =
- container_of(ref, struct dev_pagemap, internal_ref);
- complete(&pgmap->done);
- }
- static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
- int range_id, int nid)
- {
- struct range *range = &pgmap->ranges[range_id];
- struct dev_pagemap *conflict_pgmap;
- int error, is_ram;
- if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
- "altmap not supported for multiple ranges\n"))
- return -EINVAL;
- conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
- if (conflict_pgmap) {
- WARN(1, "Conflicting mapping in same section\n");
- put_dev_pagemap(conflict_pgmap);
- return -ENOMEM;
- }
- conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
- if (conflict_pgmap) {
- WARN(1, "Conflicting mapping in same section\n");
- put_dev_pagemap(conflict_pgmap);
- return -ENOMEM;
- }
- is_ram = region_intersects(range->start, range_len(range),
- IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
- if (is_ram != REGION_DISJOINT) {
- WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
- is_ram == REGION_MIXED ? "mixed" : "ram",
- range->start, range->end);
- return -ENXIO;
- }
- error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
- PHYS_PFN(range->end), pgmap, GFP_KERNEL));
- if (error)
- return error;
- if (nid < 0)
- nid = numa_mem_id();
- error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
- range_len(range));
- if (error)
- goto err_pfn_remap;
- mem_hotplug_begin();
- /*
- * For device private memory we call add_pages() as we only need to
- * allocate and initialize struct page for the device memory. Moreover,
- * the device memory is inaccessible, thus we do not want to
- * create a linear mapping for the memory like arch_add_memory()
- * would do.
- *
- * For all other device memory types, which are accessible by
- * the CPU, we do want the linear mapping and thus use
- * arch_add_memory().
- */
- if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- error = add_pages(nid, PHYS_PFN(range->start),
- PHYS_PFN(range_len(range)), params);
- } else {
- error = kasan_add_zero_shadow(__va(range->start), range_len(range));
- if (error) {
- mem_hotplug_done();
- goto err_kasan;
- }
- error = arch_add_memory(nid, range->start, range_len(range),
- params);
- }
- if (!error) {
- struct zone *zone;
- zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
- move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
- PHYS_PFN(range_len(range)), params->altmap,
- MIGRATE_MOVABLE);
- }
- mem_hotplug_done();
- if (error)
- goto err_add_memory;
- /*
- * Initialization of the pages has been deferred until now in order
- * to allow us to do the work while not holding the hotplug lock.
- */
- memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
- PHYS_PFN(range->start),
- PHYS_PFN(range_len(range)), pgmap);
- percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
- - pfn_first(pgmap, range_id));
- return 0;
- err_add_memory:
- kasan_remove_zero_shadow(__va(range->start), range_len(range));
- err_kasan:
- untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
- err_pfn_remap:
- pgmap_array_delete(range);
- return error;
- }
- /*
- * Not device managed version of devm_memremap_pages(), undone by
- * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
- * device available.
- */
- void *memremap_pages(struct dev_pagemap *pgmap, int nid)
- {
- struct mhp_params params = {
- .altmap = pgmap_altmap(pgmap),
- .pgprot = PAGE_KERNEL,
- };
- const int nr_range = pgmap->nr_range;
- int error, i;
- if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
- return ERR_PTR(-EINVAL);
- switch (pgmap->type) {
- case MEMORY_DEVICE_PRIVATE:
- if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
- WARN(1, "Device private memory not supported\n");
- return ERR_PTR(-EINVAL);
- }
- if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
- WARN(1, "Missing migrate_to_ram method\n");
- return ERR_PTR(-EINVAL);
- }
- if (!pgmap->ops->page_free) {
- WARN(1, "Missing page_free method\n");
- return ERR_PTR(-EINVAL);
- }
- if (!pgmap->owner) {
- WARN(1, "Missing owner\n");
- return ERR_PTR(-EINVAL);
- }
- break;
- case MEMORY_DEVICE_FS_DAX:
- if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
- IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
- WARN(1, "File system DAX not supported\n");
- return ERR_PTR(-EINVAL);
- }
- break;
- case MEMORY_DEVICE_GENERIC:
- break;
- case MEMORY_DEVICE_PCI_P2PDMA:
- params.pgprot = pgprot_noncached(params.pgprot);
- break;
- default:
- WARN(1, "Invalid pgmap type %d\n", pgmap->type);
- break;
- }
- if (!pgmap->ref) {
- if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
- return ERR_PTR(-EINVAL);
- init_completion(&pgmap->done);
- error = percpu_ref_init(&pgmap->internal_ref,
- dev_pagemap_percpu_release, 0, GFP_KERNEL);
- if (error)
- return ERR_PTR(error);
- pgmap->ref = &pgmap->internal_ref;
- } else {
- if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
- WARN(1, "Missing reference count teardown definition\n");
- return ERR_PTR(-EINVAL);
- }
- }
- devmap_managed_enable_get(pgmap);
- /*
- * Clear the pgmap nr_range as it will be incremented for each
- * successfully processed range. This communicates how many
- * regions to unwind in the abort case.
- */
- pgmap->nr_range = 0;
- error = 0;
- for (i = 0; i < nr_range; i++) {
- error = pagemap_range(pgmap, &params, i, nid);
- if (error)
- break;
- pgmap->nr_range++;
- }
- if (i < nr_range) {
- memunmap_pages(pgmap);
- pgmap->nr_range = nr_range;
- return ERR_PTR(error);
- }
- return __va(pgmap->ranges[0].start);
- }
- EXPORT_SYMBOL_GPL(memremap_pages);
- /**
- * devm_memremap_pages - remap and provide memmap backing for the given resource
- * @dev: hosting device for @pgmap
- * @pgmap: pointer to a struct dev_pagemap
- *
- * Notes:
- * 1/ At a minimum the range, nr_range, and type members of @pgmap must
- * be initialized by the caller before passing it to this function
- *
- * 2/ The altmap field may optionally be initialized, in which case
- * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
- *
- * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
- * 'live' on entry and will be killed and reaped at
- * devm_memremap_pages_release() time, or if this routine fails.
- *
- * 4/ range is expected to be a host memory range that could feasibly be
- * treated as a "System RAM" range, i.e. not a device mmio range, but
- * this is not enforced.
- */
- void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
- {
- int error;
- void *ret;
- ret = memremap_pages(pgmap, dev_to_node(dev));
- if (IS_ERR(ret))
- return ret;
- error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
- pgmap);
- if (error)
- return ERR_PTR(error);
- return ret;
- }
- EXPORT_SYMBOL_GPL(devm_memremap_pages);
- void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
- {
- devm_release_action(dev, devm_memremap_pages_release, pgmap);
- }
- EXPORT_SYMBOL_GPL(devm_memunmap_pages);
- unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
- {
- /* number of pfns from base where pfn_to_page() is valid */
- if (altmap)
- return altmap->reserve + altmap->free;
- return 0;
- }
- void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
- {
- altmap->alloc -= nr_pfns;
- }
- /**
- * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
- * @pfn: page frame number to look up
- * @pgmap: optional known pgmap that already has a reference
- *
- * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
- * is non-NULL but does not cover @pfn the reference to it will be released.
- */
- struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
- {
- resource_size_t phys = PFN_PHYS(pfn);
- /*
- * In the cached case we're already holding a live reference.
- */
- if (pgmap) {
- if (phys >= pgmap->range.start && phys <= pgmap->range.end)
- return pgmap;
- put_dev_pagemap(pgmap);
- }
- /* fall back to slow path lookup */
- rcu_read_lock();
- pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
- if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
- pgmap = NULL;
- rcu_read_unlock();
- return pgmap;
- }
- EXPORT_SYMBOL_GPL(get_dev_pagemap);
- #ifdef CONFIG_DEV_PAGEMAP_OPS
- void free_devmap_managed_page(struct page *page)
- {
- /* notify page idle for dax */
- if (!is_device_private_page(page)) {
- wake_up_var(&page->_refcount);
- return;
- }
- __ClearPageWaiters(page);
- mem_cgroup_uncharge(page);
- /*
- * When a device_private page is freed, the page->mapping field
- * may still contain a (stale) mapping value. For example, the
- * lower bits of page->mapping may still identify the page as an
- * anonymous page. Ultimately, this entire field is just stale
- * and wrong, and it will cause errors if not cleared. One
- * example is:
- *
- * migrate_vma_pages()
- * migrate_vma_insert_page()
- * page_add_new_anon_rmap()
- * __page_set_anon_rmap()
- * ...checks page->mapping, via PageAnon(page) call,
- * and incorrectly concludes that the page is an
- * anonymous page. Therefore, it incorrectly,
- * silently fails to set up the new anon rmap.
- *
- * For other types of ZONE_DEVICE pages, migration is either
- * handled differently or not done at all, so there is no need
- * to clear page->mapping.
- */
- page->mapping = NULL;
- page->pgmap->ops->page_free(page);
- }
- #endif /* CONFIG_DEV_PAGEMAP_OPS */