- // SPDX-License-Identifier: GPL-2.0
- /*
- * dax: direct host memory access
- * Copyright (C) 2020 Red Hat, Inc.
- */
- #include "fuse_i.h"
- #include <linux/delay.h>
- #include <linux/dax.h>
- #include <linux/uio.h>
- #include <linux/pfn_t.h>
- #include <linux/iomap.h>
- #include <linux/interval_tree.h>
- /*
- * Default memory range size. A power of 2 so it agrees with common FUSE_INIT
- * map_alignment values 4KB and 64KB.
- */
- #define FUSE_DAX_SHIFT 21
- #define FUSE_DAX_SZ (1 << FUSE_DAX_SHIFT)
- #define FUSE_DAX_PAGES (FUSE_DAX_SZ / PAGE_SIZE)
- /* Number of ranges reclaimer will try to free in one invocation */
- #define FUSE_DAX_RECLAIM_CHUNK (10)
- /*
- * Dax memory reclaim threshold in percentage of total ranges. When the
- * number of free ranges drops below this threshold, reclaim can trigger.
- * Default is 20%.
- */
- #define FUSE_DAX_RECLAIM_THRESHOLD (20)
- /** Translation information for file offsets to DAX window offsets */
- struct fuse_dax_mapping {
- /* Pointer to inode where this memory range is mapped */
- struct inode *inode;
- /* Will connect in fcd->free_ranges to keep track of free memory */
- struct list_head list;
- /* For interval tree in file/inode */
- struct interval_tree_node itn;
- /* Will connect in fcd->busy_ranges to keep track of busy memory */
- struct list_head busy_list;
- /** Position in DAX window */
- u64 window_offset;
- /** Length of mapping, in bytes */
- loff_t length;
- /* Is this mapping read-only or read-write */
- bool writable;
- /* reference count when the mapping is used by dax iomap. */
- refcount_t refcnt;
- };
- /* Per-inode dax map */
- struct fuse_inode_dax {
- /* Semaphore to protect modifications to the dmap tree */
- struct rw_semaphore sem;
- /* Sorted rb tree of struct fuse_dax_mapping elements */
- struct rb_root_cached tree;
- unsigned long nr;
- };
- struct fuse_conn_dax {
- /* DAX device */
- struct dax_device *dev;
- /* Lock protecting accesses to members of this structure */
- spinlock_t lock;
- /* List of memory ranges which are busy */
- unsigned long nr_busy_ranges;
- struct list_head busy_ranges;
- /* Worker to free up memory ranges */
- struct delayed_work free_work;
- /* Wait queue for a dax range to become free */
- wait_queue_head_t range_waitq;
- /* DAX Window Free Ranges */
- long nr_free_ranges;
- struct list_head free_ranges;
- unsigned long nr_ranges;
- };
- static inline struct fuse_dax_mapping *
- node_to_dmap(struct interval_tree_node *node)
- {
- if (!node)
- return NULL;
- return container_of(node, struct fuse_dax_mapping, itn);
- }
- static struct fuse_dax_mapping *
- alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
- static void
- __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
- {
- unsigned long free_threshold;
- /* If the number of free ranges is below the threshold, start reclaim */
- free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
- 1);
- if (fcd->nr_free_ranges < free_threshold)
- queue_delayed_work(system_long_wq, &fcd->free_work,
- msecs_to_jiffies(delay_ms));
- }
- static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
- unsigned long delay_ms)
- {
- spin_lock(&fcd->lock);
- __kick_dmap_free_worker(fcd, delay_ms);
- spin_unlock(&fcd->lock);
- }
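- /*
- * Pop a free memory range off fcd->free_ranges, if one is available, and
- * kick the background reclaim worker in case the number of free ranges has
- * dropped below the reclaim threshold.
- */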
- static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
- {
- struct fuse_dax_mapping *dmap;
- spin_lock(&fcd->lock);
- dmap = list_first_entry_or_null(&fcd->free_ranges,
- struct fuse_dax_mapping, list);
- if (dmap) {
- list_del_init(&dmap->list);
- WARN_ON(fcd->nr_free_ranges <= 0);
- fcd->nr_free_ranges--;
- }
- spin_unlock(&fcd->lock);
- kick_dmap_free_worker(fcd, 0);
- return dmap;
- }
- /* This assumes fcd->lock is held */
- static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
- struct fuse_dax_mapping *dmap)
- {
- list_del_init(&dmap->busy_list);
- WARN_ON(fcd->nr_busy_ranges == 0);
- fcd->nr_busy_ranges--;
- }
- static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
- struct fuse_dax_mapping *dmap)
- {
- spin_lock(&fcd->lock);
- __dmap_remove_busy_list(fcd, dmap);
- spin_unlock(&fcd->lock);
- }
- /* This assumes fcd->lock is held */
- static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
- struct fuse_dax_mapping *dmap)
- {
- list_add_tail(&dmap->list, &fcd->free_ranges);
- fcd->nr_free_ranges++;
- wake_up(&fcd->range_waitq);
- }
- static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
- struct fuse_dax_mapping *dmap)
- {
- /* Return fuse_dax_mapping to free list */
- spin_lock(&fcd->lock);
- __dmap_add_to_free_pool(fcd, dmap);
- spin_unlock(&fcd->lock);
- }
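- /*
- * Ask the FUSE daemon (FUSE_SETUPMAPPING) to map one FUSE_DAX_SZ chunk of
- * the file at @start_idx into the DAX window slot described by @dmap. On a
- * fresh setup (not an upgrade to writable) the dmap is also inserted into
- * the inode's interval tree and the connection's busy list.
- */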
- static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
- struct fuse_dax_mapping *dmap, bool writable,
- bool upgrade)
- {
- struct fuse_mount *fm = get_fuse_mount(inode);
- struct fuse_conn_dax *fcd = fm->fc->dax;
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_setupmapping_in inarg;
- loff_t offset = start_idx << FUSE_DAX_SHIFT;
- FUSE_ARGS(args);
- ssize_t err;
- WARN_ON(fcd->nr_free_ranges < 0);
- /* Ask fuse daemon to setup mapping */
- memset(&inarg, 0, sizeof(inarg));
- inarg.foffset = offset;
- inarg.fh = -1;
- inarg.moffset = dmap->window_offset;
- inarg.len = FUSE_DAX_SZ;
- inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
- if (writable)
- inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
- args.opcode = FUSE_SETUPMAPPING;
- args.nodeid = fi->nodeid;
- args.in_numargs = 1;
- args.in_args[0].size = sizeof(inarg);
- args.in_args[0].value = &inarg;
- err = fuse_simple_request(fm, &args);
- if (err < 0)
- return err;
- dmap->writable = writable;
- if (!upgrade) {
- /*
- * We don't take a reference on the inode. The inode is valid right now
- * and when the inode is going away, the cleanup logic should first
- * clean up the dmap entries.
- */
- dmap->inode = inode;
- dmap->itn.start = dmap->itn.last = start_idx;
- /* Protected by fi->dax->sem */
- interval_tree_insert(&dmap->itn, &fi->dax->tree);
- fi->dax->nr++;
- spin_lock(&fcd->lock);
- list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
- fcd->nr_busy_ranges++;
- spin_unlock(&fcd->lock);
- }
- return 0;
- }
- static int fuse_send_removemapping(struct inode *inode,
- struct fuse_removemapping_in *inargp,
- struct fuse_removemapping_one *remove_one)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_mount *fm = get_fuse_mount(inode);
- FUSE_ARGS(args);
- args.opcode = FUSE_REMOVEMAPPING;
- args.nodeid = fi->nodeid;
- args.in_numargs = 2;
- args.in_args[0].size = sizeof(*inargp);
- args.in_args[0].value = inargp;
- args.in_args[1].size = inargp->count * sizeof(*remove_one);
- args.in_args[1].value = remove_one;
- return fuse_simple_request(fm, &args);
- }
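- /*
- * Send FUSE_REMOVEMAPPING requests for @num dmaps on @to_remove, batching
- * up to FUSE_REMOVEMAPPING_MAX_ENTRY entries per request.
- */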
- static int dmap_removemapping_list(struct inode *inode, unsigned int num,
- struct list_head *to_remove)
- {
- struct fuse_removemapping_one *remove_one, *ptr;
- struct fuse_removemapping_in inarg;
- struct fuse_dax_mapping *dmap;
- int ret = 0, i = 0, nr_alloc;
- nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
- remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
- if (!remove_one)
- return -ENOMEM;
- ptr = remove_one;
- list_for_each_entry(dmap, to_remove, list) {
- ptr->moffset = dmap->window_offset;
- ptr->len = dmap->length;
- ptr++;
- i++;
- num--;
- if (i >= nr_alloc || num == 0) {
- memset(&inarg, 0, sizeof(inarg));
- inarg.count = i;
- ret = fuse_send_removemapping(inode, &inarg,
- remove_one);
- if (ret)
- goto out;
- ptr = remove_one;
- i = 0;
- }
- }
- out:
- kfree(remove_one);
- return ret;
- }
- /*
- * Cleanup dmap entry and add back to free list. This should be called with
- * fcd->lock held.
- */
- static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
- struct fuse_dax_mapping *dmap)
- {
- pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
- dmap->itn.start, dmap->itn.last, dmap->window_offset,
- dmap->length);
- __dmap_remove_busy_list(fcd, dmap);
- dmap->inode = NULL;
- dmap->itn.start = dmap->itn.last = 0;
- __dmap_add_to_free_pool(fcd, dmap);
- }
- /*
- * Free inode dmap entries whose range falls inside [start, end].
- * Does not take any locks. At this point of time it should only be
- * called from evict_inode() path where we know all dmap entries can be
- * reclaimed.
- */
- static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
- struct inode *inode,
- loff_t start, loff_t end)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_dax_mapping *dmap, *n;
- int err, num = 0;
- LIST_HEAD(to_remove);
- unsigned long start_idx = start >> FUSE_DAX_SHIFT;
- unsigned long end_idx = end >> FUSE_DAX_SHIFT;
- struct interval_tree_node *node;
- while (1) {
- node = interval_tree_iter_first(&fi->dax->tree, start_idx,
- end_idx);
- if (!node)
- break;
- dmap = node_to_dmap(node);
- /* inode is going away. There should not be any users of dmap */
- WARN_ON(refcount_read(&dmap->refcnt) > 1);
- interval_tree_remove(&dmap->itn, &fi->dax->tree);
- num++;
- list_add(&dmap->list, &to_remove);
- }
- /* Nothing to remove */
- if (list_empty(&to_remove))
- return;
- WARN_ON(fi->dax->nr < num);
- fi->dax->nr -= num;
- err = dmap_removemapping_list(inode, num, &to_remove);
- if (err && err != -ENOTCONN) {
- pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
- start, end);
- }
- spin_lock(&fcd->lock);
- list_for_each_entry_safe(dmap, n, &to_remove, list) {
- list_del_init(&dmap->list);
- dmap_reinit_add_to_free_pool(fcd, dmap);
- }
- spin_unlock(&fcd->lock);
- }
- static int dmap_removemapping_one(struct inode *inode,
- struct fuse_dax_mapping *dmap)
- {
- struct fuse_removemapping_one forget_one;
- struct fuse_removemapping_in inarg;
- memset(&inarg, 0, sizeof(inarg));
- inarg.count = 1;
- memset(&forget_one, 0, sizeof(forget_one));
- forget_one.moffset = dmap->window_offset;
- forget_one.len = dmap->length;
- return fuse_send_removemapping(inode, &inarg, &forget_one);
- }
- /*
- * This is called from evict_inode() and by that time the inode is going
- * away. So this function does not take any locks like fi->dax->sem for
- * traversing the fuse inode's interval tree. If that lock is taken then
- * the lock validator complains of a deadlock situation w.r.t. the
- * fs_reclaim lock.
- */
- void fuse_dax_inode_cleanup(struct inode *inode)
- {
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_inode *fi = get_fuse_inode(inode);
- /*
- * fuse_evict_inode() has already called truncate_inode_pages_final()
- * before we arrive here. So we should not have to worry about any
- * pages/exception entries still associated with inode.
- */
- inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
- WARN_ON(fi->dax->nr);
- }
- static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
- {
- iomap->addr = IOMAP_NULL_ADDR;
- iomap->length = length;
- iomap->type = IOMAP_HOLE;
- }
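- /*
- * Fill @iomap for the part of [pos, pos + length) covered by @dmap,
- * translating the file offset into a DAX window offset. Takes a reference
- * on @dmap (dropped in fuse_iomap_end()) so that reclaim leaves the range
- * alone while the iomap operation is in flight. Ranges beyond i_size are
- * reported as a hole.
- */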
- static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
- struct iomap *iomap, struct fuse_dax_mapping *dmap,
- unsigned int flags)
- {
- loff_t offset, len;
- loff_t i_size = i_size_read(inode);
- offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
- len = min(length, dmap->length - offset);
- /* If length is beyond end of file, truncate further */
- if (pos + len > i_size)
- len = i_size - pos;
- if (len > 0) {
- iomap->addr = dmap->window_offset + offset;
- iomap->length = len;
- if (flags & IOMAP_FAULT)
- iomap->length = ALIGN(len, PAGE_SIZE);
- iomap->type = IOMAP_MAPPED;
- /*
- * increase refcnt so that the reclaim code knows this dmap is in
- * use. This assumes fi->dax->sem is held either shared or
- * exclusive.
- */
- refcount_inc(&dmap->refcnt);
- /* iomap->private should be NULL */
- WARN_ON_ONCE(iomap->private);
- iomap->private = dmap;
- } else {
- /* Mapping beyond end of file is hole */
- fuse_fill_iomap_hole(iomap, length);
- }
- }
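- /*
- * Allocate a free memory range and set up a mapping for the chunk of the
- * file containing @pos. In the fault path allocation must not block on
- * inline reclaim, so -EAGAIN is returned instead and the fault is retried.
- */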
- static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
- loff_t length, unsigned int flags,
- struct iomap *iomap)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_conn_dax *fcd = fc->dax;
- struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
- int ret;
- bool writable = flags & IOMAP_WRITE;
- unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
- struct interval_tree_node *node;
- /*
- * Can't do inline reclaim in fault path. We call
- * dax_layout_busy_page() before we free a range. And
- * fuse_wait_dax_page() drops the fi->i_mmap_sem lock and reacquires it.
- * In fault path we enter with fi->i_mmap_sem held and can't drop
- * it. Also in fault path we hold fi->i_mmap_sem shared and not
- * exclusive, so that creates further issues with fuse_wait_dax_page().
- * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
- * range to become free and retry.
- */
- if (flags & IOMAP_FAULT) {
- alloc_dmap = alloc_dax_mapping(fcd);
- if (!alloc_dmap)
- return -EAGAIN;
- } else {
- alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
- if (IS_ERR(alloc_dmap))
- return PTR_ERR(alloc_dmap);
- }
- /* If we are here, we should have memory allocated */
- if (WARN_ON(!alloc_dmap))
- return -EIO;
- /*
- * Take the write lock so that only one caller can try to set up the
- * mapping and the others wait.
- */
- down_write(&fi->dax->sem);
- /*
- * We dropped the lock. Check again whether somebody else has already
- * set up the mapping.
- */
- node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
- if (node) {
- dmap = node_to_dmap(node);
- fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
- dmap_add_to_free_pool(fcd, alloc_dmap);
- up_write(&fi->dax->sem);
- return 0;
- }
- /* Setup one mapping */
- ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
- writable, false);
- if (ret < 0) {
- dmap_add_to_free_pool(fcd, alloc_dmap);
- up_write(&fi->dax->sem);
- return ret;
- }
- fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
- up_write(&fi->dax->sem);
- return 0;
- }
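- /*
- * Upgrade an existing read-only mapping to read-write. Called with an
- * extra reference on the dmap (taken in fuse_iomap_begin()), which is
- * dropped here once fi->dax->sem is held exclusively.
- */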
- static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
- loff_t length, unsigned int flags,
- struct iomap *iomap)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_dax_mapping *dmap;
- int ret;
- unsigned long idx = pos >> FUSE_DAX_SHIFT;
- struct interval_tree_node *node;
- /*
- * Take the exclusive lock so that only one caller can try to set up the
- * mapping and others wait.
- */
- down_write(&fi->dax->sem);
- node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
- /* We are holding either inode lock or i_mmap_sem, and that should
- * ensure that dmap can't be truncated. We are holding a reference
- * on dmap and that should make sure it can't be reclaimed. So dmap
- * should still be there in tree despite the fact we dropped and
- * re-acquired the fi->dax->sem lock.
- */
- ret = -EIO;
- if (WARN_ON(!node))
- goto out_err;
- dmap = node_to_dmap(node);
- /* We took an extra reference on dmap to make sure it's not reclaimed.
- * Now we hold fi->dax->sem lock and that reference is not needed
- * anymore. Drop it.
- */
- if (refcount_dec_and_test(&dmap->refcnt)) {
- /* refcount should not hit 0. This object only goes
- * away when fuse connection goes away
- */
- WARN_ON_ONCE(1);
- }
- /* Maybe another thread already upgraded mapping while we were not
- * holding lock.
- */
- if (dmap->writable) {
- ret = 0;
- goto out_fill_iomap;
- }
- ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
- true);
- if (ret < 0)
- goto out_err;
- out_fill_iomap:
- fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
- out_err:
- up_write(&fi->dax->sem);
- return ret;
- }
- /* This is just for DAX and the mapping is ephemeral, do not use it for other
- * purposes since there is no block device with a permanent mapping.
- */
- static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
- unsigned int flags, struct iomap *iomap,
- struct iomap *srcmap)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_dax_mapping *dmap;
- bool writable = flags & IOMAP_WRITE;
- unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
- struct interval_tree_node *node;
- /* We don't support FIEMAP */
- if (WARN_ON(flags & IOMAP_REPORT))
- return -EIO;
- iomap->offset = pos;
- iomap->flags = 0;
- iomap->bdev = NULL;
- iomap->dax_dev = fc->dax->dev;
- /*
- * Both the read/write and mmap paths can race here. So we need something
- * to make sure that if one path is setting up a mapping, the other waits.
- *
- * For now, use a semaphore for this. It probably needs to be
- * optimized later.
- */
- down_read(&fi->dax->sem);
- node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
- if (node) {
- dmap = node_to_dmap(node);
- if (writable && !dmap->writable) {
- /* Upgrade read-only mapping to read-write. This will
- * require exclusive fi->dax->sem lock as we don't want
- * two threads to be trying to do this simultaneously
- * for same dmap. So drop shared lock and acquire
- * exclusive lock.
- *
- * Before dropping fi->dax->sem lock, take reference
- * on dmap so that it's not freed by range reclaim.
- */
- refcount_inc(&dmap->refcnt);
- up_read(&fi->dax->sem);
- pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
- __func__, pos, length);
- return fuse_upgrade_dax_mapping(inode, pos, length,
- flags, iomap);
- } else {
- fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
- up_read(&fi->dax->sem);
- return 0;
- }
- } else {
- up_read(&fi->dax->sem);
- pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
- __func__, pos, length);
- if (pos >= i_size_read(inode))
- goto iomap_hole;
- return fuse_setup_new_dax_mapping(inode, pos, length, flags,
- iomap);
- }
- /*
- * If a read beyond end of file happens, fs code seems to return
- * it as a hole
- */
- iomap_hole:
- fuse_fill_iomap_hole(iomap, length);
- pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
- __func__, pos, length, iomap->length);
- return 0;
- }
- static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
- ssize_t written, unsigned int flags,
- struct iomap *iomap)
- {
- struct fuse_dax_mapping *dmap = iomap->private;
- if (dmap) {
- if (refcount_dec_and_test(&dmap->refcnt)) {
- /* refcount should not hit 0. This object only goes
- * away when fuse connection goes away
- */
- WARN_ON_ONCE(1);
- }
- }
- /* DAX writes beyond end-of-file aren't handled using iomap, so the
- * file size is unchanged and there is nothing to do here.
- */
- return 0;
- }
- static const struct iomap_ops fuse_iomap_ops = {
- .iomap_begin = fuse_iomap_begin,
- .iomap_end = fuse_iomap_end,
- };
- static void fuse_wait_dax_page(struct inode *inode)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- up_write(&fi->i_mmap_sem);
- schedule();
- down_write(&fi->i_mmap_sem);
- }
- /* Should be called with fi->i_mmap_sem lock held exclusively */
- static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
- loff_t start, loff_t end)
- {
- struct page *page;
- page = dax_layout_busy_page_range(inode->i_mapping, start, end);
- if (!page)
- return 0;
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, fuse_wait_dax_page(inode));
- }
- /* dmap_end == 0 leads to unmapping of whole file */
- int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
- u64 dmap_end)
- {
- bool retry;
- int ret;
- do {
- retry = false;
- ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
- dmap_end);
- } while (ret == 0 && retry);
- return ret;
- }
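- /* DAX read path: takes the inode lock shared and defers to dax_iomap_rw() */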
- ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
- {
- struct inode *inode = file_inode(iocb->ki_filp);
- ssize_t ret;
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock_shared(inode))
- return -EAGAIN;
- } else {
- inode_lock_shared(inode);
- }
- ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
- inode_unlock_shared(inode);
- /* TODO file_accessed(iocb->f_filp) */
- return ret;
- }
- static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
- {
- struct inode *inode = file_inode(iocb->ki_filp);
- return (iov_iter_rw(from) == WRITE &&
- ((iocb->ki_pos) >= i_size_read(inode) ||
- (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
- }
- static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
- {
- struct inode *inode = file_inode(iocb->ki_filp);
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
- ssize_t ret;
- ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
- if (ret < 0)
- return ret;
- fuse_invalidate_attr(inode);
- fuse_write_update_size(inode, iocb->ki_pos);
- return ret;
- }
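- /*
- * DAX write path. File-extending writes fall back to the regular FUSE
- * direct write path so that the write and the i_size update stay atomic;
- * everything else goes through dax_iomap_rw().
- */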
- ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
- {
- struct inode *inode = file_inode(iocb->ki_filp);
- ssize_t ret;
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock(inode))
- return -EAGAIN;
- } else {
- inode_lock(inode);
- }
- ret = generic_write_checks(iocb, from);
- if (ret <= 0)
- goto out;
- ret = file_remove_privs(iocb->ki_filp);
- if (ret)
- goto out;
- /* TODO file_update_time() but we don't want metadata I/O */
- /* Do not use dax for file-extending writes, as the write and the
- * on-disk i_size increase are not atomic otherwise.
- */
- if (file_extending_write(iocb, from))
- ret = fuse_dax_direct_write(iocb, from);
- else
- ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);
- out:
- inode_unlock(inode);
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
- return ret;
- }
- static int fuse_dax_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
- {
- struct inode *inode = mapping->host;
- struct fuse_conn *fc = get_fuse_conn(inode);
- return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
- }
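- /*
- * Common page fault handler. If no free memory range is available, the
- * iomap_begin path returns -EAGAIN; in that case wait for the reclaim
- * worker to free a range and retry the fault.
- */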
- static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, bool write)
- {
- vm_fault_t ret;
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct super_block *sb = inode->i_sb;
- pfn_t pfn;
- int error = 0;
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_conn_dax *fcd = fc->dax;
- bool retry = false;
- if (write)
- sb_start_pagefault(sb);
- retry:
- if (retry && !(fcd->nr_free_ranges > 0))
- wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
- /*
- * We need to serialize against not only truncate but also against
- * fuse dax memory range reclaim. While a range is being reclaimed,
- * we do not want any read/write/mmap to make progress and try
- * to populate page cache or access memory we are trying to free.
- */
- down_read(&get_fuse_inode(inode)->i_mmap_sem);
- ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
- if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
- error = 0;
- retry = true;
- up_read(&get_fuse_inode(inode)->i_mmap_sem);
- goto retry;
- }
- if (ret & VM_FAULT_NEEDDSYNC)
- ret = dax_finish_sync_fault(vmf, pe_size, pfn);
- up_read(&get_fuse_inode(inode)->i_mmap_sem);
- if (write)
- sb_end_pagefault(sb);
- return ret;
- }
- static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
- {
- return __fuse_dax_fault(vmf, PE_SIZE_PTE,
- vmf->flags & FAULT_FLAG_WRITE);
- }
- static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
- {
- return __fuse_dax_fault(vmf, pe_size, vmf->flags & FAULT_FLAG_WRITE);
- }
- static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
- {
- return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
- }
- static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
- {
- return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
- }
- static const struct vm_operations_struct fuse_dax_vm_ops = {
- .fault = fuse_dax_fault,
- .huge_fault = fuse_dax_huge_fault,
- .page_mkwrite = fuse_dax_page_mkwrite,
- .pfn_mkwrite = fuse_dax_pfn_mkwrite,
- };
- int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
- {
- file_accessed(file);
- vma->vm_ops = &fuse_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
- return 0;
- }
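- /*
- * Write back and invalidate the page cache for the file range backed by
- * @dmap before the range is reclaimed.
- */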
- static int dmap_writeback_invalidate(struct inode *inode,
- struct fuse_dax_mapping *dmap)
- {
- int ret;
- loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
- loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);
- ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
- if (ret) {
- pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
- ret, start_pos, end_pos);
- return ret;
- }
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- start_pos >> PAGE_SHIFT,
- end_pos >> PAGE_SHIFT);
- if (ret)
- pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
- ret);
- return ret;
- }
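- /*
- * Reclaim one dmap: write back and invalidate the range, remove the dmap
- * from the inode's interval tree and ask the daemon to remove the mapping.
- * Caller must hold fi->dax->sem exclusively.
- */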
- static int reclaim_one_dmap_locked(struct inode *inode,
- struct fuse_dax_mapping *dmap)
- {
- int ret;
- struct fuse_inode *fi = get_fuse_inode(inode);
- /*
- * igrab() was done to make sure the inode won't go away under us, and this
- * further avoids the race with evict().
- */
- ret = dmap_writeback_invalidate(inode, dmap);
- if (ret)
- return ret;
- /* Remove dax mapping from inode interval tree now */
- interval_tree_remove(&dmap->itn, &fi->dax->tree);
- fi->dax->nr--;
- /* It is possible that umount/shutdown has killed the fuse connection
- * and worker thread is trying to reclaim memory in parallel. Don't
- * warn in that case.
- */
- ret = dmap_removemapping_one(inode, dmap);
- if (ret && ret != -ENOTCONN) {
- pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
- dmap->window_offset, dmap->length, ret);
- }
- return 0;
- }
- /* Find the first mapped dmap for an inode and return it. Caller needs
- * to hold fi->dax->sem either shared or exclusive.
- */
- static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_dax_mapping *dmap;
- struct interval_tree_node *node;
- for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
- node = interval_tree_iter_next(node, 0, -1)) {
- dmap = node_to_dmap(node);
- /* still in use. */
- if (refcount_read(&dmap->refcnt) > 1)
- continue;
- return dmap;
- }
- return NULL;
- }
- /*
- * Find the first mapping in the tree, reclaim it and return it. Do not add
- * it back to the free pool.
- */
- static struct fuse_dax_mapping *
- inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
- bool *retry)
- {
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_dax_mapping *dmap;
- u64 dmap_start, dmap_end;
- unsigned long start_idx;
- int ret;
- struct interval_tree_node *node;
- down_write(&fi->i_mmap_sem);
- /* Lookup a dmap and corresponding file offset to reclaim. */
- down_read(&fi->dax->sem);
- dmap = inode_lookup_first_dmap(inode);
- if (dmap) {
- start_idx = dmap->itn.start;
- dmap_start = start_idx << FUSE_DAX_SHIFT;
- dmap_end = dmap_start + FUSE_DAX_SZ - 1;
- }
- up_read(&fi->dax->sem);
- if (!dmap)
- goto out_mmap_sem;
- /*
- * Make sure there are no references to inode pages using
- * get_user_pages()
- */
- ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
- if (ret) {
- pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
- ret);
- dmap = ERR_PTR(ret);
- goto out_mmap_sem;
- }
- down_write(&fi->dax->sem);
- node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
- /* Range already got reclaimed by somebody else */
- if (!node) {
- if (retry)
- *retry = true;
- goto out_write_dmap_sem;
- }
- dmap = node_to_dmap(node);
- /* still in use. */
- if (refcount_read(&dmap->refcnt) > 1) {
- dmap = NULL;
- if (retry)
- *retry = true;
- goto out_write_dmap_sem;
- }
- ret = reclaim_one_dmap_locked(inode, dmap);
- if (ret < 0) {
- dmap = ERR_PTR(ret);
- goto out_write_dmap_sem;
- }
- /* Clean up dmap. Do not add back to free list */
- dmap_remove_busy_list(fcd, dmap);
- dmap->inode = NULL;
- dmap->itn.start = dmap->itn.last = 0;
- pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
- __func__, inode, dmap->window_offset, dmap->length);
- out_write_dmap_sem:
- up_write(&fi->dax->sem);
- out_mmap_sem:
- up_write(&fi->i_mmap_sem);
- return dmap;
- }
- static struct fuse_dax_mapping *
- alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
- {
- struct fuse_dax_mapping *dmap;
- struct fuse_inode *fi = get_fuse_inode(inode);
- while (1) {
- bool retry = false;
- dmap = alloc_dax_mapping(fcd);
- if (dmap)
- return dmap;
- dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
- /*
- * Either we got a mapping or it is an error; return in both
- * cases.
- */
- if (dmap)
- return dmap;
- /* If we could not reclaim a mapping because it
- * had a reference or hit some other temporary failure,
- * try again. We want to give up inline reclaim only
- * if there is no range assigned to this node. Otherwise
- * a deadlock is possible if we sleep with fi->i_mmap_sem
- * held and the worker freeing memory can't make progress due
- * to unavailability of the fi->i_mmap_sem lock. So sleep
- * only if fi->dax->nr == 0.
- */
- if (retry)
- continue;
- /*
- * There are no mappings which can be reclaimed. Wait for one.
- * We are not holding fi->dax->sem. So it is possible
- * that range gets added now. But as we are not holding
- * fi->i_mmap_sem, worker should still be able to free up
- * a range and wake us up.
- */
- if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
- if (wait_event_killable_exclusive(fcd->range_waitq,
- (fcd->nr_free_ranges > 0))) {
- return ERR_PTR(-EINTR);
- }
- }
- }
- }
- static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
- struct inode *inode,
- unsigned long start_idx)
- {
- int ret;
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_dax_mapping *dmap;
- struct interval_tree_node *node;
- /* Find the fuse dax mapping at this index in the inode's interval tree. */
- node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
- /* Range already got cleaned up by somebody else */
- if (!node)
- return 0;
- dmap = node_to_dmap(node);
- /* still in use. */
- if (refcount_read(&dmap->refcnt) > 1)
- return 0;
- ret = reclaim_one_dmap_locked(inode, dmap);
- if (ret < 0)
- return ret;
- /* Cleanup dmap entry and add back to free list */
- spin_lock(&fcd->lock);
- dmap_reinit_add_to_free_pool(fcd, dmap);
- spin_unlock(&fcd->lock);
- return ret;
- }
- /*
- * Free a range of memory.
- * Locking:
- * 1. Take fi->i_mmap_sem to block dax faults.
- * 2. Take fi->dax->sem to protect interval tree and also to make sure
- * read/write can not reuse a dmap which we might be freeing.
- */
- static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
- struct inode *inode,
- unsigned long start_idx,
- unsigned long end_idx)
- {
- int ret;
- struct fuse_inode *fi = get_fuse_inode(inode);
- loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
- loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
- down_write(&fi->i_mmap_sem);
- ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
- if (ret) {
- pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
- ret);
- goto out_mmap_sem;
- }
- down_write(&fi->dax->sem);
- ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
- up_write(&fi->dax->sem);
- out_mmap_sem:
- up_write(&fi->i_mmap_sem);
- return ret;
- }
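- /*
- * Background reclaim: walk fcd->busy_ranges and reclaim up to @nr_to_free
- * idle ranges (refcount == 1), one range per iteration.
- */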
- static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
- unsigned long nr_to_free)
- {
- struct fuse_dax_mapping *dmap, *pos, *temp;
- int ret, nr_freed = 0;
- unsigned long start_idx = 0, end_idx = 0;
- struct inode *inode = NULL;
- /* Pick the first busy range and free it for now */
- while (1) {
- if (nr_freed >= nr_to_free)
- break;
- dmap = NULL;
- spin_lock(&fcd->lock);
- if (!fcd->nr_busy_ranges) {
- spin_unlock(&fcd->lock);
- return 0;
- }
- list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
- busy_list) {
- /* skip this range if it's in use. */
- if (refcount_read(&pos->refcnt) > 1)
- continue;
- inode = igrab(pos->inode);
- /*
- * This inode is going away. That will free
- * up all the ranges anyway, continue to
- * next range.
- */
- if (!inode)
- continue;
- /*
- * Take this element off the list and add it to the tail. If
- * this element can't be freed, that will help with
- * selecting a new element in the next iteration of the loop.
- */
- dmap = pos;
- list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
- start_idx = end_idx = dmap->itn.start;
- break;
- }
- spin_unlock(&fcd->lock);
- if (!dmap)
- return 0;
- ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
- iput(inode);
- if (ret)
- return ret;
- nr_freed++;
- }
- return 0;
- }
- static void fuse_dax_free_mem_worker(struct work_struct *work)
- {
- int ret;
- struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
- free_work.work);
- ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
- if (ret) {
- pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
- ret);
- }
- /* If the number of free ranges is still below the threshold, requeue */
- kick_dmap_free_worker(fcd, 1);
- }
- static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
- {
- struct fuse_dax_mapping *range, *temp;
- /* Free all allocated elements */
- list_for_each_entry_safe(range, temp, mem_list, list) {
- list_del(&range->list);
- if (!list_empty(&range->busy_list))
- list_del(&range->busy_list);
- kfree(range);
- }
- }
- void fuse_dax_conn_free(struct fuse_conn *fc)
- {
- if (fc->dax) {
- fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
- kfree(fc->dax);
- }
- }
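- /*
- * Carve the device's DAX window into FUSE_DAX_SZ sized ranges and put them
- * all on the free list.
- */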
- static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
- {
- long nr_pages, nr_ranges;
- void *kaddr;
- pfn_t pfn;
- struct fuse_dax_mapping *range;
- int ret, id;
- size_t dax_size = -1;
- unsigned long i;
- init_waitqueue_head(&fcd->range_waitq);
- INIT_LIST_HEAD(&fcd->free_ranges);
- INIT_LIST_HEAD(&fcd->busy_ranges);
- INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
- id = dax_read_lock();
- nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
- &pfn);
- dax_read_unlock(id);
- if (nr_pages < 0) {
- pr_debug("dax_direct_access() returned %ld\n", nr_pages);
- return nr_pages;
- }
- nr_ranges = nr_pages/FUSE_DAX_PAGES;
- pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
- __func__, nr_pages, nr_ranges);
- for (i = 0; i < nr_ranges; i++) {
- range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
- ret = -ENOMEM;
- if (!range)
- goto out_err;
- /* TODO: This offset only works if the virtio-fs driver does not have
- * some memory hidden at the beginning. This needs
- * better handling.
- */
- range->window_offset = i * FUSE_DAX_SZ;
- range->length = FUSE_DAX_SZ;
- INIT_LIST_HEAD(&range->busy_list);
- refcount_set(&range->refcnt, 1);
- list_add_tail(&range->list, &fcd->free_ranges);
- }
- fcd->nr_free_ranges = nr_ranges;
- fcd->nr_ranges = nr_ranges;
- return 0;
- out_err:
- /* Free all allocated elements */
- fuse_free_dax_mem_ranges(&fcd->free_ranges);
- return ret;
- }
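- /* Allocate per-connection DAX state if the mount has a DAX device */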
- int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
- {
- struct fuse_conn_dax *fcd;
- int err;
- if (!dax_dev)
- return 0;
- fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
- if (!fcd)
- return -ENOMEM;
- spin_lock_init(&fcd->lock);
- fcd->dev = dax_dev;
- err = fuse_dax_mem_range_init(fcd);
- if (err) {
- kfree(fcd);
- return err;
- }
- fc->dax = fcd;
- return 0;
- }
- bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
- {
- struct fuse_conn *fc = get_fuse_conn_super(sb);
- fi->dax = NULL;
- if (fc->dax) {
- fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
- if (!fi->dax)
- return false;
- init_rwsem(&fi->dax->sem);
- fi->dax->tree = RB_ROOT_CACHED;
- }
- return true;
- }
- static const struct address_space_operations fuse_dax_file_aops = {
- .writepages = fuse_dax_writepages,
- .direct_IO = noop_direct_IO,
- .set_page_dirty = noop_set_page_dirty,
- .invalidatepage = noop_invalidatepage,
- };
- void fuse_dax_inode_init(struct inode *inode)
- {
- struct fuse_conn *fc = get_fuse_conn(inode);
- if (!fc->dax)
- return;
- inode->i_flags |= S_DAX;
- inode->i_data.a_ops = &fuse_dax_file_aops;
- }
- bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
- {
- if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
- pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
- map_alignment, FUSE_DAX_SZ);
- return false;
- }
- return true;
- }
- void fuse_dax_cancel_work(struct fuse_conn *fc)
- {
- struct fuse_conn_dax *fcd = fc->dax;
- if (fcd)
- cancel_delayed_work_sync(&fcd->free_work);
- }
- EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);