virtio_fs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion in device removal and mounting path
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

enum {
        VQ_HIPRIO,
        VQ_REQUEST
};

#define VQ_NAME_LEN     24

/* Per-virtqueue state */
struct virtio_fs_vq {
        spinlock_t lock;
        struct virtqueue *vq;             /* protected by ->lock */
        struct work_struct done_work;
        struct list_head queued_reqs;
        struct list_head end_reqs;        /* End these requests */
        struct delayed_work dispatch_work;
        struct fuse_dev *fud;
        bool connected;
        long in_flight;
        struct completion in_flight_zero; /* No inflight requests */
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
        struct kref refcount;
        struct list_head list;            /* on virtio_fs_instances */
        char *tag;
        struct virtio_fs_vq *vqs;
        unsigned int nvqs;                /* number of virtqueues */
        unsigned int num_request_queues;  /* number of request queues */
        struct dax_device *dax_dev;

        /* DAX memory window where file contents are mapped */
        void *window_kaddr;
        phys_addr_t window_phys_addr;
        size_t window_len;
};

struct virtio_fs_forget_req {
        struct fuse_in_header ih;
        struct fuse_forget_in arg;
};

struct virtio_fs_forget {
        /* This request can be temporarily queued on virt queue */
        struct list_head list;
        struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq;
        struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                                 struct fuse_req *req, bool in_flight);

enum {
        OPT_DAX,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
        fsparam_flag("dax", OPT_DAX),
        {}
};
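
/* Parse virtiofs-specific mount options; only "dax" is currently recognized */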
static int virtio_fs_parse_param(struct fs_context *fc,
                                 struct fs_parameter *param)
{
        struct fs_parse_result result;
        struct fuse_fs_context *ctx = fc->fs_private;
        int opt;

        opt = fs_parse(fc, virtio_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case OPT_DAX:
                ctx->dax = 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void virtio_fs_free_fc(struct fs_context *fc)
{
        struct fuse_fs_context *ctx = fc->fs_private;

        kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
        struct virtio_fs *fs = vq->vdev->priv;

        return &fs->vqs[vq->index];
}

static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
{
        return &vq_to_fsvq(vq)->fud->pq;
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
        fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
        WARN_ON(fsvq->in_flight <= 0);
        fsvq->in_flight--;
        if (!fsvq->in_flight)
                complete(&fsvq->in_flight_zero);
}

static void release_virtio_fs_obj(struct kref *ref)
{
        struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

        kfree(vfs->vqs);
        kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
        kref_put(&fs->refcount, release_virtio_fs_obj);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
        struct virtio_fs *vfs = fiq->priv;

        mutex_lock(&virtio_fs_mutex);
        virtio_fs_put(vfs);
        mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
        WARN_ON(fsvq->in_flight < 0);

        /* Wait for in flight requests to finish. */
        spin_lock(&fsvq->lock);
        if (fsvq->in_flight) {
                /* We are holding virtio_fs_mutex. There should not be any
                 * waiters waiting for completion.
                 */
                reinit_completion(&fsvq->in_flight_zero);
                spin_unlock(&fsvq->lock);
                wait_for_completion(&fsvq->in_flight_zero);
        } else {
                spin_unlock(&fsvq->lock);
        }

        flush_work(&fsvq->done_work);
        flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                virtio_fs_drain_queue(fsvq);
        }
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
        /* Provides mutual exclusion between ->remove and ->kill_sb
         * paths. We don't want both of these draining queue at the
         * same time. Current completion logic reinits completion
         * and that means there should not be any other thread
         * doing reinit or waiting for completion already.
         */
        mutex_lock(&virtio_fs_mutex);
        virtio_fs_drain_all_queues_locked(fs);
        mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                spin_lock(&fsvq->lock);
                fsvq->connected = true;
                spin_unlock(&fsvq->lock);
        }
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
        struct virtio_fs *fs2;
        bool duplicate = false;

        mutex_lock(&virtio_fs_mutex);

        list_for_each_entry(fs2, &virtio_fs_instances, list) {
                if (strcmp(fs->tag, fs2->tag) == 0)
                        duplicate = true;
        }

        if (!duplicate)
                list_add_tail(&fs->list, &virtio_fs_instances);

        mutex_unlock(&virtio_fs_mutex);

        if (duplicate)
                return -EEXIST;
        return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
        struct virtio_fs *fs;

        mutex_lock(&virtio_fs_mutex);

        list_for_each_entry(fs, &virtio_fs_instances, list) {
                if (strcmp(fs->tag, tag) == 0) {
                        kref_get(&fs->refcount);
                        goto found;
                }
        }

        fs = NULL; /* not found */

found:
        mutex_unlock(&virtio_fs_mutex);

        return fs;
}
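
/* Free the fuse_dev instances that were allocated for each virtqueue */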
static void virtio_fs_free_devs(struct virtio_fs *fs)
{
        unsigned int i;

        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                if (!fsvq->fud)
                        continue;

                fuse_dev_free(fsvq->fud);
                fsvq->fud = NULL;
        }
}

/* Read filesystem name from virtio config into fs->tag (devm-allocated, freed
 * automatically with the device).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
        char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
        char *end;
        size_t len;

        virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
                           &tag_buf, sizeof(tag_buf));
        end = memchr(tag_buf, '\0', sizeof(tag_buf));
        if (end == tag_buf)
                return -EINVAL; /* empty tag */
        if (!end)
                end = &tag_buf[sizeof(tag_buf)];

        len = end - tag_buf;
        fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
        if (!fs->tag)
                return -ENOMEM;
        memcpy(fs->tag, tag_buf, len);
        fs->tag[len] = '\0';
        return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 done_work);
        struct virtqueue *vq = fsvq->vq;

        /* Free completed FUSE_FORGET requests */
        spin_lock(&fsvq->lock);
        do {
                unsigned int len;
                void *req;

                virtqueue_disable_cb(vq);

                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        kfree(req);
                        dec_in_flight_req(fsvq);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
}

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 dispatch_work.work);
        int ret;

        pr_debug("virtio-fs: worker %s called.\n", __func__);
        while (1) {
                spin_lock(&fsvq->lock);
                req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
                                               list);
                if (!req) {
                        spin_unlock(&fsvq->lock);
                        break;
                }

                list_del_init(&req->list);
                spin_unlock(&fsvq->lock);
                fuse_request_end(req);
        }

        /* Dispatch pending requests */
        while (1) {
                spin_lock(&fsvq->lock);
                req = list_first_entry_or_null(&fsvq->queued_reqs,
                                               struct fuse_req, list);
                if (!req) {
                        spin_unlock(&fsvq->lock);
                        return;
                }
                list_del_init(&req->list);
                spin_unlock(&fsvq->lock);

                ret = virtio_fs_enqueue_req(fsvq, req, true);
                if (ret < 0) {
                        if (ret == -ENOMEM || ret == -ENOSPC) {
                                spin_lock(&fsvq->lock);
                                list_add_tail(&req->list, &fsvq->queued_reqs);
                                schedule_delayed_work(&fsvq->dispatch_work,
                                                      msecs_to_jiffies(1));
                                spin_unlock(&fsvq->lock);
                                return;
                        }
                        req->out.h.error = ret;
                        spin_lock(&fsvq->lock);
                        dec_in_flight_req(fsvq);
                        spin_unlock(&fsvq->lock);
                        pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
                               ret);
                        fuse_request_end(req);
                }
        }
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
                               struct virtio_fs_forget *forget,
                               bool in_flight)
{
        struct scatterlist sg;
        struct virtqueue *vq;
        int ret = 0;
        bool notify;
        struct virtio_fs_forget_req *req = &forget->req;

        spin_lock(&fsvq->lock);
        if (!fsvq->connected) {
                if (in_flight)
                        dec_in_flight_req(fsvq);
                kfree(forget);
                goto out;
        }

        sg_init_one(&sg, req, sizeof(*req));
        vq = fsvq->vq;
        dev_dbg(&vq->vdev->dev, "%s\n", __func__);

        ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
                                 ret);
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                              msecs_to_jiffies(1));
                        if (!in_flight)
                                inc_in_flight_req(fsvq);
                        /* Queue is full */
                        ret = 1;
                } else {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                 ret);
                        kfree(forget);
                        if (in_flight)
                                dec_in_flight_req(fsvq);
                }
                goto out;
        }

        if (!in_flight)
                inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);

        spin_unlock(&fsvq->lock);

        if (notify)
                virtqueue_notify(vq);
        return ret;
out:
        spin_unlock(&fsvq->lock);
        return ret;
}

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
        struct virtio_fs_forget *forget;
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 dispatch_work.work);
        pr_debug("virtio-fs: worker %s called.\n", __func__);
        while (1) {
                spin_lock(&fsvq->lock);
                forget = list_first_entry_or_null(&fsvq->queued_reqs,
                                        struct virtio_fs_forget, list);
                if (!forget) {
                        spin_unlock(&fsvq->lock);
                        return;
                }

                list_del(&forget->list);
                spin_unlock(&fsvq->lock);
                if (send_forget_request(fsvq, forget, true))
                        return;
        }
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
        struct fuse_args *args = req->args;
        unsigned int offset = 0;
        unsigned int num_in;
        unsigned int num_out;
        unsigned int len;
        unsigned int i;

        num_in = args->in_numargs - args->in_pages;
        num_out = args->out_numargs - args->out_pages;
        len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
              fuse_len_args(num_out, args->out_args);

        req->argbuf = kmalloc(len, GFP_ATOMIC);
        if (!req->argbuf)
                return -ENOMEM;

        for (i = 0; i < num_in; i++) {
                memcpy(req->argbuf + offset,
                       args->in_args[i].value,
                       args->in_args[i].size);
                offset += args->in_args[i].size;
        }

        return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
        unsigned int remaining;
        unsigned int offset;
        unsigned int num_in;
        unsigned int num_out;
        unsigned int i;

        remaining = req->out.h.len - sizeof(req->out.h);
        num_in = args->in_numargs - args->in_pages;
        num_out = args->out_numargs - args->out_pages;
        offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

        for (i = 0; i < num_out; i++) {
                unsigned int argsize = args->out_args[i].size;

                if (args->out_argvar &&
                    i == args->out_numargs - 1 &&
                    argsize > remaining) {
                        argsize = remaining;
                }

                memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
                offset += argsize;

                if (i != args->out_numargs - 1)
                        remaining -= argsize;
        }

        /* Store the actual size of the variable-length arg */
        if (args->out_argvar)
                args->out_args[args->out_numargs - 1].size = remaining;

        kfree(req->argbuf);
        req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
                                       struct virtio_fs_vq *fsvq)
{
        struct fuse_pqueue *fpq = &fsvq->fud->pq;
        struct fuse_args *args;
        struct fuse_args_pages *ap;
        unsigned int len, i, thislen;
        struct page *page;

        /*
         * TODO verify that server properly follows FUSE protocol
         * (oh.uniq, oh.len)
         */
        args = req->args;
        copy_args_from_argbuf(args, req);

        if (args->out_pages && args->page_zeroing) {
                len = args->out_args[args->out_numargs - 1].size;
                ap = container_of(args, typeof(*ap), args);
                for (i = 0; i < ap->num_pages; i++) {
                        thislen = ap->descs[i].length;
                        if (len < thislen) {
                                WARN_ON(ap->descs[i].offset);
                                page = ap->pages[i];
                                zero_user_segment(page, len, thislen);
                                len = 0;
                        } else {
                                len -= thislen;
                        }
                }
        }

        spin_lock(&fpq->lock);
        clear_bit(FR_SENT, &req->flags);
        spin_unlock(&fpq->lock);

        fuse_request_end(req);
        spin_lock(&fsvq->lock);
        dec_in_flight_req(fsvq);
        spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
        struct virtio_fs_req_work *w =
                container_of(work, typeof(*w), done_work);

        virtio_fs_request_complete(w->req, w->fsvq);
        kfree(w);
}
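
/* Work function for the request queue: collect completed requests off the
 * virtqueue and end them, deferring to a worker for requests that may block.
 */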
static void virtio_fs_requests_done_work(struct work_struct *work)
{
        struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
                                                 done_work);
        struct fuse_pqueue *fpq = &fsvq->fud->pq;
        struct virtqueue *vq = fsvq->vq;
        struct fuse_req *req;
        struct fuse_req *next;
        unsigned int len;
        LIST_HEAD(reqs);

        /* Collect completed requests off the virtqueue */
        spin_lock(&fsvq->lock);
        do {
                virtqueue_disable_cb(vq);

                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_lock(&fpq->lock);
                        list_move_tail(&req->list, &reqs);
                        spin_unlock(&fpq->lock);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);

        /* End requests */
        list_for_each_entry_safe(req, next, &reqs, list) {
                list_del_init(&req->list);

                /* blocking async request completes in a worker context */
                if (req->args->may_block) {
                        struct virtio_fs_req_work *w;

                        w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
                        INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
                        w->fsvq = fsvq;
                        w->req = req;
                        schedule_work(&w->done_work);
                } else {
                        virtio_fs_request_complete(req, fsvq);
                }
        }
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
        struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

        dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

        schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
                              int vq_type)
{
        strncpy(fsvq->name, name, VQ_NAME_LEN);
        spin_lock_init(&fsvq->lock);
        INIT_LIST_HEAD(&fsvq->queued_reqs);
        INIT_LIST_HEAD(&fsvq->end_reqs);
        init_completion(&fsvq->in_flight_zero);

        if (vq_type == VQ_REQUEST) {
                INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
                INIT_DELAYED_WORK(&fsvq->dispatch_work,
                                  virtio_fs_request_dispatch_work);
        } else {
                INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
                INIT_DELAYED_WORK(&fsvq->dispatch_work,
                                  virtio_fs_hiprio_dispatch_work);
        }
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
                               struct virtio_fs *fs)
{
        struct virtqueue **vqs;
        vq_callback_t **callbacks;
        const char **names;
        unsigned int i;
        int ret = 0;

        virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
                        &fs->num_request_queues);
        if (fs->num_request_queues == 0)
                return -EINVAL;

        fs->nvqs = VQ_REQUEST + fs->num_request_queues;
        fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
        if (!fs->vqs)
                return -ENOMEM;

        vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
        callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
                                  GFP_KERNEL);
        names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
        if (!vqs || !callbacks || !names) {
                ret = -ENOMEM;
                goto out;
        }

        /* Initialize the hiprio/forget request virtqueue */
        callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
        virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
        names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

        /* Initialize the requests virtqueues */
        for (i = VQ_REQUEST; i < fs->nvqs; i++) {
                char vq_name[VQ_NAME_LEN];

                snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
                virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
                callbacks[i] = virtio_fs_vq_done;
                names[i] = fs->vqs[i].name;
        }

        ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
        if (ret < 0)
                goto out;

        for (i = 0; i < fs->nvqs; i++)
                fs->vqs[i].vq = vqs[i];

        virtio_fs_start_all_queues(fs);
out:
        kfree(names);
        kfree(callbacks);
        kfree(vqs);
        if (ret)
                kfree(fs->vqs);
        return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
                                  struct virtio_fs *fs)
{
        vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                    long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct virtio_fs *fs = dax_get_private(dax_dev);
        phys_addr_t offset = PFN_PHYS(pgoff);
        size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

        if (kaddr)
                *kaddr = fs->window_kaddr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
                                     PFN_DEV | PFN_MAP);
        return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
                                       pgoff_t pgoff, void *addr,
                                       size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
                                     pgoff_t pgoff, void *addr,
                                     size_t bytes, struct iov_iter *i)
{
        return copy_to_iter(addr, bytes, i);
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
                                     pgoff_t pgoff, size_t nr_pages)
{
        long rc;
        void *kaddr;

        rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
        if (rc < 0)
                return rc;
        memset(kaddr, 0, nr_pages << PAGE_SHIFT);
        dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
        return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
        .direct_access = virtio_fs_direct_access,
        .copy_from_iter = virtio_fs_copy_from_iter,
        .copy_to_iter = virtio_fs_copy_to_iter,
        .zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
        struct dax_device *dax_dev = data;

        kill_dax(dax_dev);
        put_dax(dax_dev);
}
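
/* Set up the DAX window: memremap the device's shared memory cache region
 * (if any) and register a dax_device on top of it.
 */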
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
        struct virtio_shm_region cache_reg;
        struct dev_pagemap *pgmap;
        bool have_cache;

        if (!IS_ENABLED(CONFIG_FUSE_DAX))
                return 0;

        /* Get cache region */
        have_cache = virtio_get_shm_region(vdev, &cache_reg,
                                           (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
        if (!have_cache) {
                dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
                return 0;
        }

        if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
                                     dev_name(&vdev->dev))) {
                dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
                         cache_reg.addr, cache_reg.len);
                return -EBUSY;
        }

        dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
                   cache_reg.addr);

        pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
                return -ENOMEM;

        pgmap->type = MEMORY_DEVICE_FS_DAX;

        /* Ideally we would directly use the PCI BAR resource but
         * devm_memremap_pages() wants its own copy in pgmap. So
         * initialize a struct resource from scratch (only the start
         * and end fields will be used).
         */
        pgmap->range = (struct range) {
                .start = (phys_addr_t) cache_reg.addr,
                .end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
        };
        pgmap->nr_range = 1;

        fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
        if (IS_ERR(fs->window_kaddr))
                return PTR_ERR(fs->window_kaddr);

        fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
        fs->window_len = (phys_addr_t) cache_reg.len;

        dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
                __func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

        fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
        if (IS_ERR(fs->dax_dev))
                return PTR_ERR(fs->dax_dev);

        return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
                                        fs->dax_dev);
}
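
/* Device probe: read the tag, set up virtqueues and the optional DAX window,
 * bring the device online and register the instance so it can be mounted.
 */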
static int virtio_fs_probe(struct virtio_device *vdev)
{
        struct virtio_fs *fs;
        int ret;

        fs = kzalloc(sizeof(*fs), GFP_KERNEL);
        if (!fs)
                return -ENOMEM;
        kref_init(&fs->refcount);
        vdev->priv = fs;

        ret = virtio_fs_read_tag(vdev, fs);
        if (ret < 0)
                goto out;

        ret = virtio_fs_setup_vqs(vdev, fs);
        if (ret < 0)
                goto out;

        /* TODO vq affinity */

        ret = virtio_fs_setup_dax(vdev, fs);
        if (ret < 0)
                goto out_vqs;

        /* Bring the device online in case the filesystem is mounted and
         * requests need to be sent before we return.
         */
        virtio_device_ready(vdev);

        ret = virtio_fs_add_instance(fs);
        if (ret < 0)
                goto out_vqs;

        return 0;

out_vqs:
        vdev->config->reset(vdev);
        virtio_fs_cleanup_vqs(vdev, fs);
        kfree(fs->vqs);

out:
        vdev->priv = NULL;
        kfree(fs);
        return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
        struct virtio_fs_vq *fsvq;
        int i;

        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
                spin_lock(&fsvq->lock);
                fsvq->connected = false;
                spin_unlock(&fsvq->lock);
        }
}
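
/* Device removal: unpublish the instance, quiesce and drain all queues, then
 * reset the device and drop the driver's reference on the virtio_fs object.
 */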
static void virtio_fs_remove(struct virtio_device *vdev)
{
        struct virtio_fs *fs = vdev->priv;

        mutex_lock(&virtio_fs_mutex);
        /* This device is going away. No one should get new reference */
        list_del_init(&fs->list);
        virtio_fs_stop_all_queues(fs);
        virtio_fs_drain_all_queues_locked(fs);
        vdev->config->reset(vdev);
        virtio_fs_cleanup_vqs(vdev, fs);

        vdev->priv = NULL;
        /* Put device reference on virtio_fs object */
        virtio_fs_put(fs);
        mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
        /* TODO need to save state here */
        pr_warn("virtio-fs: suspend/resume not yet supported\n");
        return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
        /* TODO need to restore state here */
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
        {},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
        .driver.name            = KBUILD_MODNAME,
        .driver.owner           = THIS_MODULE,
        .id_table               = id_table,
        .feature_table          = feature_table,
        .feature_table_size     = ARRAY_SIZE(feature_table),
        .probe                  = virtio_fs_probe,
        .remove                 = virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
        .freeze                 = virtio_fs_freeze,
        .restore                = virtio_fs_restore,
#endif
};
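
/* Build a FUSE_FORGET request from the dequeued forget link and send it on
 * the hiprio virtqueue.
 */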
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        struct fuse_forget_link *link;
        struct virtio_fs_forget *forget;
        struct virtio_fs_forget_req *req;
        struct virtio_fs *fs;
        struct virtio_fs_vq *fsvq;
        u64 unique;

        link = fuse_dequeue_forget(fiq, 1, NULL);
        unique = fuse_get_unique(fiq);

        fs = fiq->priv;
        fsvq = &fs->vqs[VQ_HIPRIO];
        spin_unlock(&fiq->lock);

        /* Allocate a buffer for the request */
        forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
        req = &forget->req;

        req->ih = (struct fuse_in_header){
                .opcode = FUSE_FORGET,
                .nodeid = link->forget_one.nodeid,
                .unique = unique,
                .len = sizeof(*req),
        };
        req->arg = (struct fuse_forget_in){
                .nlookup = link->forget_one.nlookup,
        };

        send_forget_request(fsvq, forget, false);
        kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        /*
         * TODO interrupts.
         *
         * Normal fs operations on a local filesystem aren't interruptible.
         * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
         * with shared lock between host and guest.
         */
        spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
                                        unsigned int num_pages,
                                        unsigned int total_len)
{
        unsigned int i;
        unsigned int this_len;

        for (i = 0; i < num_pages && total_len; i++) {
                this_len = min(page_descs[i].length, total_len);
                total_len -= this_len;
        }

        return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
        struct fuse_args *args = req->args;
        struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
        unsigned int size, total_sgs = 1 /* fuse_in_header */;

        if (args->in_numargs - args->in_pages)
                total_sgs += 1;

        if (args->in_pages) {
                size = args->in_args[args->in_numargs - 1].size;
                total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
                                                 size);
        }

        if (!test_bit(FR_ISREPLY, &req->flags))
                return total_sgs;

        total_sgs += 1 /* fuse_out_header */;

        if (args->out_numargs - args->out_pages)
                total_sgs += 1;

        if (args->out_pages) {
                size = args->out_args[args->out_numargs - 1].size;
                total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
                                                 size);
        }

        return total_sgs;
}

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
                                       struct page **pages,
                                       struct fuse_page_desc *page_descs,
                                       unsigned int num_pages,
                                       unsigned int total_len)
{
        unsigned int i;
        unsigned int this_len;

        for (i = 0; i < num_pages && total_len; i++) {
                sg_init_table(&sg[i], 1);
                this_len = min(page_descs[i].length, total_len);
                sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
                total_len -= this_len;
        }

        return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
                                      struct fuse_req *req,
                                      struct fuse_arg *args,
                                      unsigned int numargs,
                                      bool argpages,
                                      void *argbuf,
                                      unsigned int *len_used)
{
        struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
        unsigned int total_sgs = 0;
        unsigned int len;

        len = fuse_len_args(numargs - argpages, args);
        if (len)
                sg_init_one(&sg[total_sgs++], argbuf, len);

        if (argpages)
                total_sgs += sg_init_fuse_pages(&sg[total_sgs],
                                                ap->pages, ap->descs,
                                                ap->num_pages,
                                                args[numargs - 1].size);

        if (len_used)
                *len_used = len;

        return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                                 struct fuse_req *req, bool in_flight)
{
        /* requests need at least 4 elements */
        struct scatterlist *stack_sgs[6];
        struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
        struct scatterlist **sgs = stack_sgs;
        struct scatterlist *sg = stack_sg;
        struct virtqueue *vq;
        struct fuse_args *args = req->args;
        unsigned int argbuf_used = 0;
        unsigned int out_sgs = 0;
        unsigned int in_sgs = 0;
        unsigned int total_sgs;
        unsigned int i;
        int ret;
        bool notify;
        struct fuse_pqueue *fpq;

        /* Does the sglist fit on the stack? */
        total_sgs = sg_count_fuse_req(req);
        if (total_sgs > ARRAY_SIZE(stack_sgs)) {
                sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
                sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
                if (!sgs || !sg) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        /* Use a bounce buffer since stack args cannot be mapped */
        ret = copy_args_to_argbuf(req);
        if (ret < 0)
                goto out;

        /* Request elements */
        sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
        out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
                                     (struct fuse_arg *)args->in_args,
                                     args->in_numargs, args->in_pages,
                                     req->argbuf, &argbuf_used);

        /* Reply elements */
        if (test_bit(FR_ISREPLY, &req->flags)) {
                sg_init_one(&sg[out_sgs + in_sgs++],
                            &req->out.h, sizeof(req->out.h));
                in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
                                            args->out_args, args->out_numargs,
                                            args->out_pages,
                                            req->argbuf + argbuf_used, NULL);
        }

        WARN_ON(out_sgs + in_sgs != total_sgs);

        for (i = 0; i < total_sgs; i++)
                sgs[i] = &sg[i];

        spin_lock(&fsvq->lock);

        if (!fsvq->connected) {
                spin_unlock(&fsvq->lock);
                ret = -ENOTCONN;
                goto out;
        }

        vq = fsvq->vq;
        ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock(&fsvq->lock);
                goto out;
        }

        /* Request successfully sent. */
        fpq = &fsvq->fud->pq;
        spin_lock(&fpq->lock);
        list_add_tail(&req->list, fpq->processing);
        spin_unlock(&fpq->lock);
        set_bit(FR_SENT, &req->flags);
        /* matches barrier in request_wait_answer() */
        smp_mb__after_atomic();

        if (!in_flight)
                inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);

        spin_unlock(&fsvq->lock);

        if (notify)
                virtqueue_notify(vq);

out:
        if (ret < 0 && req->argbuf) {
                kfree(req->argbuf);
                req->argbuf = NULL;
        }
        if (sgs != stack_sgs) {
                kfree(sgs);
                kfree(sg);
        }

        return ret;
}
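
/* Dequeue the next pending request from the fuse input queue and submit it
 * to the request virtqueue, falling back to the dispatch worker on
 * -ENOMEM/-ENOSPC or on submission failure.
 */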
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
        struct virtio_fs *fs;
        struct fuse_req *req;
        struct virtio_fs_vq *fsvq;
        int ret;

        WARN_ON(list_empty(&fiq->pending));
        req = list_last_entry(&fiq->pending, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);
        list_del_init(&req->list);
        WARN_ON(!list_empty(&fiq->pending));
        spin_unlock(&fiq->lock);

        fs = fiq->priv;

        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
                 __func__, req->in.h.opcode, req->in.h.unique,
                 req->in.h.nodeid, req->in.h.len,
                 fuse_len_args(req->args->out_numargs, req->args->out_args));

        fsvq = &fs->vqs[queue_id];
        ret = virtio_fs_enqueue_req(fsvq, req, false);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
                        /*
                         * Virtqueue full. Retry submission from worker
                         * context as we might be holding fc->bg_lock.
                         */
                        spin_lock(&fsvq->lock);
                        list_add_tail(&req->list, &fsvq->queued_reqs);
                        inc_in_flight_req(fsvq);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                              msecs_to_jiffies(1));
                        spin_unlock(&fsvq->lock);
                        return;
                }
                req->out.h.error = ret;
                pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

                /* Can't end request in submission context. Use a worker */
                spin_lock(&fsvq->lock);
                list_add_tail(&req->list, &fsvq->end_reqs);
                schedule_delayed_work(&fsvq->dispatch_work, 0);
                spin_unlock(&fsvq->lock);
                return;
        }
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
        .wake_forget_and_unlock         = virtio_fs_wake_forget_and_unlock,
        .wake_interrupt_and_unlock      = virtio_fs_wake_interrupt_and_unlock,
        .wake_pending_and_unlock        = virtio_fs_wake_pending_and_unlock,
        .release                        = virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
        ctx->rootmode = S_IFDIR;
        ctx->default_permissions = 1;
        ctx->allow_other = 1;
        ctx->max_read = UINT_MAX;
        ctx->blksize = 512;
        ctx->destroy = true;
        ctx->no_control = true;
        ctx->no_force_umount = true;
}
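
/* Fill in the superblock: allocate a fuse_dev per virtqueue, wire up DAX if
 * requested, and send FUSE_INIT.
 */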
static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        struct fuse_conn *fc = fm->fc;
        struct virtio_fs *fs = fc->iq.priv;
        struct fuse_fs_context *ctx = fsc->fs_private;
        unsigned int i;
        int err;

        virtio_fs_ctx_set_defaults(ctx);
        mutex_lock(&virtio_fs_mutex);

        /* After holding mutex, make sure virtiofs device is still there.
         * Though we are holding a reference to it, driver ->remove might
         * still have cleaned up virtual queues. In that case bail out.
         */
        err = -EINVAL;
        if (list_empty(&fs->list)) {
                pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
                goto err;
        }

        err = -ENOMEM;
        /* Allocate fuse_dev for hiprio and notification queues */
        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                fsvq->fud = fuse_dev_alloc();
                if (!fsvq->fud)
                        goto err_free_fuse_devs;
        }

        /* virtiofs allocates and installs its own fuse devices */
        ctx->fudptr = NULL;
        if (ctx->dax) {
                if (!fs->dax_dev) {
                        err = -EINVAL;
                        pr_err("virtio-fs: dax can't be enabled as filesystem"
                               " device does not support it.\n");
                        goto err_free_fuse_devs;
                }
                ctx->dax_dev = fs->dax_dev;
        }

        err = fuse_fill_super_common(sb, ctx);
        if (err < 0)
                goto err_free_fuse_devs;

        for (i = 0; i < fs->nvqs; i++) {
                struct virtio_fs_vq *fsvq = &fs->vqs[i];

                fuse_dev_install(fsvq->fud, fc);
        }

        /* Previous unmount will stop all queues. Start these again */
        virtio_fs_start_all_queues(fs);
        fuse_send_init(fm);
        mutex_unlock(&virtio_fs_mutex);
        return 0;

err_free_fuse_devs:
        virtio_fs_free_devs(fs);
err:
        mutex_unlock(&virtio_fs_mutex);
        return err;
}
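
/* Tear down the fuse connection: cancel DAX work, stop the forget (hiprio)
 * queue, let fuse_conn_destroy() send FUSE_DESTROY, then stop and drain all
 * queues and free the fuse devices.
 */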
static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
        struct fuse_conn *fc = fm->fc;
        struct virtio_fs *vfs = fc->iq.priv;
        struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

        /* Stop dax worker. Soon evict_inodes() will be called which
         * will free all memory ranges belonging to all inodes.
         */
        if (IS_ENABLED(CONFIG_FUSE_DAX))
                fuse_dax_cancel_work(fc);

        /* Stop forget queue. Soon destroy will be sent */
        spin_lock(&fsvq->lock);
        fsvq->connected = false;
        spin_unlock(&fsvq->lock);
        virtio_fs_drain_all_queues(vfs);

        fuse_conn_destroy(fm);

        /* fuse_conn_destroy() must have sent destroy. Stop all queues
         * and drain one more time and free fuse devices. Freeing fuse
         * devices will drop their reference on fuse_conn and that in
         * turn will drop its reference on virtio_fs object.
         */
        virtio_fs_stop_all_queues(vfs);
        virtio_fs_drain_all_queues(vfs);
        virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
        struct fuse_mount *fm = get_fuse_mount_super(sb);
        bool last;

        /* If mount failed, we can still be called without any fc */
        if (fm) {
                last = fuse_mount_remove(fm);
                if (last)
                        virtio_fs_conn_destroy(fm);
        }
        kill_anon_super(sb);
}

static int virtio_fs_test_super(struct super_block *sb,
                                struct fs_context *fsc)
{
        struct fuse_mount *fsc_fm = fsc->s_fs_info;
        struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

        return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

static int virtio_fs_set_super(struct super_block *sb,
                               struct fs_context *fsc)
{
        int err;

        err = get_anon_bdev(&sb->s_dev);
        if (!err)
                fuse_mount_get(fsc->s_fs_info);
        return err;
}
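
/* Look up the virtio_fs instance named by the mount source (tag), set up a
 * fuse_conn/fuse_mount pair and obtain or create the superblock for it.
 */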
static int virtio_fs_get_tree(struct fs_context *fsc)
{
        struct virtio_fs *fs;
        struct super_block *sb;
        struct fuse_conn *fc;
        struct fuse_mount *fm;
        int err;

        /* This gets a reference on virtio_fs object. This ptr gets installed
         * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
         * to drop the reference to this object.
         */
        fs = virtio_fs_find_instance(fsc->source);
        if (!fs) {
                pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
                return -EINVAL;
        }

        fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
        if (!fc) {
                mutex_lock(&virtio_fs_mutex);
                virtio_fs_put(fs);
                mutex_unlock(&virtio_fs_mutex);
                return -ENOMEM;
        }

        fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
        if (!fm) {
                mutex_lock(&virtio_fs_mutex);
                virtio_fs_put(fs);
                mutex_unlock(&virtio_fs_mutex);
                kfree(fc);
                return -ENOMEM;
        }

        fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
        fc->release = fuse_free_conn;
        fc->delete_stale = true;
        fc->auto_submounts = true;

        fsc->s_fs_info = fm;
        sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
        fuse_mount_put(fm);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        if (!sb->s_root) {
                err = virtio_fs_fill_super(sb, fsc);
                if (err) {
                        fuse_mount_put(fm);
                        sb->s_fs_info = NULL;
                        deactivate_locked_super(sb);
                        return err;
                }

                sb->s_flags |= SB_ACTIVE;
        }

        WARN_ON(fsc->root);
        fsc->root = dget(sb->s_root);
        return 0;
}

static const struct fs_context_operations virtio_fs_context_ops = {
        .free           = virtio_fs_free_fc,
        .parse_param    = virtio_fs_parse_param,
        .get_tree       = virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
        struct fuse_fs_context *ctx;

        ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        fsc->fs_private = ctx;
        fsc->ops = &virtio_fs_context_ops;
        return 0;
}

static struct file_system_type virtio_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "virtiofs",
        .init_fs_context = virtio_fs_init_fs_context,
        .kill_sb        = virtio_kill_sb,
};
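
/* Module init: register the virtio driver first, then the "virtiofs"
 * filesystem type; unwind the driver registration if the latter fails.
 */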
static int __init virtio_fs_init(void)
{
        int ret;

        ret = register_virtio_driver(&virtio_fs_driver);
        if (ret < 0)
                return ret;

        ret = register_filesystem(&virtio_fs_type);
        if (ret < 0) {
                unregister_virtio_driver(&virtio_fs_driver);
                return ret;
        }

        return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
        unregister_filesystem(&virtio_fs_type);
        unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);