/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
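
/* Reset a request to its initial state, holding a single reference */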
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
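
/*
 * Block every signal except SIGKILL around a wait, so that only a
 * fatal signal can interrupt it; restore_sigs() restores the old mask.
 */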
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}
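
/*
 * Reserve a request for a normal operation.  May block while the
 * connection is blocked, and returns an ERR_PTR (-EINTR, -ENOTCONN or
 * -ENOMEM) on failure.
 */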
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->blocked_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up(&fc->blocked_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	dput(req->dentry);
	mntput(req->vfsmount);
	if (req->file)
		fput(req->file);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}
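
/*
 * Wait until the request is finished.  Interruptible by any signal;
 * returns immediately if one is already pending.  Called with
 * fc->lock held, which is dropped and reacquired around the sleep.
 */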
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
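
/*
 * Queue an INTERRUPT for the given request and notify the device
 * readers.  Called with fc->lock held.
 */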
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (req->force) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	req->out.h.error = -EINTR;
	req->aborted = 1;

 aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
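
/*
 * Hand out the next unique request ID; zero is reserved, so the
 * counter skips it on wraparound.  Called with fc->lock held.
 */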
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
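
/*
 * Fill in the header length and unique ID, add the request to the
 * pending list and wake up any reader of the device.  Called with
 * fc->lock held.
 */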
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
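
/*
 * Send a request and wait for the answer.  If the connection is gone
 * or in error, the error is reported through req->out.h.error instead.
 */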
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
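
/*
 * Queue a request without waiting for the answer; once
 * FUSE_MAX_BACKGROUND of them are in flight, further request
 * allocations are blocked until an answer arrives.
 */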
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};
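
/*
 * Set up the state for copying between a request and a userspace
 * iovec; 'write' is nonzero when data flows from the kernel into the
 * userspace buffer.
 */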
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
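
/* Is a request or interrupt waiting to be read?  Called with fc->lock held. */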
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->aborted)
		err = -ENOENT;
	if (err) {
		if (!req->aborted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
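
/*
 * The device is always writable; it becomes readable when a request
 * or interrupt is pending, and signals POLLERR once the connection is
 * aborted or released.
 */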
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
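
/* Create the request slab cache and register the /dev/fuse misc device */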
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}