dir.c

  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU GPL.
  5. See the file COPYING.
  6. */
  7. #include "fuse_i.h"
  8. #include <linux/pagemap.h>
  9. #include <linux/file.h>
  10. #include <linux/fs_context.h>
  11. #include <linux/sched.h>
  12. #include <linux/namei.h>
  13. #include <linux/slab.h>
  14. #include <linux/xattr.h>
  15. #include <linux/iversion.h>
  16. #include <linux/posix_acl.h>
  17. static void fuse_advise_use_readdirplus(struct inode *dir)
  18. {
  19. struct fuse_inode *fi = get_fuse_inode(dir);
  20. set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
  21. }
  22. #if BITS_PER_LONG >= 64
  23. static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
  24. {
  25. entry->d_fsdata = (void *) time;
  26. }
  27. static inline u64 fuse_dentry_time(const struct dentry *entry)
  28. {
  29. return (u64)entry->d_fsdata;
  30. }
  31. #else
  32. union fuse_dentry {
  33. u64 time;
  34. struct rcu_head rcu;
  35. };
  36. static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
  37. {
  38. ((union fuse_dentry *) dentry->d_fsdata)->time = time;
  39. }
  40. static inline u64 fuse_dentry_time(const struct dentry *entry)
  41. {
  42. return ((union fuse_dentry *) entry->d_fsdata)->time;
  43. }
  44. #endif
  45. static void fuse_dentry_settime(struct dentry *dentry, u64 time)
  46. {
  47. struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
  48. bool delete = !time && fc->delete_stale;
  49. /*
  50. * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
  51. * Don't care about races, either way it's just an optimization
  52. */
  53. if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
  54. (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
  55. spin_lock(&dentry->d_lock);
  56. if (!delete)
  57. dentry->d_flags &= ~DCACHE_OP_DELETE;
  58. else
  59. dentry->d_flags |= DCACHE_OP_DELETE;
  60. spin_unlock(&dentry->d_lock);
  61. }
  62. __fuse_dentry_settime(dentry, time);
  63. }
  64. /*
  65. * FUSE caches dentries and attributes with separate timeout. The
  66. * time in jiffies until the dentry/attributes are valid is stored in
  67. * dentry->d_fsdata and fuse_inode->i_time respectively.
  68. */
  69. /*
  70. * Calculate the time in jiffies until a dentry/attributes are valid
  71. */
  72. static u64 time_to_jiffies(u64 sec, u32 nsec)
  73. {
  74. if (sec || nsec) {
  75. struct timespec64 ts = {
  76. sec,
  77. min_t(u32, nsec, NSEC_PER_SEC - 1)
  78. };
  79. return get_jiffies_64() + timespec64_to_jiffies(&ts);
  80. } else
  81. return 0;
  82. }
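/*
 * Editor's sketch (not in the original source): how a lookup reply maps to a
 * dentry timeout.  For a hypothetical reply carrying entry_valid == 2 and
 * entry_valid_nsec == 500000000, time_to_jiffies() returns approximately
 *
 *	get_jiffies_64() + 2 * HZ + HZ / 2
 *
 * while entry_valid == 0 && entry_valid_nsec == 0 returns 0, i.e. the dentry
 * is treated as immediately stale by fuse_dentry_delete()/_revalidate().
 */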
  83. /*
  84. * Set dentry and possibly attribute timeouts from the lookup/mk*
  85. * replies
  86. */
  87. void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
  88. {
  89. fuse_dentry_settime(entry,
  90. time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
  91. }
  92. static u64 attr_timeout(struct fuse_attr_out *o)
  93. {
  94. return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
  95. }
  96. u64 entry_attr_timeout(struct fuse_entry_out *o)
  97. {
  98. return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
  99. }
  100. static void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
  101. {
  102. set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
  103. }
  104. /*
  105. * Mark the attributes as stale, so that at the next call to
  106. * ->getattr() they will be fetched from userspace
  107. */
  108. void fuse_invalidate_attr(struct inode *inode)
  109. {
  110. fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
  111. }
  112. static void fuse_dir_changed(struct inode *dir)
  113. {
  114. fuse_invalidate_attr(dir);
  115. inode_maybe_inc_iversion(dir, false);
  116. }
  117. /**
  118. * Mark the attributes as stale due to an atime change. Avoid the invalidate if
  119. * atime is not used.
  120. */
  121. void fuse_invalidate_atime(struct inode *inode)
  122. {
  123. if (!IS_RDONLY(inode))
  124. fuse_invalidate_attr_mask(inode, STATX_ATIME);
  125. }
  126. /*
127. * Just mark the entry as stale, so that the next attempt to look it up
  128. * will result in a new lookup call to userspace
  129. *
  130. * This is called when a dentry is about to become negative and the
  131. * timeout is unknown (unlink, rmdir, rename and in some cases
  132. * lookup)
  133. */
  134. void fuse_invalidate_entry_cache(struct dentry *entry)
  135. {
  136. fuse_dentry_settime(entry, 0);
  137. }
  138. /*
  139. * Same as fuse_invalidate_entry_cache(), but also try to remove the
  140. * dentry from the hash
  141. */
  142. static void fuse_invalidate_entry(struct dentry *entry)
  143. {
  144. d_invalidate(entry);
  145. fuse_invalidate_entry_cache(entry);
  146. }
  147. static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
  148. u64 nodeid, const struct qstr *name,
  149. struct fuse_entry_out *outarg)
  150. {
  151. memset(outarg, 0, sizeof(struct fuse_entry_out));
  152. args->opcode = FUSE_LOOKUP;
  153. args->nodeid = nodeid;
  154. args->in_numargs = 1;
  155. args->in_args[0].size = name->len + 1;
  156. args->in_args[0].value = name->name;
  157. args->out_numargs = 1;
  158. args->out_args[0].size = sizeof(struct fuse_entry_out);
  159. args->out_args[0].value = outarg;
  160. }
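/*
 * Editor's note (illustrative, not part of the original source): the request
 * built above carries a single input argument, the name including its
 * terminating NUL, and expects a single fuse_entry_out in return.  A sketch of
 * the resulting exchange for a lookup of "foo" under nodeid 1:
 *
 *	in:  fuse_in_header { opcode = FUSE_LOOKUP, nodeid = 1, ... } "foo\0"
 *	out: fuse_out_header { ... } struct fuse_entry_out
 */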
  161. /*
  162. * Check whether the dentry is still valid
  163. *
  164. * If the entry validity timeout has expired and the dentry is
  165. * positive, try to redo the lookup. If the lookup results in a
  166. * different inode, then let the VFS invalidate the dentry and redo
  167. * the lookup once more. If the lookup results in the same inode,
  168. * then refresh the attributes, timeouts and mark the dentry valid.
  169. */
  170. static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
  171. {
  172. struct inode *inode;
  173. struct dentry *parent;
  174. struct fuse_mount *fm;
  175. struct fuse_inode *fi;
  176. int ret;
  177. inode = d_inode_rcu(entry);
  178. if (inode && fuse_is_bad(inode))
  179. goto invalid;
  180. else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
  181. (flags & LOOKUP_REVAL)) {
  182. struct fuse_entry_out outarg;
  183. FUSE_ARGS(args);
  184. struct fuse_forget_link *forget;
  185. u64 attr_version;
  186. /* For negative dentries, always do a fresh lookup */
  187. if (!inode)
  188. goto invalid;
  189. ret = -ECHILD;
  190. if (flags & LOOKUP_RCU)
  191. goto out;
  192. fm = get_fuse_mount(inode);
  193. forget = fuse_alloc_forget();
  194. ret = -ENOMEM;
  195. if (!forget)
  196. goto out;
  197. attr_version = fuse_get_attr_version(fm->fc);
  198. parent = dget_parent(entry);
  199. fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
  200. &entry->d_name, &outarg);
  201. ret = fuse_simple_request(fm, &args);
  202. dput(parent);
  203. /* Zero nodeid is same as -ENOENT */
  204. if (!ret && !outarg.nodeid)
  205. ret = -ENOENT;
  206. if (!ret) {
  207. fi = get_fuse_inode(inode);
  208. if (outarg.nodeid != get_node_id(inode) ||
  209. (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
  210. fuse_queue_forget(fm->fc, forget,
  211. outarg.nodeid, 1);
  212. goto invalid;
  213. }
  214. spin_lock(&fi->lock);
  215. fi->nlookup++;
  216. spin_unlock(&fi->lock);
  217. }
  218. kfree(forget);
  219. if (ret == -ENOMEM)
  220. goto out;
  221. if (ret || fuse_invalid_attr(&outarg.attr) ||
  222. fuse_stale_inode(inode, outarg.generation, &outarg.attr))
  223. goto invalid;
  224. forget_all_cached_acls(inode);
  225. fuse_change_attributes(inode, &outarg.attr,
  226. entry_attr_timeout(&outarg),
  227. attr_version);
  228. fuse_change_entry_timeout(entry, &outarg);
  229. } else if (inode) {
  230. fi = get_fuse_inode(inode);
  231. if (flags & LOOKUP_RCU) {
  232. if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
  233. return -ECHILD;
  234. } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
  235. parent = dget_parent(entry);
  236. fuse_advise_use_readdirplus(d_inode(parent));
  237. dput(parent);
  238. }
  239. }
  240. ret = 1;
  241. out:
  242. return ret;
  243. invalid:
  244. ret = 0;
  245. goto out;
  246. }
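/*
 * Editor's note (not in the original source): ->d_revalidate() above has three
 * outcomes: 1 means the dentry (and, after a fresh lookup, its attributes and
 * timeouts) is still valid; 0 makes the VFS invalidate the dentry and redo the
 * lookup; -ECHILD is returned in RCU-walk mode whenever the check would need
 * to sleep (allocation, request to the server), so the VFS falls back to
 * ref-walk and calls this again.
 */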
  247. #if BITS_PER_LONG < 64
  248. static int fuse_dentry_init(struct dentry *dentry)
  249. {
  250. dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
  251. GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
  252. return dentry->d_fsdata ? 0 : -ENOMEM;
  253. }
  254. static void fuse_dentry_release(struct dentry *dentry)
  255. {
  256. union fuse_dentry *fd = dentry->d_fsdata;
  257. kfree_rcu(fd, rcu);
  258. }
  259. #endif
  260. static int fuse_dentry_delete(const struct dentry *dentry)
  261. {
  262. return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
  263. }
  264. /*
  265. * Create a fuse_mount object with a new superblock (with path->dentry
  266. * as the root), and return that mount so it can be auto-mounted on
  267. * @path.
  268. */
  269. static struct vfsmount *fuse_dentry_automount(struct path *path)
  270. {
  271. struct fs_context *fsc;
  272. struct fuse_mount *parent_fm = get_fuse_mount_super(path->mnt->mnt_sb);
  273. struct fuse_conn *fc = parent_fm->fc;
  274. struct fuse_mount *fm;
  275. struct vfsmount *mnt;
  276. struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
  277. struct super_block *sb;
  278. int err;
  279. fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
  280. if (IS_ERR(fsc)) {
  281. err = PTR_ERR(fsc);
  282. goto out;
  283. }
  284. err = -ENOMEM;
  285. fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
  286. if (!fm)
  287. goto out_put_fsc;
  288. refcount_set(&fm->count, 1);
  289. fsc->s_fs_info = fm;
  290. sb = sget_fc(fsc, NULL, set_anon_super_fc);
  291. if (IS_ERR(sb)) {
  292. err = PTR_ERR(sb);
  293. fuse_mount_put(fm);
  294. goto out_put_fsc;
  295. }
  296. fm->fc = fuse_conn_get(fc);
  297. /* Initialize superblock, making @mp_fi its root */
  298. err = fuse_fill_super_submount(sb, mp_fi);
  299. if (err) {
  300. fuse_conn_put(fc);
  301. kfree(fm);
  302. sb->s_fs_info = NULL;
  303. goto out_put_sb;
  304. }
  305. down_write(&fc->killsb);
  306. list_add_tail(&fm->fc_entry, &fc->mounts);
  307. up_write(&fc->killsb);
  308. sb->s_flags |= SB_ACTIVE;
  309. fsc->root = dget(sb->s_root);
  310. /*
  311. * FIXME: setting SB_BORN requires a write barrier for
  312. * super_cache_count(). We should actually come
  313. * up with a proper ->get_tree() implementation
  314. * for submounts and call vfs_get_tree() to take
  315. * care of the write barrier.
  316. */
  317. smp_wmb();
  318. sb->s_flags |= SB_BORN;
  319. /* We are done configuring the superblock, so unlock it */
  320. up_write(&sb->s_umount);
  321. /* Create the submount */
  322. mnt = vfs_create_mount(fsc);
  323. if (IS_ERR(mnt)) {
  324. err = PTR_ERR(mnt);
  325. goto out_put_fsc;
  326. }
  327. mntget(mnt);
  328. put_fs_context(fsc);
  329. return mnt;
  330. out_put_sb:
  331. /*
  332. * Only jump here when fsc->root is NULL and sb is still locked
  333. * (otherwise put_fs_context() will put the superblock)
  334. */
  335. deactivate_locked_super(sb);
  336. out_put_fsc:
  337. put_fs_context(fsc);
  338. out:
  339. return ERR_PTR(err);
  340. }
  341. /*
  342. * Get the canonical path. Since we must translate to a path, this must be done
343. * in the context of the userspace daemon; however, the userspace daemon cannot
  344. * look up paths on its own. Instead, we handle the lookup as a special case
  345. * inside of the write request.
  346. */
  347. static void fuse_dentry_canonical_path(const struct path *path,
  348. struct path *canonical_path)
  349. {
  350. struct inode *inode = d_inode(path->dentry);
  351. //struct fuse_conn *fc = get_fuse_conn(inode);
  352. struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
  353. FUSE_ARGS(args);
  354. char *path_name;
  355. int err;
  356. path_name = (char *)get_zeroed_page(GFP_KERNEL);
  357. if (!path_name)
  358. goto default_path;
  359. args.opcode = FUSE_CANONICAL_PATH;
  360. args.nodeid = get_node_id(inode);
  361. args.in_numargs = 0;
  362. args.out_numargs = 1;
  363. args.out_args[0].size = PATH_MAX;
  364. args.out_args[0].value = path_name;
  365. args.canonical_path = canonical_path;
  366. args.out_argvar = 1;
  367. err = fuse_simple_request(fm, &args);
  368. free_page((unsigned long)path_name);
  369. if (err > 0)
  370. return;
  371. default_path:
  372. canonical_path->dentry = path->dentry;
  373. canonical_path->mnt = path->mnt;
  374. path_get(canonical_path);
  375. }
  376. const struct dentry_operations fuse_dentry_operations = {
  377. .d_revalidate = fuse_dentry_revalidate,
  378. .d_delete = fuse_dentry_delete,
  379. #if BITS_PER_LONG < 64
  380. .d_init = fuse_dentry_init,
  381. .d_release = fuse_dentry_release,
  382. #endif
  383. .d_automount = fuse_dentry_automount,
  384. .d_canonical_path = fuse_dentry_canonical_path,
  385. };
  386. const struct dentry_operations fuse_root_dentry_operations = {
  387. #if BITS_PER_LONG < 64
  388. .d_init = fuse_dentry_init,
  389. .d_release = fuse_dentry_release,
  390. #endif
  391. };
  392. int fuse_valid_type(int m)
  393. {
  394. return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
  395. S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
  396. }
  397. bool fuse_invalid_attr(struct fuse_attr *attr)
  398. {
  399. return !fuse_valid_type(attr->mode) ||
  400. attr->size > LLONG_MAX;
  401. }
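/*
 * Editor's sketch (not in the original source): a reply is rejected here if
 * the mode carries no recognised file type or the size would overflow loff_t.
 * For example, a hypothetical server answering a LOOKUP with attr.mode = 0644
 * (permission bits but no S_IFMT type bits) makes fuse_invalid_attr() return
 * true, and callers such as fuse_lookup_name() fail the request with -EIO
 * instead of instantiating a bogus inode.
 */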
  402. int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
  403. struct fuse_entry_out *outarg, struct inode **inode)
  404. {
  405. struct fuse_mount *fm = get_fuse_mount_super(sb);
  406. FUSE_ARGS(args);
  407. struct fuse_forget_link *forget;
  408. u64 attr_version;
  409. int err;
  410. *inode = NULL;
  411. err = -ENAMETOOLONG;
  412. if (name->len > FUSE_NAME_MAX)
  413. goto out;
  414. forget = fuse_alloc_forget();
  415. err = -ENOMEM;
  416. if (!forget)
  417. goto out;
  418. attr_version = fuse_get_attr_version(fm->fc);
  419. fuse_lookup_init(fm->fc, &args, nodeid, name, outarg);
  420. err = fuse_simple_request(fm, &args);
  421. /* Zero nodeid is same as -ENOENT, but with valid timeout */
  422. if (err || !outarg->nodeid)
  423. goto out_put_forget;
  424. err = -EIO;
  425. if (!outarg->nodeid)
  426. goto out_put_forget;
  427. if (fuse_invalid_attr(&outarg->attr))
  428. goto out_put_forget;
  429. *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
  430. &outarg->attr, entry_attr_timeout(outarg),
  431. attr_version);
  432. err = -ENOMEM;
  433. if (!*inode) {
  434. fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
  435. goto out;
  436. }
  437. err = 0;
  438. out_put_forget:
  439. kfree(forget);
  440. out:
  441. return err;
  442. }
  443. static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
  444. unsigned int flags)
  445. {
  446. int err;
  447. struct fuse_entry_out outarg;
  448. struct inode *inode;
  449. struct dentry *newent;
  450. bool outarg_valid = true;
  451. bool locked;
  452. if (fuse_is_bad(dir))
  453. return ERR_PTR(-EIO);
  454. locked = fuse_lock_inode(dir);
  455. err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
  456. &outarg, &inode);
  457. fuse_unlock_inode(dir, locked);
  458. if (err == -ENOENT) {
  459. outarg_valid = false;
  460. err = 0;
  461. }
  462. if (err)
  463. goto out_err;
  464. err = -EIO;
  465. if (inode && get_node_id(inode) == FUSE_ROOT_ID)
  466. goto out_iput;
  467. newent = d_splice_alias(inode, entry);
  468. err = PTR_ERR(newent);
  469. if (IS_ERR(newent))
  470. goto out_err;
  471. entry = newent ? newent : entry;
  472. if (outarg_valid)
  473. fuse_change_entry_timeout(entry, &outarg);
  474. else
  475. fuse_invalidate_entry_cache(entry);
  476. if (inode)
  477. fuse_advise_use_readdirplus(dir);
  478. return newent;
  479. out_iput:
  480. iput(inode);
  481. out_err:
  482. return ERR_PTR(err);
  483. }
  484. /*
  485. * Atomic create+open operation
  486. *
  487. * If the filesystem doesn't support this, then fall back to separate
  488. * 'mknod' + 'open' requests.
  489. */
  490. static int fuse_create_open(struct inode *dir, struct dentry *entry,
  491. struct file *file, unsigned flags,
  492. umode_t mode)
  493. {
  494. int err;
  495. struct inode *inode;
  496. struct fuse_conn *fc = get_fuse_conn(dir);
  497. struct fuse_mount *fm = get_fuse_mount(dir);
  498. FUSE_ARGS(args);
  499. struct fuse_forget_link *forget;
  500. struct fuse_create_in inarg;
  501. struct fuse_open_out outopen;
  502. struct fuse_entry_out outentry;
  503. struct fuse_inode *fi;
  504. struct fuse_file *ff;
  505. /* Userspace expects S_IFREG in create mode */
  506. BUG_ON((mode & S_IFMT) != S_IFREG);
  507. forget = fuse_alloc_forget();
  508. err = -ENOMEM;
  509. if (!forget)
  510. goto out_err;
  511. err = -ENOMEM;
  512. ff = fuse_file_alloc(fm);
  513. if (!ff)
  514. goto out_put_forget_req;
  515. if (!fm->fc->dont_mask)
  516. mode &= ~current_umask();
  517. flags &= ~O_NOCTTY;
  518. memset(&inarg, 0, sizeof(inarg));
  519. memset(&outentry, 0, sizeof(outentry));
  520. inarg.flags = flags;
  521. inarg.mode = mode;
  522. inarg.umask = current_umask();
  523. args.opcode = FUSE_CREATE;
  524. args.nodeid = get_node_id(dir);
  525. args.in_numargs = 2;
  526. args.in_args[0].size = sizeof(inarg);
  527. args.in_args[0].value = &inarg;
  528. args.in_args[1].size = entry->d_name.len + 1;
  529. args.in_args[1].value = entry->d_name.name;
  530. args.out_numargs = 2;
  531. args.out_args[0].size = sizeof(outentry);
  532. args.out_args[0].value = &outentry;
  533. args.out_args[1].size = sizeof(outopen);
  534. args.out_args[1].value = &outopen;
  535. err = fuse_simple_request(fm, &args);
  536. if (err)
  537. goto out_free_ff;
  538. err = -EIO;
  539. if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
  540. fuse_invalid_attr(&outentry.attr))
  541. goto out_free_ff;
  542. ff->fh = outopen.fh;
  543. ff->nodeid = outentry.nodeid;
  544. ff->open_flags = outopen.open_flags;
  545. fuse_passthrough_setup(fc, ff, &outopen);
  546. inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
  547. &outentry.attr, entry_attr_timeout(&outentry), 0);
  548. if (!inode) {
  549. flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
  550. fuse_sync_release(NULL, ff, flags);
  551. fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
  552. err = -ENOMEM;
  553. goto out_err;
  554. }
  555. kfree(forget);
  556. d_instantiate(entry, inode);
  557. fuse_change_entry_timeout(entry, &outentry);
  558. fuse_dir_changed(dir);
  559. err = finish_open(file, entry, generic_file_open);
  560. if (err) {
  561. fi = get_fuse_inode(inode);
  562. fuse_sync_release(fi, ff, flags);
  563. } else {
  564. file->private_data = ff;
  565. fuse_finish_open(inode, file);
  566. }
  567. return err;
  568. out_free_ff:
  569. fuse_file_free(ff);
  570. out_put_forget_req:
  571. kfree(forget);
  572. out_err:
  573. return err;
  574. }
  575. static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
  576. static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
  577. struct file *file, unsigned flags,
  578. umode_t mode)
  579. {
  580. int err;
  581. struct fuse_conn *fc = get_fuse_conn(dir);
  582. struct dentry *res = NULL;
  583. if (fuse_is_bad(dir))
  584. return -EIO;
  585. if (d_in_lookup(entry)) {
  586. res = fuse_lookup(dir, entry, 0);
  587. if (IS_ERR(res))
  588. return PTR_ERR(res);
  589. if (res)
  590. entry = res;
  591. }
  592. if (!(flags & O_CREAT) || d_really_is_positive(entry))
  593. goto no_open;
  594. /* Only creates */
  595. file->f_mode |= FMODE_CREATED;
  596. if (fc->no_create)
  597. goto mknod;
  598. err = fuse_create_open(dir, entry, file, flags, mode);
  599. if (err == -ENOSYS) {
  600. fc->no_create = 1;
  601. goto mknod;
  602. }
  603. out_dput:
  604. dput(res);
  605. return err;
  606. mknod:
  607. err = fuse_mknod(dir, entry, mode, 0);
  608. if (err)
  609. goto out_dput;
  610. no_open:
  611. return finish_no_open(file, res);
  612. }
  613. /*
  614. * Code shared between mknod, mkdir, symlink and link
  615. */
  616. static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
  617. struct inode *dir, struct dentry *entry,
  618. umode_t mode)
  619. {
  620. struct fuse_entry_out outarg;
  621. struct inode *inode;
  622. struct dentry *d;
  623. int err;
  624. struct fuse_forget_link *forget;
  625. if (fuse_is_bad(dir))
  626. return -EIO;
  627. forget = fuse_alloc_forget();
  628. if (!forget)
  629. return -ENOMEM;
  630. memset(&outarg, 0, sizeof(outarg));
  631. args->nodeid = get_node_id(dir);
  632. args->out_numargs = 1;
  633. args->out_args[0].size = sizeof(outarg);
  634. args->out_args[0].value = &outarg;
  635. err = fuse_simple_request(fm, args);
  636. if (err)
  637. goto out_put_forget_req;
  638. err = -EIO;
  639. if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
  640. goto out_put_forget_req;
  641. if ((outarg.attr.mode ^ mode) & S_IFMT)
  642. goto out_put_forget_req;
  643. inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
  644. &outarg.attr, entry_attr_timeout(&outarg), 0);
  645. if (!inode) {
  646. fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
  647. return -ENOMEM;
  648. }
  649. kfree(forget);
  650. d_drop(entry);
  651. d = d_splice_alias(inode, entry);
  652. if (IS_ERR(d))
  653. return PTR_ERR(d);
  654. if (d) {
  655. fuse_change_entry_timeout(d, &outarg);
  656. dput(d);
  657. } else {
  658. fuse_change_entry_timeout(entry, &outarg);
  659. }
  660. fuse_dir_changed(dir);
  661. return 0;
  662. out_put_forget_req:
  663. kfree(forget);
  664. return err;
  665. }
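/*
 * Editor's note (not in the original source): fuse_mknod(), fuse_mkdir(),
 * fuse_symlink() and fuse_link() below all funnel through create_new_entry().
 * Each caller fills args->opcode and its own in_args; this helper supplies the
 * common parts: the parent nodeid, the fuse_entry_out reply buffer, the
 * S_IFMT sanity check against @mode, inode instantiation via fuse_iget(), and
 * the d_splice_alias()/timeout/fuse_dir_changed() bookkeeping.
 */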
  666. static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
  667. dev_t rdev)
  668. {
  669. struct fuse_mknod_in inarg;
  670. struct fuse_mount *fm = get_fuse_mount(dir);
  671. FUSE_ARGS(args);
  672. if (!fm->fc->dont_mask)
  673. mode &= ~current_umask();
  674. memset(&inarg, 0, sizeof(inarg));
  675. inarg.mode = mode;
  676. inarg.rdev = new_encode_dev(rdev);
  677. inarg.umask = current_umask();
  678. args.opcode = FUSE_MKNOD;
  679. args.in_numargs = 2;
  680. args.in_args[0].size = sizeof(inarg);
  681. args.in_args[0].value = &inarg;
  682. args.in_args[1].size = entry->d_name.len + 1;
  683. args.in_args[1].value = entry->d_name.name;
  684. return create_new_entry(fm, &args, dir, entry, mode);
  685. }
  686. static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
  687. bool excl)
  688. {
  689. return fuse_mknod(dir, entry, mode, 0);
  690. }
  691. static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
  692. {
  693. struct fuse_mkdir_in inarg;
  694. struct fuse_mount *fm = get_fuse_mount(dir);
  695. FUSE_ARGS(args);
  696. if (!fm->fc->dont_mask)
  697. mode &= ~current_umask();
  698. memset(&inarg, 0, sizeof(inarg));
  699. inarg.mode = mode;
  700. inarg.umask = current_umask();
  701. args.opcode = FUSE_MKDIR;
  702. args.in_numargs = 2;
  703. args.in_args[0].size = sizeof(inarg);
  704. args.in_args[0].value = &inarg;
  705. args.in_args[1].size = entry->d_name.len + 1;
  706. args.in_args[1].value = entry->d_name.name;
  707. return create_new_entry(fm, &args, dir, entry, S_IFDIR);
  708. }
  709. static int fuse_symlink(struct inode *dir, struct dentry *entry,
  710. const char *link)
  711. {
  712. struct fuse_mount *fm = get_fuse_mount(dir);
  713. unsigned len = strlen(link) + 1;
  714. FUSE_ARGS(args);
  715. args.opcode = FUSE_SYMLINK;
  716. args.in_numargs = 2;
  717. args.in_args[0].size = entry->d_name.len + 1;
  718. args.in_args[0].value = entry->d_name.name;
  719. args.in_args[1].size = len;
  720. args.in_args[1].value = link;
  721. return create_new_entry(fm, &args, dir, entry, S_IFLNK);
  722. }
  723. void fuse_flush_time_update(struct inode *inode)
  724. {
  725. int err = sync_inode_metadata(inode, 1);
  726. mapping_set_error(inode->i_mapping, err);
  727. }
  728. void fuse_update_ctime(struct inode *inode)
  729. {
  730. if (!IS_NOCMTIME(inode)) {
  731. inode->i_ctime = current_time(inode);
  732. mark_inode_dirty_sync(inode);
  733. fuse_flush_time_update(inode);
  734. }
  735. }
  736. static int fuse_unlink(struct inode *dir, struct dentry *entry)
  737. {
  738. int err;
  739. struct fuse_mount *fm = get_fuse_mount(dir);
  740. FUSE_ARGS(args);
  741. if (fuse_is_bad(dir))
  742. return -EIO;
  743. args.opcode = FUSE_UNLINK;
  744. args.nodeid = get_node_id(dir);
  745. args.in_numargs = 1;
  746. args.in_args[0].size = entry->d_name.len + 1;
  747. args.in_args[0].value = entry->d_name.name;
  748. err = fuse_simple_request(fm, &args);
  749. if (!err) {
  750. struct inode *inode = d_inode(entry);
  751. struct fuse_inode *fi = get_fuse_inode(inode);
  752. spin_lock(&fi->lock);
  753. fi->attr_version = atomic64_inc_return(&fm->fc->attr_version);
  754. /*
  755. * If i_nlink == 0 then unlink doesn't make sense, yet this can
  756. * happen if userspace filesystem is careless. It would be
  757. * difficult to enforce correct nlink usage so just ignore this
  758. * condition here
  759. */
  760. if (inode->i_nlink > 0)
  761. drop_nlink(inode);
  762. spin_unlock(&fi->lock);
  763. fuse_invalidate_attr(inode);
  764. fuse_dir_changed(dir);
  765. fuse_invalidate_entry_cache(entry);
  766. fuse_update_ctime(inode);
  767. } else if (err == -EINTR)
  768. fuse_invalidate_entry(entry);
  769. return err;
  770. }
  771. static int fuse_rmdir(struct inode *dir, struct dentry *entry)
  772. {
  773. int err;
  774. struct fuse_mount *fm = get_fuse_mount(dir);
  775. FUSE_ARGS(args);
  776. if (fuse_is_bad(dir))
  777. return -EIO;
  778. args.opcode = FUSE_RMDIR;
  779. args.nodeid = get_node_id(dir);
  780. args.in_numargs = 1;
  781. args.in_args[0].size = entry->d_name.len + 1;
  782. args.in_args[0].value = entry->d_name.name;
  783. err = fuse_simple_request(fm, &args);
  784. if (!err) {
  785. clear_nlink(d_inode(entry));
  786. fuse_dir_changed(dir);
  787. fuse_invalidate_entry_cache(entry);
  788. } else if (err == -EINTR)
  789. fuse_invalidate_entry(entry);
  790. return err;
  791. }
  792. static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
  793. struct inode *newdir, struct dentry *newent,
  794. unsigned int flags, int opcode, size_t argsize)
  795. {
  796. int err;
  797. struct fuse_rename2_in inarg;
  798. struct fuse_mount *fm = get_fuse_mount(olddir);
  799. FUSE_ARGS(args);
  800. memset(&inarg, 0, argsize);
  801. inarg.newdir = get_node_id(newdir);
  802. inarg.flags = flags;
  803. args.opcode = opcode;
  804. args.nodeid = get_node_id(olddir);
  805. args.in_numargs = 3;
  806. args.in_args[0].size = argsize;
  807. args.in_args[0].value = &inarg;
  808. args.in_args[1].size = oldent->d_name.len + 1;
  809. args.in_args[1].value = oldent->d_name.name;
  810. args.in_args[2].size = newent->d_name.len + 1;
  811. args.in_args[2].value = newent->d_name.name;
  812. err = fuse_simple_request(fm, &args);
  813. if (!err) {
  814. /* ctime changes */
  815. fuse_invalidate_attr(d_inode(oldent));
  816. fuse_update_ctime(d_inode(oldent));
  817. if (flags & RENAME_EXCHANGE) {
  818. fuse_invalidate_attr(d_inode(newent));
  819. fuse_update_ctime(d_inode(newent));
  820. }
  821. fuse_dir_changed(olddir);
  822. if (olddir != newdir)
  823. fuse_dir_changed(newdir);
  824. /* newent will end up negative */
  825. if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) {
  826. fuse_invalidate_attr(d_inode(newent));
  827. fuse_invalidate_entry_cache(newent);
  828. fuse_update_ctime(d_inode(newent));
  829. }
  830. } else if (err == -EINTR) {
  831. /* If request was interrupted, DEITY only knows if the
  832. rename actually took place. If the invalidation
  833. fails (e.g. some process has CWD under the renamed
  834. directory), then there can be inconsistency between
  835. the dcache and the real filesystem. Tough luck. */
  836. fuse_invalidate_entry(oldent);
  837. if (d_really_is_positive(newent))
  838. fuse_invalidate_entry(newent);
  839. }
  840. return err;
  841. }
  842. static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
  843. struct inode *newdir, struct dentry *newent,
  844. unsigned int flags)
  845. {
  846. struct fuse_conn *fc = get_fuse_conn(olddir);
  847. int err;
  848. if (fuse_is_bad(olddir))
  849. return -EIO;
  850. if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
  851. return -EINVAL;
  852. if (flags) {
  853. if (fc->no_rename2 || fc->minor < 23)
  854. return -EINVAL;
  855. err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
  856. FUSE_RENAME2,
  857. sizeof(struct fuse_rename2_in));
  858. if (err == -ENOSYS) {
  859. fc->no_rename2 = 1;
  860. err = -EINVAL;
  861. }
  862. } else {
  863. err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
  864. FUSE_RENAME,
  865. sizeof(struct fuse_rename_in));
  866. }
  867. return err;
  868. }
  869. static int fuse_link(struct dentry *entry, struct inode *newdir,
  870. struct dentry *newent)
  871. {
  872. int err;
  873. struct fuse_link_in inarg;
  874. struct inode *inode = d_inode(entry);
  875. struct fuse_mount *fm = get_fuse_mount(inode);
  876. FUSE_ARGS(args);
  877. memset(&inarg, 0, sizeof(inarg));
  878. inarg.oldnodeid = get_node_id(inode);
  879. args.opcode = FUSE_LINK;
  880. args.in_numargs = 2;
  881. args.in_args[0].size = sizeof(inarg);
  882. args.in_args[0].value = &inarg;
  883. args.in_args[1].size = newent->d_name.len + 1;
  884. args.in_args[1].value = newent->d_name.name;
  885. err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
  886. /* Contrary to "normal" filesystems it can happen that link
  887. makes two "logical" inodes point to the same "physical"
  888. inode. We invalidate the attributes of the old one, so it
  889. will reflect changes in the backing inode (link count,
  890. etc.)
  891. */
  892. if (!err) {
  893. struct fuse_inode *fi = get_fuse_inode(inode);
  894. spin_lock(&fi->lock);
  895. fi->attr_version = atomic64_inc_return(&fm->fc->attr_version);
  896. if (likely(inode->i_nlink < UINT_MAX))
  897. inc_nlink(inode);
  898. spin_unlock(&fi->lock);
  899. fuse_invalidate_attr(inode);
  900. fuse_update_ctime(inode);
  901. } else if (err == -EINTR) {
  902. fuse_invalidate_attr(inode);
  903. }
  904. return err;
  905. }
  906. static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
  907. struct kstat *stat)
  908. {
  909. unsigned int blkbits;
  910. struct fuse_conn *fc = get_fuse_conn(inode);
  911. /* see the comment in fuse_change_attributes() */
  912. if (fc->writeback_cache && S_ISREG(inode->i_mode)) {
  913. attr->size = i_size_read(inode);
  914. attr->mtime = inode->i_mtime.tv_sec;
  915. attr->mtimensec = inode->i_mtime.tv_nsec;
  916. attr->ctime = inode->i_ctime.tv_sec;
  917. attr->ctimensec = inode->i_ctime.tv_nsec;
  918. }
  919. stat->dev = inode->i_sb->s_dev;
  920. stat->ino = attr->ino;
  921. stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
  922. stat->nlink = attr->nlink;
  923. stat->uid = make_kuid(fc->user_ns, attr->uid);
  924. stat->gid = make_kgid(fc->user_ns, attr->gid);
  925. stat->rdev = inode->i_rdev;
  926. stat->atime.tv_sec = attr->atime;
  927. stat->atime.tv_nsec = attr->atimensec;
  928. stat->mtime.tv_sec = attr->mtime;
  929. stat->mtime.tv_nsec = attr->mtimensec;
  930. stat->ctime.tv_sec = attr->ctime;
  931. stat->ctime.tv_nsec = attr->ctimensec;
  932. stat->size = attr->size;
  933. stat->blocks = attr->blocks;
  934. if (attr->blksize != 0)
  935. blkbits = ilog2(attr->blksize);
  936. else
  937. blkbits = inode->i_sb->s_blocksize_bits;
  938. stat->blksize = 1 << blkbits;
  939. }
  940. static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
  941. struct file *file)
  942. {
  943. int err;
  944. struct fuse_getattr_in inarg;
  945. struct fuse_attr_out outarg;
  946. struct fuse_mount *fm = get_fuse_mount(inode);
  947. FUSE_ARGS(args);
  948. u64 attr_version;
  949. attr_version = fuse_get_attr_version(fm->fc);
  950. memset(&inarg, 0, sizeof(inarg));
  951. memset(&outarg, 0, sizeof(outarg));
  952. /* Directories have separate file-handle space */
  953. if (file && S_ISREG(inode->i_mode)) {
  954. struct fuse_file *ff = file->private_data;
  955. inarg.getattr_flags |= FUSE_GETATTR_FH;
  956. inarg.fh = ff->fh;
  957. }
  958. args.opcode = FUSE_GETATTR;
  959. args.nodeid = get_node_id(inode);
  960. args.in_numargs = 1;
  961. args.in_args[0].size = sizeof(inarg);
  962. args.in_args[0].value = &inarg;
  963. args.out_numargs = 1;
  964. args.out_args[0].size = sizeof(outarg);
  965. args.out_args[0].value = &outarg;
  966. err = fuse_simple_request(fm, &args);
  967. if (!err) {
  968. if (fuse_invalid_attr(&outarg.attr) ||
  969. inode_wrong_type(inode, outarg.attr.mode)) {
  970. fuse_make_bad(inode);
  971. err = -EIO;
  972. } else {
  973. fuse_change_attributes(inode, &outarg.attr,
  974. attr_timeout(&outarg),
  975. attr_version);
  976. if (stat)
  977. fuse_fillattr(inode, &outarg.attr, stat);
  978. }
  979. }
  980. return err;
  981. }
  982. static int fuse_update_get_attr(struct inode *inode, struct file *file,
  983. struct kstat *stat, u32 request_mask,
  984. unsigned int flags)
  985. {
  986. struct fuse_inode *fi = get_fuse_inode(inode);
  987. int err = 0;
  988. bool sync;
  989. if (flags & AT_STATX_FORCE_SYNC)
  990. sync = true;
  991. else if (flags & AT_STATX_DONT_SYNC)
  992. sync = false;
  993. else if (request_mask & READ_ONCE(fi->inval_mask))
  994. sync = true;
  995. else
  996. sync = time_before64(fi->i_time, get_jiffies_64());
  997. if (sync) {
  998. forget_all_cached_acls(inode);
  999. err = fuse_do_getattr(inode, stat, file);
  1000. } else if (stat) {
  1001. generic_fillattr(inode, stat);
  1002. stat->mode = fi->orig_i_mode;
  1003. stat->ino = fi->orig_ino;
  1004. }
  1005. return err;
  1006. }
  1007. int fuse_update_attributes(struct inode *inode, struct file *file)
  1008. {
  1009. /* Do *not* need to get atime for internal purposes */
  1010. return fuse_update_get_attr(inode, file, NULL,
  1011. STATX_BASIC_STATS & ~STATX_ATIME, 0);
  1012. }
  1013. int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
  1014. u64 child_nodeid, struct qstr *name)
  1015. {
  1016. int err = -ENOTDIR;
  1017. struct inode *parent;
  1018. struct dentry *dir;
  1019. struct dentry *entry;
  1020. parent = fuse_ilookup(fc, parent_nodeid, NULL);
  1021. if (!parent)
  1022. return -ENOENT;
  1023. inode_lock_nested(parent, I_MUTEX_PARENT);
  1024. if (!S_ISDIR(parent->i_mode))
  1025. goto unlock;
  1026. err = -ENOENT;
  1027. dir = d_find_alias(parent);
  1028. if (!dir)
  1029. goto unlock;
  1030. name->hash = full_name_hash(dir, name->name, name->len);
  1031. entry = d_lookup(dir, name);
  1032. dput(dir);
  1033. if (!entry)
  1034. goto unlock;
  1035. fuse_dir_changed(parent);
  1036. fuse_invalidate_entry(entry);
  1037. if (child_nodeid != 0 && d_really_is_positive(entry)) {
  1038. inode_lock(d_inode(entry));
  1039. if (get_node_id(d_inode(entry)) != child_nodeid) {
  1040. err = -ENOENT;
  1041. goto badentry;
  1042. }
  1043. if (d_mountpoint(entry)) {
  1044. err = -EBUSY;
  1045. goto badentry;
  1046. }
  1047. if (d_is_dir(entry)) {
  1048. shrink_dcache_parent(entry);
  1049. if (!simple_empty(entry)) {
  1050. err = -ENOTEMPTY;
  1051. goto badentry;
  1052. }
  1053. d_inode(entry)->i_flags |= S_DEAD;
  1054. }
  1055. dont_mount(entry);
  1056. clear_nlink(d_inode(entry));
  1057. err = 0;
  1058. badentry:
  1059. inode_unlock(d_inode(entry));
  1060. if (!err)
  1061. d_delete(entry);
  1062. } else {
  1063. err = 0;
  1064. }
  1065. dput(entry);
  1066. unlock:
  1067. inode_unlock(parent);
  1068. iput(parent);
  1069. return err;
  1070. }
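/*
 * Editor's note (not in the original source): this is the entry-invalidation
 * path driven from the server side (the reverse of the usual kernel->daemon
 * direction).  Given a parent nodeid and a name it drops the matching cached
 * dentry; when @child_nodeid is non-zero it additionally behaves like a
 * server-initiated unlink/rmdir, refusing mountpoints (-EBUSY) and non-empty
 * directories (-ENOTEMPTY).
 */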
  1071. /*
  1072. * Calling into a user-controlled filesystem gives the filesystem
  1073. * daemon ptrace-like capabilities over the current process. This
1074. * means that the filesystem daemon is able to record the exact
  1075. * filesystem operations performed, and can also control the behavior
  1076. * of the requester process in otherwise impossible ways. For example
1077. * it can delay the operation for an arbitrary length of time, allowing
  1078. * DoS against the requester.
  1079. *
1080. * For this reason, only processes for which the owner of the mount has
1081. * ptrace privilege can call into the filesystem. This excludes processes
1082. * started by other users, and suid or sgid processes.
  1083. */
  1084. int fuse_allow_current_process(struct fuse_conn *fc)
  1085. {
  1086. const struct cred *cred;
  1087. if (fc->allow_other)
  1088. return current_in_userns(fc->user_ns);
  1089. cred = current_cred();
  1090. if (uid_eq(cred->euid, fc->user_id) &&
  1091. uid_eq(cred->suid, fc->user_id) &&
  1092. uid_eq(cred->uid, fc->user_id) &&
  1093. gid_eq(cred->egid, fc->group_id) &&
  1094. gid_eq(cred->sgid, fc->group_id) &&
  1095. gid_eq(cred->gid, fc->group_id))
  1096. return 1;
  1097. return 0;
  1098. }
  1099. static int fuse_access(struct inode *inode, int mask)
  1100. {
  1101. struct fuse_mount *fm = get_fuse_mount(inode);
  1102. FUSE_ARGS(args);
  1103. struct fuse_access_in inarg;
  1104. int err;
  1105. BUG_ON(mask & MAY_NOT_BLOCK);
  1106. if (fm->fc->no_access)
  1107. return 0;
  1108. memset(&inarg, 0, sizeof(inarg));
  1109. inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
  1110. args.opcode = FUSE_ACCESS;
  1111. args.nodeid = get_node_id(inode);
  1112. args.in_numargs = 1;
  1113. args.in_args[0].size = sizeof(inarg);
  1114. args.in_args[0].value = &inarg;
  1115. err = fuse_simple_request(fm, &args);
  1116. if (err == -ENOSYS) {
  1117. fm->fc->no_access = 1;
  1118. err = 0;
  1119. }
  1120. return err;
  1121. }
  1122. static int fuse_perm_getattr(struct inode *inode, int mask)
  1123. {
  1124. if (mask & MAY_NOT_BLOCK)
  1125. return -ECHILD;
  1126. forget_all_cached_acls(inode);
  1127. return fuse_do_getattr(inode, NULL, NULL);
  1128. }
  1129. /*
  1130. * Check permission. The two basic access models of FUSE are:
  1131. *
  1132. * 1) Local access checking ('default_permissions' mount option) based
  1133. * on file mode. This is the plain old disk filesystem permission
1134. * model.
  1135. *
  1136. * 2) "Remote" access checking, where server is responsible for
  1137. * checking permission in each inode operation. An exception to this
  1138. * is if ->permission() was invoked from sys_access() in which case an
  1139. * access request is sent. Execute permission is still checked
  1140. * locally based on file mode.
  1141. */
  1142. static int fuse_permission(struct inode *inode, int mask)
  1143. {
  1144. struct fuse_conn *fc = get_fuse_conn(inode);
  1145. bool refreshed = false;
  1146. int err = 0;
  1147. if (fuse_is_bad(inode))
  1148. return -EIO;
  1149. if (!fuse_allow_current_process(fc))
  1150. return -EACCES;
  1151. /*
  1152. * If attributes are needed, refresh them before proceeding
  1153. */
  1154. if (fc->default_permissions ||
  1155. ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
  1156. struct fuse_inode *fi = get_fuse_inode(inode);
  1157. u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
  1158. if (perm_mask & READ_ONCE(fi->inval_mask) ||
  1159. time_before64(fi->i_time, get_jiffies_64())) {
  1160. refreshed = true;
  1161. err = fuse_perm_getattr(inode, mask);
  1162. if (err)
  1163. return err;
  1164. }
  1165. }
  1166. if (fc->default_permissions) {
  1167. err = generic_permission(inode, mask);
  1168. /* If permission is denied, try to refresh file
  1169. attributes. This is also needed, because the root
  1170. node will at first have no permissions */
  1171. if (err == -EACCES && !refreshed) {
  1172. err = fuse_perm_getattr(inode, mask);
  1173. if (!err)
  1174. err = generic_permission(inode, mask);
  1175. }
  1176. /* Note: the opposite of the above test does not
  1177. exist. So if permissions are revoked this won't be
  1178. noticed immediately, only after the attribute
  1179. timeout has expired */
  1180. } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
  1181. err = fuse_access(inode, mask);
  1182. } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
  1183. if (!(inode->i_mode & S_IXUGO)) {
  1184. if (refreshed)
  1185. return -EACCES;
  1186. err = fuse_perm_getattr(inode, mask);
  1187. if (!err && !(inode->i_mode & S_IXUGO))
  1188. return -EACCES;
  1189. }
  1190. }
  1191. return err;
  1192. }
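/*
 * Editor's sketch (not in the original source) of which branch above handles
 * what:
 *
 *	default_permissions set:   generic_permission() on the cached
 *				   mode/uid/gid, refreshed once via
 *				   fuse_perm_getattr() on -EACCES
 *	MAY_ACCESS | MAY_CHDIR:    FUSE_ACCESS request sent to the server
 *	MAY_EXEC on regular file:  local check of the cached S_IXUGO bits
 *
 * Everything else is left to the server, which sees every subsequent inode
 * operation anyway.
 */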
  1193. static int fuse_readlink_page(struct inode *inode, struct page *page)
  1194. {
  1195. struct fuse_mount *fm = get_fuse_mount(inode);
  1196. struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
  1197. struct fuse_args_pages ap = {
  1198. .num_pages = 1,
  1199. .pages = &page,
  1200. .descs = &desc,
  1201. };
  1202. char *link;
  1203. ssize_t res;
  1204. ap.args.opcode = FUSE_READLINK;
  1205. ap.args.nodeid = get_node_id(inode);
  1206. ap.args.out_pages = true;
  1207. ap.args.out_argvar = true;
  1208. ap.args.page_zeroing = true;
  1209. ap.args.out_numargs = 1;
  1210. ap.args.out_args[0].size = desc.length;
  1211. res = fuse_simple_request(fm, &ap.args);
  1212. fuse_invalidate_atime(inode);
  1213. if (res < 0)
  1214. return res;
  1215. if (WARN_ON(res >= PAGE_SIZE))
  1216. return -EIO;
  1217. link = page_address(page);
  1218. link[res] = '\0';
  1219. return 0;
  1220. }
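/*
 * Editor's note (not in the original source): FUSE_READLINK is asked for at
 * most PAGE_SIZE - 1 bytes (out_argvar lets the server return less), the
 * result is NUL-terminated in place, and a reply of PAGE_SIZE or more is
 * treated as corruption (-EIO) rather than silently truncated.
 */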
  1221. static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
  1222. struct delayed_call *callback)
  1223. {
  1224. struct fuse_conn *fc = get_fuse_conn(inode);
  1225. struct page *page;
  1226. int err;
  1227. err = -EIO;
  1228. if (fuse_is_bad(inode))
  1229. goto out_err;
  1230. if (fc->cache_symlinks)
  1231. return page_get_link(dentry, inode, callback);
  1232. err = -ECHILD;
  1233. if (!dentry)
  1234. goto out_err;
  1235. page = alloc_page(GFP_KERNEL);
  1236. err = -ENOMEM;
  1237. if (!page)
  1238. goto out_err;
  1239. err = fuse_readlink_page(inode, page);
  1240. if (err) {
  1241. __free_page(page);
  1242. goto out_err;
  1243. }
  1244. set_delayed_call(callback, page_put_link, page);
  1245. return page_address(page);
  1246. out_err:
  1247. return ERR_PTR(err);
  1248. }
  1249. static int fuse_dir_open(struct inode *inode, struct file *file)
  1250. {
  1251. return fuse_open_common(inode, file, true);
  1252. }
  1253. static int fuse_dir_release(struct inode *inode, struct file *file)
  1254. {
  1255. fuse_release_common(file, true);
  1256. return 0;
  1257. }
  1258. static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
  1259. int datasync)
  1260. {
  1261. struct inode *inode = file->f_mapping->host;
  1262. struct fuse_conn *fc = get_fuse_conn(inode);
  1263. int err;
  1264. if (fuse_is_bad(inode))
  1265. return -EIO;
  1266. if (fc->no_fsyncdir)
  1267. return 0;
  1268. inode_lock(inode);
  1269. err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
  1270. if (err == -ENOSYS) {
  1271. fc->no_fsyncdir = 1;
  1272. err = 0;
  1273. }
  1274. inode_unlock(inode);
  1275. return err;
  1276. }
  1277. static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
  1278. unsigned long arg)
  1279. {
  1280. struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
  1281. /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
  1282. if (fc->minor < 18)
  1283. return -ENOTTY;
  1284. return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
  1285. }
  1286. static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
  1287. unsigned long arg)
  1288. {
  1289. struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
  1290. if (fc->minor < 18)
  1291. return -ENOTTY;
  1292. return fuse_ioctl_common(file, cmd, arg,
  1293. FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
  1294. }
  1295. static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
  1296. {
  1297. /* Always update if mtime is explicitly set */
  1298. if (ivalid & ATTR_MTIME_SET)
  1299. return true;
  1300. /* Or if kernel i_mtime is the official one */
  1301. if (trust_local_mtime)
  1302. return true;
  1303. /* If it's an open(O_TRUNC) or an ftruncate(), don't update */
  1304. if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
  1305. return false;
  1306. /* In all other cases update */
  1307. return true;
  1308. }
  1309. static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
  1310. struct fuse_setattr_in *arg, bool trust_local_cmtime)
  1311. {
  1312. unsigned ivalid = iattr->ia_valid;
  1313. if (ivalid & ATTR_MODE)
  1314. arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
  1315. if (ivalid & ATTR_UID)
  1316. arg->valid |= FATTR_UID, arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
  1317. if (ivalid & ATTR_GID)
  1318. arg->valid |= FATTR_GID, arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
  1319. if (ivalid & ATTR_SIZE)
  1320. arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
  1321. if (ivalid & ATTR_ATIME) {
  1322. arg->valid |= FATTR_ATIME;
  1323. arg->atime = iattr->ia_atime.tv_sec;
  1324. arg->atimensec = iattr->ia_atime.tv_nsec;
  1325. if (!(ivalid & ATTR_ATIME_SET))
  1326. arg->valid |= FATTR_ATIME_NOW;
  1327. }
  1328. if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
  1329. arg->valid |= FATTR_MTIME;
  1330. arg->mtime = iattr->ia_mtime.tv_sec;
  1331. arg->mtimensec = iattr->ia_mtime.tv_nsec;
  1332. if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
  1333. arg->valid |= FATTR_MTIME_NOW;
  1334. }
  1335. if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
  1336. arg->valid |= FATTR_CTIME;
  1337. arg->ctime = iattr->ia_ctime.tv_sec;
  1338. arg->ctimensec = iattr->ia_ctime.tv_nsec;
  1339. }
  1340. }
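/*
 * Editor's sketch (not in the original source): for a plain chmod() the VFS
 * hands fuse_do_setattr() an iattr with ATTR_MODE (and ATTR_CTIME) set, and
 * iattr_to_fattr() reduces it to
 *
 *	arg->valid = FATTR_MODE;	(plus FATTR_CTIME if trust_local_cmtime)
 *	arg->mode  = iattr->ia_mode;
 *
 * i.e. only the fields the server actually needs to apply are transmitted.
 */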
  1341. /*
  1342. * Prevent concurrent writepages on inode
  1343. *
  1344. * This is done by adding a negative bias to the inode write counter
  1345. * and waiting for all pending writes to finish.
  1346. */
  1347. void fuse_set_nowrite(struct inode *inode)
  1348. {
  1349. struct fuse_inode *fi = get_fuse_inode(inode);
  1350. BUG_ON(!inode_is_locked(inode));
  1351. spin_lock(&fi->lock);
  1352. BUG_ON(fi->writectr < 0);
  1353. fi->writectr += FUSE_NOWRITE;
  1354. spin_unlock(&fi->lock);
  1355. wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
  1356. }
  1357. /*
  1358. * Allow writepages on inode
  1359. *
  1360. * Remove the bias from the writecounter and send any queued
  1361. * writepages.
  1362. */
  1363. static void __fuse_release_nowrite(struct inode *inode)
  1364. {
  1365. struct fuse_inode *fi = get_fuse_inode(inode);
  1366. BUG_ON(fi->writectr != FUSE_NOWRITE);
  1367. fi->writectr = 0;
  1368. fuse_flush_writepages(inode);
  1369. }
  1370. void fuse_release_nowrite(struct inode *inode)
  1371. {
  1372. struct fuse_inode *fi = get_fuse_inode(inode);
  1373. spin_lock(&fi->lock);
  1374. __fuse_release_nowrite(inode);
  1375. spin_unlock(&fi->lock);
  1376. }
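/*
 * Editor's note (not in the original source): the typical pairing, used by
 * fuse_do_setattr() below for truncation, is:
 *
 *	fuse_set_nowrite(inode);
 *	... send FUSE_SETATTR ...
 *	spin_lock(&fi->lock);
 *	__fuse_release_nowrite(inode);
 *	spin_unlock(&fi->lock);
 *
 * fuse_set_nowrite() waits for pending writepages to drain, and the release
 * re-arms any writepages queued in the meantime, so a size change on the
 * server cannot race with in-flight writeback.
 */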
  1377. static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
  1378. struct inode *inode,
  1379. struct fuse_setattr_in *inarg_p,
  1380. struct fuse_attr_out *outarg_p)
  1381. {
  1382. args->opcode = FUSE_SETATTR;
  1383. args->nodeid = get_node_id(inode);
  1384. args->in_numargs = 1;
  1385. args->in_args[0].size = sizeof(*inarg_p);
  1386. args->in_args[0].value = inarg_p;
  1387. args->out_numargs = 1;
  1388. args->out_args[0].size = sizeof(*outarg_p);
  1389. args->out_args[0].value = outarg_p;
  1390. }
  1391. /*
  1392. * Flush inode->i_mtime to the server
  1393. */
  1394. int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
  1395. {
  1396. struct fuse_mount *fm = get_fuse_mount(inode);
  1397. FUSE_ARGS(args);
  1398. struct fuse_setattr_in inarg;
  1399. struct fuse_attr_out outarg;
  1400. memset(&inarg, 0, sizeof(inarg));
  1401. memset(&outarg, 0, sizeof(outarg));
  1402. inarg.valid = FATTR_MTIME;
  1403. inarg.mtime = inode->i_mtime.tv_sec;
  1404. inarg.mtimensec = inode->i_mtime.tv_nsec;
  1405. if (fm->fc->minor >= 23) {
  1406. inarg.valid |= FATTR_CTIME;
  1407. inarg.ctime = inode->i_ctime.tv_sec;
  1408. inarg.ctimensec = inode->i_ctime.tv_nsec;
  1409. }
  1410. if (ff) {
  1411. inarg.valid |= FATTR_FH;
  1412. inarg.fh = ff->fh;
  1413. }
  1414. fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);
  1415. return fuse_simple_request(fm, &args);
  1416. }
/*
 * Set attributes, and at the same time refresh them.
 *
 * Truncation is slightly complicated, because the 'truncate' request
 * may fail, in which case we don't want to touch the mapping.
 * vmtruncate() doesn't allow for this case, so do the rlimit checking
 * and the actual truncation by hand.
 */
int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
		    struct file *file)
{
	struct inode *inode = d_inode(dentry);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_inode *fi = get_fuse_inode(inode);
	FUSE_ARGS(args);
	struct fuse_setattr_in inarg;
	struct fuse_attr_out outarg;
	bool is_truncate = false;
	bool is_wb = fc->writeback_cache;
	loff_t oldsize;
	int err;
	bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
	bool fault_blocked = false;

	if (!fc->default_permissions)
		attr->ia_valid |= ATTR_FORCE;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (WARN_ON(!S_ISREG(inode->i_mode)))
			return -EIO;
		is_truncate = true;
	}

	if (FUSE_IS_DAX(inode) && is_truncate) {
		down_write(&fi->i_mmap_sem);
		fault_blocked = true;
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err) {
			up_write(&fi->i_mmap_sem);
			return err;
		}
	}

	if (attr->ia_valid & ATTR_OPEN) {
		/* This is coming from open(..., ... | O_TRUNC); */
		WARN_ON(!(attr->ia_valid & ATTR_SIZE));
		WARN_ON(attr->ia_size != 0);
		if (fc->atomic_o_trunc) {
			/*
			 * No need to send request to userspace, since actual
			 * truncation has already been done by OPEN. But still
			 * need to truncate page cache.
			 */
			i_size_write(inode, 0);
			truncate_pagecache(inode, 0);
			goto out;
		}
		file = NULL;
	}

	/* Flush dirty data/metadata before non-truncate SETATTR */
	if (is_wb && S_ISREG(inode->i_mode) &&
	    attr->ia_valid &
			(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
			 ATTR_TIMES_SET)) {
		err = write_inode_now(inode, true);
		if (err)
			return err;

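		/*
		 * Wait for the writepage requests started above to be
		 * answered by the server before the attributes are changed,
		 * then drop the nowrite bias again right away.
		 */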
		fuse_set_nowrite(inode);
		fuse_release_nowrite(inode);
	}

	if (is_truncate) {
		fuse_set_nowrite(inode);
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
		if (trust_local_cmtime && attr->ia_size != inode->i_size)
			attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
	if (file) {
		struct fuse_file *ff = file->private_data;

		inarg.valid |= FATTR_FH;
		inarg.fh = ff->fh;
	}
	if (attr->ia_valid & ATTR_SIZE) {
		/* For mandatory locking in truncate */
		inarg.valid |= FATTR_LOCKOWNER;
		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
	}
	fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
	err = fuse_simple_request(fm, &args);
	if (err) {
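		/*
		 * An interrupted request may or may not have reached the
		 * server, so the cached attributes can no longer be trusted.
		 */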
		if (err == -EINTR)
			fuse_invalidate_attr(inode);
		goto error;
	}

	if (fuse_invalid_attr(&outarg.attr) ||
	    inode_wrong_type(inode, outarg.attr.mode)) {
		fuse_make_bad(inode);
		err = -EIO;
		goto error;
	}

	spin_lock(&fi->lock);
	/* the kernel maintains i_mtime locally */
	if (trust_local_cmtime) {
		if (attr->ia_valid & ATTR_MTIME)
			inode->i_mtime = attr->ia_mtime;
		if (attr->ia_valid & ATTR_CTIME)
			inode->i_ctime = attr->ia_ctime;
		/* FIXME: clear I_DIRTY_SYNC? */
	}

	fuse_change_attributes_common(inode, &outarg.attr,
				      attr_timeout(&outarg));
	oldsize = inode->i_size;
	/* see the comment in fuse_change_attributes() */
	if (!is_wb || is_truncate || !S_ISREG(inode->i_mode))
		i_size_write(inode, outarg.attr.size);

	if (is_truncate) {
		/* NOTE: this may release/reacquire fi->lock */
		__fuse_release_nowrite(inode);
	}
	spin_unlock(&fi->lock);

	/*
	 * Only call invalidate_inode_pages2() after removing
	 * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
	 */
	if ((is_truncate || !is_wb) &&
	    S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
		truncate_pagecache(inode, outarg.attr.size);
		invalidate_inode_pages2(inode->i_mapping);
	}

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

out:
	if (fault_blocked)
		up_write(&fi->i_mmap_sem);

	return 0;

error:
	if (is_truncate)
		fuse_release_nowrite(inode);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (fault_blocked)
		up_write(&fi->i_mmap_sem);
	return err;
}

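/*
 * ->setattr() handler: let the userspace filesystem kill suid/sgid when
 * it advertises handle_killpriv, otherwise recompute the mode here, then
 * hand the request off to fuse_do_setattr().
 */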
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
	struct inode *inode = d_inode(entry);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
	int ret;

	if (fuse_is_bad(inode))
		return -EIO;

	if (!fuse_allow_current_process(get_fuse_conn(inode)))
		return -EACCES;

	if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
		attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
				    ATTR_MODE);

		/*
		 * The only sane way to reliably kill suid/sgid is to do it in
		 * the userspace filesystem
		 *
		 * This should be done on write(), truncate() and chown().
		 */
		if (!fc->handle_killpriv) {
			/*
			 * ia_mode calculation may have used stale i_mode.
			 * Refresh and recalculate.
			 */
			ret = fuse_do_getattr(inode, NULL, file);
			if (ret)
				return ret;

			attr->ia_mode = inode->i_mode;
			if (inode->i_mode & S_ISUID) {
				attr->ia_valid |= ATTR_MODE;
				attr->ia_mode &= ~S_ISUID;
			}
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
				attr->ia_valid |= ATTR_MODE;
				attr->ia_mode &= ~S_ISGID;
			}
		}
	}
	if (!attr->ia_valid)
		return 0;

	ret = fuse_do_setattr(entry, attr, file);
	if (!ret) {
		/*
		 * If filesystem supports acls it may have updated acl xattrs in
		 * the filesystem, so forget cached acls for the inode.
		 */
		if (fc->posix_acl)
			forget_all_cached_acls(inode);

		/* Directory mode changed, may need to revalidate access */
		if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
			fuse_invalidate_entry_cache(entry);
	}
	return ret;
}

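/*
 * ->getattr() handler: enforce allow_other restrictions, then return the
 * (possibly refreshed) attributes via fuse_update_get_attr().
 */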
static int fuse_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	if (!fuse_allow_current_process(fc)) {
		if (!request_mask) {
			/*
			 * If user explicitly requested *nothing* then don't
			 * error out, but return st_dev only.
			 */
			stat->result_mask = 0;
			stat->dev = inode->i_sb->s_dev;
			return 0;
		}

		return -EACCES;
	}

	return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
}

static const struct inode_operations fuse_dir_inode_operations = {
	.lookup		= fuse_lookup,
	.mkdir		= fuse_mkdir,
	.symlink	= fuse_symlink,
	.unlink		= fuse_unlink,
	.rmdir		= fuse_rmdir,
	.rename		= fuse_rename2,
	.link		= fuse_link,
	.setattr	= fuse_setattr,
	.create		= fuse_create,
	.atomic_open	= fuse_atomic_open,
	.mknod		= fuse_mknod,
	.permission	= fuse_permission,
	.getattr	= fuse_getattr,
	.listxattr	= fuse_listxattr,
	.get_acl	= fuse_get_acl,
	.set_acl	= fuse_set_acl,
};

static const struct file_operations fuse_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= fuse_readdir,
	.open		= fuse_dir_open,
	.release	= fuse_dir_release,
	.fsync		= fuse_dir_fsync,
	.unlocked_ioctl	= fuse_dir_ioctl,
	.compat_ioctl	= fuse_dir_compat_ioctl,
};

static const struct inode_operations fuse_common_inode_operations = {
	.setattr	= fuse_setattr,
	.permission	= fuse_permission,
	.getattr	= fuse_getattr,
	.listxattr	= fuse_listxattr,
	.get_acl	= fuse_get_acl,
	.set_acl	= fuse_set_acl,
};

static const struct inode_operations fuse_symlink_inode_operations = {
	.setattr	= fuse_setattr,
	.get_link	= fuse_get_link,
	.getattr	= fuse_getattr,
	.listxattr	= fuse_listxattr,
};

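/* Install the inode operations shared by non-directory, non-symlink inodes */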
void fuse_init_common(struct inode *inode)
{
	inode->i_op = &fuse_common_inode_operations;
}

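/* Set up a directory inode: install dir ops and reset the readdir cache */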
void fuse_init_dir(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_op = &fuse_dir_inode_operations;
	inode->i_fop = &fuse_dir_operations;

	spin_lock_init(&fi->rdc.lock);
	fi->rdc.cached = false;
	fi->rdc.size = 0;
	fi->rdc.pos = 0;
	fi->rdc.version = 0;
}

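/*
 * ->readpage() for symlinks: fill the page with the link target read from
 * the server via fuse_readlink_page().
 */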
static int fuse_symlink_readpage(struct file *null, struct page *page)
{
	int err = fuse_readlink_page(page->mapping->host, page);

	if (!err)
		SetPageUptodate(page);

	unlock_page(page);

	return err;
}

static const struct address_space_operations fuse_symlink_aops = {
	.readpage	= fuse_symlink_readpage,
};

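/* Set up a symlink inode: the link target is cached in the page cache */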
void fuse_init_symlink(struct inode *inode)
{
	inode->i_op = &fuse_symlink_inode_operations;
	inode->i_data.a_ops = &fuse_symlink_aops;
	inode_nohighmem(inode);
}