
// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/libfs.c
 *	Library for filesystems writers.
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <linux/fs_context.h>
#include <linux/pseudo_fs.h>
#include <linux/fsnotify.h>
#include <linux/unicode.h>
#include <linux/fscrypt.h>

#include <linux/uaccess.h>

#include "internal.h"

int simple_getattr(const struct path *path, struct kstat *stat,
		   u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	generic_fillattr(inode, stat);
	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
	return 0;
}
EXPORT_SYMBOL(simple_getattr);

int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	buf->f_type = dentry->d_sb->s_magic;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	return 0;
}
EXPORT_SYMBOL(simple_statfs);

/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
int always_delete_dentry(const struct dentry *dentry)
{
	return 1;
}
EXPORT_SYMBOL(always_delete_dentry);

const struct dentry_operations simple_dentry_operations = {
	.d_delete = always_delete_dentry,
};
EXPORT_SYMBOL(simple_dentry_operations);

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative.  Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	if (!dentry->d_sb->s_d_op)
		d_set_d_op(dentry, &simple_dentry_operations);
	d_add(dentry, NULL);
	return NULL;
}
EXPORT_SYMBOL(simple_lookup);

int dcache_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = d_alloc_cursor(file->f_path.dentry);

	return file->private_data ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(dcache_dir_open);

int dcache_dir_close(struct inode *inode, struct file *file)
{
	dput(file->private_data);
	return 0;
}
EXPORT_SYMBOL(dcache_dir_close);

/* parent is locked at least shared */
/*
 * Returns an element of siblings' list.
 * We are looking for <count>th positive after <p>; if
 * found, dentry is grabbed and returned to caller.
 * If no such element exists, NULL is returned.
 */
static struct dentry *scan_positives(struct dentry *cursor,
					struct list_head *p,
					loff_t count,
					struct dentry *last)
{
	struct dentry *dentry = cursor->d_parent, *found = NULL;

	spin_lock(&dentry->d_lock);
	while ((p = p->next) != &dentry->d_subdirs) {
		struct dentry *d = list_entry(p, struct dentry, d_child);
		// we must at least skip cursors, to avoid livelocks
		if (d->d_flags & DCACHE_DENTRY_CURSOR)
			continue;
		if (simple_positive(d) && !--count) {
			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
			if (simple_positive(d))
				found = dget_dlock(d);
			spin_unlock(&d->d_lock);
			if (likely(found))
				break;
			count = 1;
		}
		if (need_resched()) {
			list_move(&cursor->d_child, p);
			p = &cursor->d_child;
			spin_unlock(&dentry->d_lock);
			cond_resched();
			spin_lock(&dentry->d_lock);
		}
	}
	spin_unlock(&dentry->d_lock);
	dput(last);
	return found;
}

loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
	struct dentry *dentry = file->f_path.dentry;
	switch (whence) {
		case 1:
			offset += file->f_pos;
			fallthrough;
		case 0:
			if (offset >= 0)
				break;
			fallthrough;
		default:
			return -EINVAL;
	}
	if (offset != file->f_pos) {
		struct dentry *cursor = file->private_data;
		struct dentry *to = NULL;

		inode_lock_shared(dentry->d_inode);

		if (offset > 2)
			to = scan_positives(cursor, &dentry->d_subdirs,
					    offset - 2, NULL);
		spin_lock(&dentry->d_lock);
		if (to)
			list_move(&cursor->d_child, &to->d_child);
		else
			list_del_init(&cursor->d_child);
		spin_unlock(&dentry->d_lock);
		dput(to);

		file->f_pos = offset;

		inode_unlock_shared(dentry->d_inode);
	}
	return offset;
}
EXPORT_SYMBOL(dcache_dir_lseek);

/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
	return (inode->i_mode >> 12) & 15;
}

/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */
int dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *cursor = file->private_data;
	struct list_head *anchor = &dentry->d_subdirs;
	struct dentry *next = NULL;
	struct list_head *p;

	if (!dir_emit_dots(file, ctx))
		return 0;

	if (ctx->pos == 2)
		p = anchor;
	else if (!list_empty(&cursor->d_child))
		p = &cursor->d_child;
	else
		return 0;

	while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
		if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
			      d_inode(next)->i_ino, dt_type(d_inode(next))))
			break;
		ctx->pos++;
		p = &next->d_child;
	}
	spin_lock(&dentry->d_lock);
	if (next)
		list_move_tail(&cursor->d_child, &next->d_child);
	else
		list_del_init(&cursor->d_child);
	spin_unlock(&dentry->d_lock);
	dput(next);

	return 0;
}
EXPORT_SYMBOL(dcache_readdir);

ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
	return -EISDIR;
}
EXPORT_SYMBOL_NS(generic_read_dir, ANDROID_GKI_VFS_EXPORT_ONLY);

const struct file_operations simple_dir_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= dcache_readdir,
	.fsync		= noop_fsync,
};
EXPORT_SYMBOL(simple_dir_operations);

const struct inode_operations simple_dir_inode_operations = {
	.lookup		= simple_lookup,
};
EXPORT_SYMBOL(simple_dir_inode_operations);

static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
{
	struct dentry *child = NULL;
	struct list_head *p = prev ? &prev->d_child : &parent->d_subdirs;

	spin_lock(&parent->d_lock);
	while ((p = p->next) != &parent->d_subdirs) {
		struct dentry *d = container_of(p, struct dentry, d_child);
		if (simple_positive(d)) {
			spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
			if (simple_positive(d))
				child = dget_dlock(d);
			spin_unlock(&d->d_lock);
			if (likely(child))
				break;
		}
	}
	spin_unlock(&parent->d_lock);
	dput(prev);
	return child;
}

void simple_recursive_removal(struct dentry *dentry,
			      void (*callback)(struct dentry *))
{
	struct dentry *this = dget(dentry);

	while (true) {
		struct dentry *victim = NULL, *child;
		struct inode *inode = this->d_inode;

		inode_lock(inode);
		if (d_is_dir(this))
			inode->i_flags |= S_DEAD;
		while ((child = find_next_child(this, victim)) == NULL) {
			// kill and ascend
			// update metadata while it's still locked
			inode->i_ctime = current_time(inode);
			clear_nlink(inode);
			inode_unlock(inode);
			victim = this;
			this = this->d_parent;
			inode = this->d_inode;
			inode_lock(inode);
			if (simple_positive(victim)) {
				d_invalidate(victim);	// avoid lost mounts
				if (d_is_dir(victim))
					fsnotify_rmdir(inode, victim);
				else
					fsnotify_unlink(inode, victim);
				if (callback)
					callback(victim);
				dput(victim);		// unpin it
			}
			if (victim == dentry) {
				inode->i_ctime = inode->i_mtime =
					current_time(inode);
				if (d_is_dir(dentry))
					drop_nlink(inode);
				inode_unlock(inode);
				dput(dentry);
				return;
			}
		}
		inode_unlock(inode);
		this = child;
	}
}
EXPORT_SYMBOL(simple_recursive_removal);

static const struct super_operations simple_super_operations = {
	.statfs		= simple_statfs,
};

static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = fc->fs_private;
	struct inode *root;

	s->s_maxbytes = MAX_LFS_FILESIZE;
	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = ctx->magic;
	s->s_op = ctx->ops ?: &simple_super_operations;
	s->s_xattr = ctx->xattr;
	s->s_time_gran = 1;
	root = new_inode(s);
	if (!root)
		return -ENOMEM;

	/*
	 * since this is the first inode, make it number 1. New inodes created
	 * after this must take care not to collide with it (by passing
	 * max_reserved of 1 to iunique).
	 */
	root->i_ino = 1;
	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
	root->i_atime = root->i_mtime = root->i_ctime = current_time(root);
	s->s_root = d_make_root(root);
	if (!s->s_root)
		return -ENOMEM;
	s->s_d_op = ctx->dops;
	return 0;
}

static int pseudo_fs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, pseudo_fs_fill_super);
}

static void pseudo_fs_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations pseudo_fs_context_ops = {
	.free		= pseudo_fs_free,
	.get_tree	= pseudo_fs_get_tree,
};

/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
				      unsigned long magic)
{
	struct pseudo_fs_context *ctx;

	ctx = kzalloc(sizeof(struct pseudo_fs_context), GFP_KERNEL);
	if (likely(ctx)) {
		ctx->magic = magic;
		fc->fs_private = ctx;
		fc->ops = &pseudo_fs_context_ops;
		fc->sb_flags |= SB_NOUSER;
		fc->global = true;
	}
	return ctx;
}
EXPORT_SYMBOL(init_pseudo);
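
/*
 * Example (illustrative sketch, not part of libfs): a minimal pseudo
 * filesystem built on init_pseudo().  The "examplefs" names and the magic
 * number are hypothetical; real callers (sockfs, pipefs, ...) follow the
 * same shape: fill in the context from ->init_fs_context() and let
 * pseudo_fs_get_tree()/pseudo_fs_fill_super() do the rest.
 *
 *	#define EXAMPLEFS_MAGIC	0x2b2b2b2b	// hypothetical value
 *
 *	static int examplefs_init_fs_context(struct fs_context *fc)
 *	{
 *		return init_pseudo(fc, EXAMPLEFS_MAGIC) ? 0 : -ENOMEM;
 *	}
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.name			= "examplefs",
 *		.init_fs_context	= examplefs_init_fs_context,
 *		.kill_sb		= kill_anon_super,
 *	};
 */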

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
EXPORT_SYMBOL(simple_open);

int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);

	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	inc_nlink(inode);
	ihold(inode);
	dget(dentry);
	d_instantiate(dentry, inode);
	return 0;
}
EXPORT_SYMBOL(simple_link);

int simple_empty(struct dentry *dentry)
{
	struct dentry *child;
	int ret = 0;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
		if (simple_positive(child)) {
			spin_unlock(&child->d_lock);
			goto out;
		}
		spin_unlock(&child->d_lock);
	}
	ret = 1;
out:
	spin_unlock(&dentry->d_lock);
	return ret;
}
EXPORT_SYMBOL(simple_empty);

int simple_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
EXPORT_SYMBOL(simple_unlink);

int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	simple_unlink(dir, dentry);
	drop_nlink(dir);
	return 0;
}
EXPORT_SYMBOL(simple_rmdir);

int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
		  struct inode *new_dir, struct dentry *new_dentry,
		  unsigned int flags)
{
	struct inode *inode = d_inode(old_dentry);
	int they_are_dirs = d_is_dir(old_dentry);

	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (d_really_is_positive(new_dentry)) {
		simple_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
		new_dir->i_mtime = inode->i_ctime = current_time(old_dir);

	return 0;
}
EXPORT_SYMBOL(simple_rename);

/**
 * simple_setattr - setattr for simple filesystem
 * @dentry: dentry
 * @iattr: iattr structure
 *
 * Returns 0 on success, -error on failure.
 *
 * simple_setattr is a simple ->setattr implementation without a proper
 * implementation of size changes.
 *
 * It can either be used for in-memory filesystems or special files
 * on simple regular filesystems.  Anything that needs to change on-disk
 * or wire state on size changes needs its own setattr method.
 */
int simple_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, iattr);
	if (error)
		return error;

	if (iattr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, iattr->ia_size);
	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
EXPORT_SYMBOL(simple_setattr);

int simple_readpage(struct file *file, struct page *page)
{
	clear_highpage(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(simple_readpage);

int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *page;
	pgoff_t index;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
		unsigned from = pos & (PAGE_SIZE - 1);

		zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
	}
	return 0;
}
EXPORT_SYMBOL(simple_write_begin);

/**
 * simple_write_end - .write_end helper for non-block-device FSes
 * @file: See .write_end of address_space_operations
 * @mapping:	"
 * @pos:	"
 * @len:	"
 * @copied:	"
 * @page:	"
 * @fsdata:	"
 *
 * simple_write_end does the minimum needed for updating a page after writing is
 * done. It has the same API signature as the .write_end of
 * address_space_operations vector. So it can just be set onto .write_end for
 * FSes that don't need any other processing. i_mutex is assumed to be held.
 * Block based filesystems should use generic_write_end().
 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
 * is not called, so a filesystem that actually does store data in .write_inode
 * should extend on what's done here with a call to mark_inode_dirty() in the
 * case that i_size has changed.
 *
 * Use *ONLY* with simple_readpage()
 */
int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			unsigned from = pos & (PAGE_SIZE - 1);

			zero_user(page, from + copied, len - copied);
		}
		SetPageUptodate(page);
	}
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}
EXPORT_SYMBOL(simple_write_end);

/*
 * the inodes created here are not hashed. If you use iunique to generate
 * unique inode values later for this filesystem, then you must take care
 * to pass it an appropriate max_reserved value to avoid collisions.
 */
int simple_fill_super(struct super_block *s, unsigned long magic,
		      const struct tree_descr *files)
{
	struct inode *inode;
	struct dentry *root;
	struct dentry *dentry;
	int i;

	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = &simple_super_operations;
	s->s_time_gran = 1;

	inode = new_inode(s);
	if (!inode)
		return -ENOMEM;
	/*
	 * because the root inode is 1, the files array must not contain an
	 * entry at index 1
	 */
	inode->i_ino = 1;
	inode->i_mode = S_IFDIR | 0755;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	set_nlink(inode, 2);
	root = d_make_root(inode);
	if (!root)
		return -ENOMEM;
	for (i = 0; !files->name || files->name[0]; i++, files++) {
		if (!files->name)
			continue;

		/* warn if it tries to conflict with the root inode */
		if (unlikely(i == 1))
			printk(KERN_WARNING "%s: %s passed in a files array"
				"with an index of 1!\n", __func__,
				s->s_type->name);

		dentry = d_alloc_name(root, files->name);
		if (!dentry)
			goto out;
		inode = new_inode(s);
		if (!inode) {
			dput(dentry);
			goto out;
		}
		inode->i_mode = S_IFREG | files->mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_fop = files->ops;
		inode->i_ino = i;
		d_add(dentry, inode);
	}
	s->s_root = root;
	return 0;
out:
	d_genocide(root);
	shrink_dcache_parent(root);
	dput(root);
	return -ENOMEM;
}
EXPORT_SYMBOL(simple_fill_super);
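
/*
 * Example (illustrative sketch, not part of libfs): populating a super block
 * with a fixed set of files via simple_fill_super().  The "examplefs" names,
 * example_file_fops and EXAMPLEFS_MAGIC are hypothetical.  Note that the
 * array is indexed by inode number, so slot 1 (the root inode) is left
 * unused and the array ends with an entry whose name is "".
 *
 *	static const struct tree_descr examplefs_files[] = {
 *		[2] = { "status",  &example_file_fops, 0444 },
 *		[3] = { "control", &example_file_fops, 0644 },
 *		{ "" },		// terminator
 *	};
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		return simple_fill_super(sb, EXAMPLEFS_MAGIC, examplefs_files);
 *	}
 */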

static DEFINE_SPINLOCK(pin_fs_lock);

int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;

	spin_lock(&pin_fs_lock);
	if (unlikely(!*mount)) {
		spin_unlock(&pin_fs_lock);
		mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
	return 0;
}
EXPORT_SYMBOL(simple_pin_fs);

void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;

	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
}
EXPORT_SYMBOL(simple_release_fs);

/**
 * simple_read_from_buffer - copy data from the buffer to user space
 * @to: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The simple_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the user space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	ret = copy_to_user(to, from + pos, count);
	if (ret == count)
		return -EFAULT;
	count -= ret;
	*ppos = pos + count;
	return count;
}
EXPORT_SYMBOL(simple_read_from_buffer);

/**
 * simple_write_to_buffer - copy data from user space to the buffer
 * @to: the buffer to write to
 * @available: the size of the buffer
 * @ppos: the current position in the buffer
 * @from: the user space buffer to read from
 * @count: the maximum number of bytes to read
 *
 * The simple_write_to_buffer() function reads up to @count bytes from the user
 * space address starting at @from into the buffer @to at offset @ppos.
 *
 * On success, the number of bytes written is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
			       const void __user *from, size_t count)
{
	loff_t pos = *ppos;
	size_t res;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	res = copy_from_user(to + pos, from, count);
	if (res == count)
		return -EFAULT;
	count -= res;
	*ppos = pos + count;
	return count;
}
EXPORT_SYMBOL(simple_write_to_buffer);
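
/*
 * Example (illustrative sketch, not part of libfs): a read-only file that
 * exposes a fixed kernel buffer through simple_read_from_buffer(), e.g. as
 * the .read of a debugfs or pseudo-filesystem file.  "example_msg",
 * "example_read" and "example_fops" are hypothetical names.
 *
 *	static const char example_msg[] = "hello from the kernel\n";
 *
 *	static ssize_t example_read(struct file *file, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return simple_read_from_buffer(buf, count, ppos, example_msg,
 *					       sizeof(example_msg) - 1);
 *	}
 *
 *	static const struct file_operations example_fops = {
 *		.open	= simple_open,
 *		.read	= example_read,
 *		.llseek	= default_llseek,
 *	};
 */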

/**
 * memory_read_from_buffer - copy data from the buffer
 * @to: the kernel space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The memory_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the kernel space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;

	return count;
}
EXPORT_SYMBOL(memory_read_from_buffer);

/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */

void simple_transaction_set(struct file *file, size_t n)
{
	struct simple_transaction_argresp *ar = file->private_data;

	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);

	/*
	 * The barrier ensures that ar->size will really remain zero until
	 * ar->data is ready for reading.
	 */
	smp_mb();
	ar->size = n;
}
EXPORT_SYMBOL(simple_transaction_set);

char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
	struct simple_transaction_argresp *ar;
	static DEFINE_SPINLOCK(simple_transaction_lock);

	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
		return ERR_PTR(-EFBIG);

	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
	if (!ar)
		return ERR_PTR(-ENOMEM);

	spin_lock(&simple_transaction_lock);

	/* only one write allowed per open */
	if (file->private_data) {
		spin_unlock(&simple_transaction_lock);
		free_page((unsigned long)ar);
		return ERR_PTR(-EBUSY);
	}

	file->private_data = ar;

	spin_unlock(&simple_transaction_lock);

	if (copy_from_user(ar->data, buf, size))
		return ERR_PTR(-EFAULT);

	return ar->data;
}
EXPORT_SYMBOL(simple_transaction_get);

ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
	struct simple_transaction_argresp *ar = file->private_data;

	if (!ar)
		return 0;
	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
EXPORT_SYMBOL(simple_transaction_read);

int simple_transaction_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}
EXPORT_SYMBOL(simple_transaction_release);

/* Simple attribute files */

struct simple_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt)
{
	struct simple_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);

	file->private_data = attr;

	return nonseekable_open(inode, file);
}
EXPORT_SYMBOL_GPL(simple_attr_open);

int simple_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
EXPORT_SYMBOL_GPL(simple_attr_release);	/* GPL-only?  This?  Really? */

/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;

	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos && attr->get_buf[0]) {
		/* continued read */
		size = strlen(attr->get_buf);
	} else {
		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(simple_attr_read);

/* interpret the buffer as a number to call the set function with */
ssize_t simple_attr_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	unsigned long long val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	attr->set_buf[size] = '\0';
	ret = kstrtoull(attr->set_buf, 0, &val);
	if (ret)
		goto out;
	ret = attr->set(attr->data, val);
	if (ret == 0)
		ret = len; /* on success, claim we got the whole input */
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(simple_attr_write);
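
/*
 * Example (illustrative sketch, not part of libfs): the usual way to reach
 * the simple_attr_* helpers is the DEFINE_SIMPLE_ATTRIBUTE() macro from
 * <linux/fs.h>, which generates a file_operations wired to
 * simple_attr_open/read/write/release.  "example_val", the get/set
 * callbacks and the debugfs parent dentry are hypothetical.
 *
 *	static u64 example_val;
 *
 *	static int example_get(void *data, u64 *val)
 *	{
 *		*val = example_val;
 *		return 0;
 *	}
 *
 *	static int example_set(void *data, u64 val)
 *	{
 *		example_val = val;
 *		return 0;
 *	}
 *
 *	DEFINE_SIMPLE_ATTRIBUTE(example_attr_fops, example_get, example_set,
 *				"%llu\n");
 *
 *	// e.g. debugfs_create_file("example", 0644, parent, NULL,
 *	//			    &example_attr_fops);
 */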

/**
 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
 * @sb:		filesystem to do the file handle conversion on
 * @fid:	file handle to convert
 * @fh_len:	length of the file handle in bytes
 * @fh_type:	type of file handle
 * @get_inode:	filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the object specified in the file handle.
 */
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);

/**
 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
 * @sb:		filesystem to do the file handle conversion on
 * @fid:	file handle to convert
 * @fh_len:	length of the file handle in bytes
 * @fh_type:	type of file handle
 * @get_inode:	filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the _parent_ object specified in the file handle if it
 * is specified in the file handle, or NULL otherwise.
 */
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len <= 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.parent_ino,
				  (fh_len > 3 ? fid->i32.parent_gen : 0));
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);

/**
 * __generic_file_fsync - generic fsync implementation for simple filesystems
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	inode_unlock(inode);
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(__generic_file_fsync);

/**
 * generic_file_fsync - generic fsync implementation for simple filesystems
 *			with flush
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 */
int generic_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = __generic_file_fsync(file, start, end, datasync);
	if (err)
		return err;
	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
}
EXPORT_SYMBOL(generic_file_fsync);
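
/*
 * Example (illustrative sketch, not part of libfs): a simple block-backed
 * filesystem can wire these helpers straight into its file_operations;
 * "examplefs_file_operations" is a hypothetical name, and the other
 * entries are existing generic VFS helpers.
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.fsync		= generic_file_fsync,
 *		.splice_read	= generic_file_splice_read,
 *	};
 */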

/**
 * generic_check_addressable - Check addressability of file system
 * @blocksize_bits:	log of file system block size
 * @num_blocks:		number of blocks in file system
 *
 * Determine whether a file system with @num_blocks blocks (and a
 * block size of 2**@blocksize_bits) is addressable by the sector_t
 * and page cache of the system.  Return 0 if so and -EFBIG otherwise.
 */
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
	u64 last_fs_block = num_blocks - 1;
	u64 last_fs_page =
		last_fs_block >> (PAGE_SHIFT - blocksize_bits);

	if (unlikely(num_blocks == 0))
		return 0;

	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
		return -EINVAL;

	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		return -EFBIG;
	}
	return 0;
}
EXPORT_SYMBOL(generic_check_addressable);

/*
 * No-op implementation of ->fsync for in-memory filesystems.
 */
int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	return 0;
}
EXPORT_SYMBOL(noop_fsync);

int noop_set_page_dirty(struct page *page)
{
	/*
	 * Unlike __set_page_dirty_no_writeback that handles dirty page
	 * tracking in the page object, dax does all dirty tracking in
	 * the inode address_space in response to mkwrite faults. In the
	 * dax case we only need to worry about potentially dirty CPU
	 * caches, not dirty page cache pages to write back.
	 *
	 * This callback is defined to prevent fallback to
	 * __set_page_dirty_buffers() in set_page_dirty().
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(noop_set_page_dirty);

void noop_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	/*
	 * There is no page cache to invalidate in the dax case, however
	 * we need this callback defined to prevent falling back to
	 * block_invalidatepage() in do_invalidatepage().
	 */
}
EXPORT_SYMBOL_GPL(noop_invalidatepage);

ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * iomap based filesystems support direct I/O without need for
	 * this callback. However, it still needs to be set in
	 * inode->a_ops so that open/fcntl know that direct I/O is
	 * generally supported.
	 */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(noop_direct_IO);

/* Because kfree isn't assignment-compatible with void(void*) ;-/ */
void kfree_link(void *p)
{
	kfree(p);
}
EXPORT_SYMBOL(kfree_link);

/*
 * nop .set_page_dirty method so that people can use .page_mkwrite on
 * anon inodes.
 */
static int anon_set_page_dirty(struct page *page)
{
	return 0;
};

struct inode *alloc_anon_inode(struct super_block *s)
{
	static const struct address_space_operations anon_aops = {
		.set_page_dirty = anon_set_page_dirty,
	};
	struct inode *inode = new_inode_pseudo(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = get_next_ino();
	inode->i_mapping->a_ops = &anon_aops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_flags |= S_PRIVATE;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);

/**
 * simple_nosetlease - generic helper for prohibiting leases
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	new lease supplied for insertion
 * @priv:	private data for lm_setup operation
 *
 * Generic helper for filesystems that do not wish to allow leases to be set.
 * All arguments are ignored and it just returns -EINVAL.
 */
int simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
		      void **priv)
{
	return -EINVAL;
}
EXPORT_SYMBOL(simple_nosetlease);

/**
 * simple_get_link - generic helper to get the target of "fast" symlinks
 * @dentry: not used here
 * @inode: the symlink inode
 * @done: not used here
 *
 * Generic helper for filesystems to use for symlink inodes where a pointer to
 * the symlink target is stored in ->i_link.  NOTE: this isn't normally called,
 * since as an optimization the path lookup code uses any non-NULL ->i_link
 * directly, without calling ->get_link().  But ->get_link() still must be set,
 * to mark the inode_operations as being for a symlink.
 *
 * Return: the symlink target
 */
const char *simple_get_link(struct dentry *dentry, struct inode *inode,
			    struct delayed_call *done)
{
	return inode->i_link;
}
EXPORT_SYMBOL(simple_get_link);

const struct inode_operations simple_symlink_inode_operations = {
	.get_link = simple_get_link,
};
EXPORT_SYMBOL(simple_symlink_inode_operations);
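
/*
 * Example (illustrative sketch, not part of libfs): an in-memory filesystem
 * creating a "fast" symlink that relies on simple_symlink_inode_operations.
 * "examplefs_symlink" and the way the inode is allocated are hypothetical;
 * the essential steps are storing the target string in ->i_link and setting
 * the symlink inode_operations.  A real implementation must also free the
 * kstrdup()'d target when the inode is destroyed.
 *
 *	static int examplefs_symlink(struct inode *dir, struct dentry *dentry,
 *				     const char *target)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode->i_mode = S_IFLNK | 0777;
 *		inode->i_link = kstrdup(target, GFP_KERNEL);
 *		if (!inode->i_link) {
 *			iput(inode);
 *			return -ENOMEM;
 *		}
 *		inode->i_op = &simple_symlink_inode_operations;
 *		d_instantiate(dentry, inode);
 *		dget(dentry);
 *		return 0;
 *	}
 */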

/*
 * Operations for a permanently empty directory.
 */
static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return ERR_PTR(-ENOENT);
}

static int empty_dir_getattr(const struct path *path, struct kstat *stat,
			     u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	generic_fillattr(inode, stat);
	return 0;
}

static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
{
	return -EPERM;
}

static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
{
	return -EOPNOTSUPP;
}

static const struct inode_operations empty_dir_inode_operations = {
	.lookup		= empty_dir_lookup,
	.permission	= generic_permission,
	.setattr	= empty_dir_setattr,
	.getattr	= empty_dir_getattr,
	.listxattr	= empty_dir_listxattr,
};

static loff_t empty_dir_llseek(struct file *file, loff_t offset, int whence)
{
	/* An empty directory has two entries . and .. at offsets 0 and 1 */
	return generic_file_llseek_size(file, offset, whence, 2, 2);
}

static int empty_dir_readdir(struct file *file, struct dir_context *ctx)
{
	dir_emit_dots(file, ctx);
	return 0;
}

static const struct file_operations empty_dir_operations = {
	.llseek		= empty_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= empty_dir_readdir,
	.fsync		= noop_fsync,
};

void make_empty_dir_inode(struct inode *inode)
{
	set_nlink(inode, 2);
	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
	inode->i_uid = GLOBAL_ROOT_UID;
	inode->i_gid = GLOBAL_ROOT_GID;
	inode->i_rdev = 0;
	inode->i_size = 0;
	inode->i_blkbits = PAGE_SHIFT;
	inode->i_blocks = 0;

	inode->i_op = &empty_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &empty_dir_operations;
}

bool is_empty_dir_inode(struct inode *inode)
{
	return (inode->i_fop == &empty_dir_operations) &&
		(inode->i_op == &empty_dir_inode_operations);
}

#ifdef CONFIG_UNICODE
/*
 * Determine if the name of a dentry should be casefolded.
 *
 * Return: if names will need casefolding
 */
static bool needs_casefold(const struct inode *dir)
{
	return IS_CASEFOLDED(dir) && dir->i_sb->s_encoding;
}

/**
 * generic_ci_d_compare - generic d_compare implementation for casefolding filesystems
 * @dentry:	dentry whose name we are checking against
 * @len:	len of name of dentry
 * @str:	str pointer to name of dentry
 * @name:	Name to compare against
 *
 * Return: 0 if names match, 1 if mismatch, or -ERRNO
 */
static int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
				const char *str, const struct qstr *name)
{
	const struct dentry *parent = READ_ONCE(dentry->d_parent);
	const struct inode *dir = READ_ONCE(parent->d_inode);
	const struct super_block *sb = dentry->d_sb;
	const struct unicode_map *um = sb->s_encoding;
	struct qstr qstr = QSTR_INIT(str, len);
	char strbuf[DNAME_INLINE_LEN];
	int ret;

	if (!dir || !needs_casefold(dir))
		goto fallback;
	/*
	 * If the dentry name is stored in-line, then it may be concurrently
	 * modified by a rename.  If this happens, the VFS will eventually retry
	 * the lookup, so it doesn't matter what ->d_compare() returns.
	 * However, it's unsafe to call utf8_strncasecmp() with an unstable
	 * string.  Therefore, we have to copy the name into a temporary buffer.
	 */
	if (len <= DNAME_INLINE_LEN - 1) {
		memcpy(strbuf, str, len);
		strbuf[len] = 0;
		qstr.name = strbuf;
		/* prevent compiler from optimizing out the temporary buffer */
		barrier();
	}
	ret = utf8_strncasecmp(um, name, &qstr);
	if (ret >= 0)
		return ret;

	if (sb_has_strict_encoding(sb))
		return -EINVAL;
fallback:
	if (len != name->len)
		return 1;
	return !!memcmp(str, name->name, len);
}

/**
 * generic_ci_d_hash - generic d_hash implementation for casefolding filesystems
 * @dentry:	dentry of the parent directory
 * @str:	qstr of name whose hash we should fill in
 *
 * Return: 0 if hash was successful or unchanged, and -EINVAL on error
 */
static int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
{
	const struct inode *dir = READ_ONCE(dentry->d_inode);
	struct super_block *sb = dentry->d_sb;
	const struct unicode_map *um = sb->s_encoding;
	int ret = 0;

	if (!dir || !needs_casefold(dir))
		return 0;

	ret = utf8_casefold_hash(um, dentry, str);
	if (ret < 0 && sb_has_strict_encoding(sb))
		return -EINVAL;
	return 0;
}

static const struct dentry_operations generic_ci_dentry_ops = {
	.d_hash = generic_ci_d_hash,
	.d_compare = generic_ci_d_compare,
};
#endif

#ifdef CONFIG_FS_ENCRYPTION
static const struct dentry_operations generic_encrypted_dentry_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
#endif

#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
	.d_hash = generic_ci_d_hash,
	.d_compare = generic_ci_d_compare,
	.d_revalidate = fscrypt_d_revalidate,
};
#endif

/**
 * generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry
 * @dentry:	dentry to set ops on
 *
 * Casefolded directories need d_hash and d_compare set, so that the dentries
 * contained in them are handled case-insensitively.  Note that these operations
 * are needed on the parent directory rather than on the dentries in it, and
 * while the casefolding flag can be toggled on and off on an empty directory,
 * dentry_operations can't be changed later.  As a result, if the filesystem has
 * casefolding support enabled at all, we have to give all dentries the
 * casefolding operations even if their inode doesn't have the casefolding flag
 * currently (and thus the casefolding ops would be no-ops for now).
 *
 * Encryption works differently in that the only dentry operation it needs is
 * d_revalidate, which it only needs on dentries that have the no-key name flag.
 * The no-key flag can't be set "later", so we don't have to worry about that.
 *
 * Finally, to maximize compatibility with overlayfs (which isn't compatible
 * with certain dentry operations) and to avoid taking an unnecessary
 * performance hit, we use custom dentry_operations for each possible
 * combination rather than always installing all operations.
 */
void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
{
#ifdef CONFIG_FS_ENCRYPTION
	bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME;
#endif
#ifdef CONFIG_UNICODE
	bool needs_ci_ops = dentry->d_sb->s_encoding;
#endif
#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
	if (needs_encrypt_ops && needs_ci_ops) {
		d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
		return;
	}
#endif
#ifdef CONFIG_FS_ENCRYPTION
	if (needs_encrypt_ops) {
		d_set_d_op(dentry, &generic_encrypted_dentry_ops);
		return;
	}
#endif
#ifdef CONFIG_UNICODE
	if (needs_ci_ops) {
		d_set_d_op(dentry, &generic_ci_dentry_ops);
		return;
	}
#endif
}
EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);