super.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * super.c - NILFS module and super block management.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */
/*
 *  linux/fs/ext2/super.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *  David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
#include <linux/crc32.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include "nilfs.h"
#include "export.h"
#include "mdt.h"
#include "alloc.h"
#include "btree.h"
#include "btnode.h"
#include "page.h"
#include "cpfile.h"
#include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */
#include "ifile.h"
#include "dat.h"
#include "segment.h"
#include "segbuf.h"

MODULE_AUTHOR("NTT Corp.");
MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
		   "(NILFS)");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);

static struct kmem_cache *nilfs_inode_cachep;
struct kmem_cache *nilfs_transaction_cachep;
struct kmem_cache *nilfs_segbuf_cachep;
struct kmem_cache *nilfs_btree_path_cache;

static int nilfs_setup_super(struct super_block *sb, int is_mount);
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
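/*
 * __nilfs_msg - printk-style message helper for NILFS
 *
 * The format string carries the printk level prefix; when a super block
 * is supplied, the message is prefixed with its device name.
 */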
void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;

	if (sb)
		printk("%c%cNILFS (%s): %pV\n",
		       KERN_SOH_ASCII, level, sb->s_id, &vaf);
	else
		printk("%c%cNILFS: %pV\n",
		       KERN_SOH_ASCII, level, &vaf);

	va_end(args);
}
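/*
 * nilfs_set_error - mark the filesystem as having errors
 *
 * Sets NILFS_ERROR_FS in the in-memory mount state and propagates the
 * flag to both on-disk super block copies.
 */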
static void nilfs_set_error(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;

	down_write(&nilfs->ns_sem);
	if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
		nilfs->ns_mount_state |= NILFS_ERROR_FS;
		sbp = nilfs_prepare_super(sb, 0);
		if (likely(sbp)) {
			sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
			if (sbp[1])
				sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
			nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
		}
	}
	up_write(&nilfs->ns_sem);
}
/**
 * __nilfs_error() - report failure condition on a filesystem
 *
 * __nilfs_error() sets an ERROR_FS flag on the superblock as well as
 * reporting an error message.  This function should be called when
 * NILFS detects inconsistencies or corruption of metadata on disk.
 *
 * This implements the body of the nilfs_error() macro.  Normally,
 * nilfs_error() should be used.  For recoverable errors such as a
 * single-shot I/O error, nilfs_err() should be used instead.
 *
 * Callers should not add a trailing newline since this function adds one.
 */
void __nilfs_error(struct super_block *sb, const char *function,
		   const char *fmt, ...)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n",
	       sb->s_id, function, &vaf);

	va_end(args);

	if (!sb_rdonly(sb)) {
		nilfs_set_error(sb);

		if (nilfs_test_opt(nilfs, ERRORS_RO)) {
			printk(KERN_CRIT "Remounting filesystem read-only\n");
			sb->s_flags |= SB_RDONLY;
		}
	}

	if (nilfs_test_opt(nilfs, ERRORS_PANIC))
		panic("NILFS (device %s): panic forced after error\n",
		      sb->s_id);
}
struct inode *nilfs_alloc_inode(struct super_block *sb)
{
	struct nilfs_inode_info *ii;

	ii = kmem_cache_alloc(nilfs_inode_cachep, GFP_NOFS);
	if (!ii)
		return NULL;
	ii->i_bh = NULL;
	ii->i_state = 0;
	ii->i_cno = 0;
	nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
	return &ii->vfs_inode;
}

static void nilfs_free_inode(struct inode *inode)
{
	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_destroy(inode);

	kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
}
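/*
 * nilfs_sync_super - write back the primary super block
 *
 * On an I/O error, the primary copy is duplicated into the other slot
 * and the write is retried against it so that the newer log position
 * is not lost.
 */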
static int nilfs_sync_super(struct super_block *sb, int flag)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

 retry:
	set_buffer_dirty(nilfs->ns_sbh[0]);
	if (nilfs_test_opt(nilfs, BARRIER)) {
		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
					  REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
	} else {
		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
	}

	if (unlikely(err)) {
		nilfs_err(sb, "unable to write superblock: err=%d", err);
		if (err == -EIO && nilfs->ns_sbh[1]) {
			/*
			 * sbp[0] points to newer log than sbp[1],
			 * so copy sbp[0] to sbp[1] to take over sbp[0].
			 */
			memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0],
			       nilfs->ns_sbsize);
			nilfs_fall_back_super_block(nilfs);
			goto retry;
		}
	} else {
		struct nilfs_super_block *sbp = nilfs->ns_sbp[0];

		nilfs->ns_sbwcount++;

		/*
		 * The latest segment becomes trailable from the position
		 * written in superblock.
		 */
		clear_nilfs_discontinued(nilfs);

		/* update GC protection for recent segments */
		if (nilfs->ns_sbh[1]) {
			if (flag == NILFS_SB_COMMIT_ALL) {
				set_buffer_dirty(nilfs->ns_sbh[1]);
				if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0)
					goto out;
			}
			if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) <
			    le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno))
				sbp = nilfs->ns_sbp[1];
		}

		spin_lock(&nilfs->ns_last_segment_lock);
		nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
		spin_unlock(&nilfs->ns_last_segment_lock);
	}
 out:
	return err;
}
void nilfs_set_log_cursor(struct nilfs_super_block *sbp,
			  struct the_nilfs *nilfs)
{
	sector_t nfreeblocks;

	/* nilfs->ns_sem must be locked by the caller. */
	nilfs_count_free_blocks(nilfs, &nfreeblocks);
	sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks);

	spin_lock(&nilfs->ns_last_segment_lock);
	sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
	sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
	sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
	spin_unlock(&nilfs->ns_last_segment_lock);
}
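/*
 * nilfs_prepare_super - validate the super block pair before an update
 *
 * If the magic number of one copy is invalid, the intact copy is
 * duplicated over it.  Returns the pair on success, or NULL when both
 * copies are broken.  When @flip is set and a secondary copy exists,
 * the primary and secondary copies are swapped.
 */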
struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
					       int flip)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;

	/* nilfs->ns_sem must be locked by the caller. */
	if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
		if (sbp[1] &&
		    sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
			memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
		} else {
			nilfs_crit(sb, "superblock broke");
			return NULL;
		}
	} else if (sbp[1] &&
		   sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
	}

	if (flip && sbp[1])
		nilfs_swap_super_block(nilfs);

	return sbp;
}
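/*
 * nilfs_commit_super - update write time and checksum, then write back
 *
 * Recomputes the CRC of the primary super block (and of the secondary
 * one when @flag is NILFS_SB_COMMIT_ALL), clears the dirty flag, and
 * writes the copies to the device via nilfs_sync_super().
 */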
int nilfs_commit_super(struct super_block *sb, int flag)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	time64_t t;

	/* nilfs->ns_sem must be locked by the caller. */
	t = ktime_get_real_seconds();
	nilfs->ns_sbwtime = t;
	sbp[0]->s_wtime = cpu_to_le64(t);
	sbp[0]->s_sum = 0;
	sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
					     (unsigned char *)sbp[0],
					     nilfs->ns_sbsize));
	if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
		sbp[1]->s_wtime = sbp[0]->s_wtime;
		sbp[1]->s_sum = 0;
		sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
						     (unsigned char *)sbp[1],
						     nilfs->ns_sbsize));
	}
	clear_nilfs_sb_dirty(nilfs);
	nilfs->ns_flushed_device = 1;
	/* make sure store to ns_flushed_device cannot be reordered */
	smp_wmb();
	return nilfs_sync_super(sb, flag);
}
/**
 * nilfs_cleanup_super() - write filesystem state for cleanup
 * @sb: super block instance to be unmounted or degraded to read-only
 *
 * This function restores state flags in the on-disk super block.
 * It sets the "clean" flag (i.e. NILFS_VALID_FS) unless the
 * filesystem was previously marked unclean.
 */
int nilfs_cleanup_super(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int flag = NILFS_SB_COMMIT;
	int ret = -EIO;

	sbp = nilfs_prepare_super(sb, 0);
	if (sbp) {
		sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
		nilfs_set_log_cursor(sbp[0], nilfs);
		if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
			/*
			 * Set the "clean" flag in the secondary super
			 * block as well if both super blocks point to
			 * the same checkpoint.
			 */
			sbp[1]->s_state = sbp[0]->s_state;
			flag = NILFS_SB_COMMIT_ALL;
		}
		ret = nilfs_commit_super(sb, flag);
	}
	return ret;
}
/**
 * nilfs_move_2nd_super - relocate secondary super block
 * @sb: super block instance
 * @sb2off: new offset of the secondary super block (in bytes)
 */
static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *nsbh;
	struct nilfs_super_block *nsbp;
	sector_t blocknr, newblocknr;
	unsigned long offset;
	int sb2i;  /* array index of the secondary superblock */
	int ret = 0;

	/* nilfs->ns_sem must be locked by the caller. */
	if (nilfs->ns_sbh[1] &&
	    nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) {
		sb2i = 1;
		blocknr = nilfs->ns_sbh[1]->b_blocknr;
	} else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) {
		sb2i = 0;
		blocknr = nilfs->ns_sbh[0]->b_blocknr;
	} else {
		sb2i = -1;
		blocknr = 0;
	}
	if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off)
		goto out;  /* super block location is unchanged */

	/* Get new super block buffer */
	newblocknr = sb2off >> nilfs->ns_blocksize_bits;
	offset = sb2off & (nilfs->ns_blocksize - 1);
	nsbh = sb_getblk(sb, newblocknr);
	if (!nsbh) {
		nilfs_warn(sb,
			   "unable to move secondary superblock to block %llu",
			   (unsigned long long)newblocknr);
		ret = -EIO;
		goto out;
	}
	nsbp = (void *)nsbh->b_data + offset;
	memset(nsbp, 0, nilfs->ns_blocksize);

	if (sb2i >= 0) {
		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
		brelse(nilfs->ns_sbh[sb2i]);
		nilfs->ns_sbh[sb2i] = nsbh;
		nilfs->ns_sbp[sb2i] = nsbp;
	} else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) {
		/* secondary super block will be restored to index 1 */
		nilfs->ns_sbh[1] = nsbh;
		nilfs->ns_sbp[1] = nsbp;
	} else {
		brelse(nsbh);
	}

 out:
	return ret;
}
/**
 * nilfs_resize_fs - resize the filesystem
 * @sb: super block instance
 * @newsize: new size of the filesystem (in bytes)
 */
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	__u64 devsize, newnsegs;
	loff_t sb2off;
	int ret;

	ret = -ERANGE;
	devsize = i_size_read(sb->s_bdev->bd_inode);
	if (newsize > devsize)
		goto out;

	/*
	 * Write lock is required to protect some functions depending
	 * on the number of segments, the number of reserved segments,
	 * and so forth.
	 */
	down_write(&nilfs->ns_segctor_sem);

	sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
	newnsegs = sb2off >> nilfs->ns_blocksize_bits;
	do_div(newnsegs, nilfs->ns_blocks_per_segment);

	ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
	up_write(&nilfs->ns_segctor_sem);
	if (ret < 0)
		goto out;

	ret = nilfs_construct_segment(sb);
	if (ret < 0)
		goto out;

	down_write(&nilfs->ns_sem);
	nilfs_move_2nd_super(sb, sb2off);
	ret = -EIO;
	sbp = nilfs_prepare_super(sb, 0);
	if (likely(sbp)) {
		nilfs_set_log_cursor(sbp[0], nilfs);
		/*
		 * Drop NILFS_RESIZE_FS flag for compatibility with
		 * mount-time resize which may be implemented in a
		 * future release.
		 */
		sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
					      ~NILFS_RESIZE_FS);
		sbp[0]->s_dev_size = cpu_to_le64(newsize);
		sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
		if (sbp[1])
			memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
		ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
	}
	up_write(&nilfs->ns_sem);

	/*
	 * Reset the range of allocatable segments last.  This order
	 * is important in the case of expansion because the secondary
	 * superblock must be protected from log write until migration
	 * completes.
	 */
	if (!ret)
		nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
 out:
	return ret;
}
static void nilfs_put_super(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	nilfs_detach_log_writer(sb);

	if (!sb_rdonly(sb)) {
		down_write(&nilfs->ns_sem);
		nilfs_cleanup_super(sb);
		up_write(&nilfs->ns_sem);
	}

	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

	destroy_nilfs(nilfs);
	sb->s_fs_info = NULL;
}
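/*
 * nilfs_sync_fs - sync_fs super operation
 *
 * When @wait is set, a segment is constructed to flush dirty data.
 * The super block is then committed if it is dirty, and the block
 * device cache is flushed.
 */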
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	/* This function is called when super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);

	down_write(&nilfs->ns_sem);
	if (nilfs_sb_dirty(nilfs)) {
		sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs));
		if (likely(sbp)) {
			nilfs_set_log_cursor(sbp[0], nilfs);
			nilfs_commit_super(sb, NILFS_SB_COMMIT);
		}
	}
	up_write(&nilfs->ns_sem);

	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}
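/*
 * nilfs_attach_checkpoint - load a checkpoint and attach its ifile
 *
 * Looks up or creates the nilfs_root for checkpoint @cno (or for the
 * current checkpoint tree when @curr_mnt is set), reads its inode file,
 * and returns the root through @rootp.
 */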
int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
			    struct nilfs_root **rootp)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root;
	struct nilfs_checkpoint *raw_cp;
	struct buffer_head *bh_cp;
	int err = -ENOMEM;

	root = nilfs_find_or_create_root(
		nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno);
	if (!root)
		return err;

	if (root->ifile)
		goto reuse;  /* already attached checkpoint */

	down_read(&nilfs->ns_segctor_sem);
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
					  &bh_cp);
	up_read(&nilfs->ns_segctor_sem);
	if (unlikely(err)) {
		if (err == -ENOENT || err == -EINVAL) {
			nilfs_err(sb,
				  "Invalid checkpoint (checkpoint number=%llu)",
				  (unsigned long long)cno);
			err = -EINVAL;
		}
		goto failed;
	}

	err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size,
			       &raw_cp->cp_ifile_inode, &root->ifile);
	if (err)
		goto failed_bh;

	atomic64_set(&root->inodes_count,
		     le64_to_cpu(raw_cp->cp_inodes_count));
	atomic64_set(&root->blocks_count,
		     le64_to_cpu(raw_cp->cp_blocks_count));

	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);

 reuse:
	*rootp = root;
	return 0;

 failed_bh:
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);
 failed:
	nilfs_put_root(root);

	return err;
}
static int nilfs_freeze(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (sb_rdonly(sb))
		return 0;

	/* Mark super block clean */
	down_write(&nilfs->ns_sem);
	err = nilfs_cleanup_super(sb);
	up_write(&nilfs->ns_sem);
	return err;
}

static int nilfs_unfreeze(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	if (sb_rdonly(sb))
		return 0;

	down_write(&nilfs->ns_sem);
	nilfs_setup_super(sb, false);
	up_write(&nilfs->ns_sem);
	return 0;
}
static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;
	struct the_nilfs *nilfs = root->nilfs;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	unsigned long long blocks;
	unsigned long overhead;
	unsigned long nrsvblocks;
	sector_t nfreeblocks;
	u64 nmaxinodes, nfreeinodes;
	int err;

	/*
	 * Compute all of the segment blocks
	 *
	 * The blocks before the first segment and after the last segment
	 * are excluded.
	 */
	blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments
		- nilfs->ns_first_data_block;
	nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment;

	/*
	 * Compute the overhead
	 *
	 * When distributing metadata blocks outside the segment structure,
	 * we must count them as the overhead.
	 */
	overhead = 0;

	err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
	if (unlikely(err))
		return err;

	err = nilfs_ifile_count_free_inodes(root->ifile,
					    &nmaxinodes, &nfreeinodes);
	if (unlikely(err)) {
		nilfs_warn(sb, "failed to count free inodes: err=%d", err);
		if (err == -ERANGE) {
			/*
			 * If nilfs_palloc_count_max_entries() returns
			 * -ERANGE error code then we simply treat the
			 * current inode count as the maximum possible and
			 * zero as the free inode count.
			 */
			nmaxinodes = atomic64_read(&root->inodes_count);
			nfreeinodes = 0;
			err = 0;
		} else
			return err;
	}

	buf->f_type = NILFS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = blocks - overhead;
	buf->f_bfree = nfreeblocks;
	buf->f_bavail = (buf->f_bfree >= nrsvblocks) ?
		(buf->f_bfree - nrsvblocks) : 0;
	buf->f_files = nmaxinodes;
	buf->f_ffree = nfreeinodes;
	buf->f_namelen = NILFS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}
static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct super_block *sb = dentry->d_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;

	if (!nilfs_test_opt(nilfs, BARRIER))
		seq_puts(seq, ",nobarrier");
	if (root->cno != NILFS_CPTREE_CURRENT_CNO)
		seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno);
	if (nilfs_test_opt(nilfs, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (nilfs_test_opt(nilfs, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (nilfs_test_opt(nilfs, STRICT_ORDER))
		seq_puts(seq, ",order=strict");
	if (nilfs_test_opt(nilfs, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (nilfs_test_opt(nilfs, DISCARD))
		seq_puts(seq, ",discard");

	return 0;
}
static const struct super_operations nilfs_sops = {
	.alloc_inode	= nilfs_alloc_inode,
	.free_inode	= nilfs_free_inode,
	.dirty_inode	= nilfs_dirty_inode,
	.evict_inode	= nilfs_evict_inode,
	.put_super	= nilfs_put_super,
	.sync_fs	= nilfs_sync_fs,
	.freeze_fs	= nilfs_freeze,
	.unfreeze_fs	= nilfs_unfreeze,
	.statfs		= nilfs_statfs,
	.remount_fs	= nilfs_remount,
	.show_options	= nilfs_show_options
};
enum {
	Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
	Opt_discard, Opt_nodiscard, Opt_err,
};

static match_table_t tokens = {
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_snapshot, "cp=%u"},
	{Opt_order, "order=%s"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_err, NULL}
};
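/*
 * parse_options - parse mount options into nilfs->ns_mount_opt
 *
 * Returns 1 on success and 0 if an option is unrecognized or invalid.
 */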
static int parse_options(char *options, struct super_block *sb, int is_remount)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	char *p;
	substring_t args[MAX_OPT_ARGS];

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			nilfs_set_opt(nilfs, BARRIER);
			break;
		case Opt_nobarrier:
			nilfs_clear_opt(nilfs, BARRIER);
			break;
		case Opt_order:
			if (strcmp(args[0].from, "relaxed") == 0)
				/* Ordered data semantics */
				nilfs_clear_opt(nilfs, STRICT_ORDER);
			else if (strcmp(args[0].from, "strict") == 0)
				/* Strict in-order semantics */
				nilfs_set_opt(nilfs, STRICT_ORDER);
			else
				return 0;
			break;
		case Opt_err_panic:
			nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO);
			break;
		case Opt_err_cont:
			nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT);
			break;
		case Opt_snapshot:
			if (is_remount) {
				nilfs_err(sb,
					  "\"%s\" option is invalid for remount",
					  p);
				return 0;
			}
			break;
		case Opt_norecovery:
			nilfs_set_opt(nilfs, NORECOVERY);
			break;
		case Opt_discard:
			nilfs_set_opt(nilfs, DISCARD);
			break;
		case Opt_nodiscard:
			nilfs_clear_opt(nilfs, DISCARD);
			break;
		default:
			nilfs_err(sb, "unrecognized mount option \"%s\"", p);
			return 0;
		}
	}
	return 1;
}
static inline void
nilfs_set_default_options(struct super_block *sb,
			  struct nilfs_super_block *sbp)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	nilfs->ns_mount_opt =
		NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
}
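/*
 * nilfs_setup_super - update the super block for a (re)mount
 *
 * On a regular mount this bumps the mount count and updates the mount
 * time; in all cases it clears NILFS_VALID_FS and commits both super
 * block copies.
 */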
static int nilfs_setup_super(struct super_block *sb, int is_mount)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int max_mnt_count;
	int mnt_count;

	/* nilfs->ns_sem must be locked by the caller. */
	sbp = nilfs_prepare_super(sb, 0);
	if (!sbp)
		return -EIO;

	if (!is_mount)
		goto skip_mount_setup;

	max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
	mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);

	if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
		nilfs_warn(sb, "mounting fs with errors");
#if 0
	} else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
		nilfs_warn(sb, "maximal mount count reached");
#endif
	}
	if (!max_mnt_count)
		sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);

	sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
	sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds());

 skip_mount_setup:
	sbp[0]->s_state =
		cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
	/* synchronize sbp[1] with sbp[0] */
	if (sbp[1])
		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
	return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}
struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
						 u64 pos, int blocksize,
						 struct buffer_head **pbh)
{
	unsigned long long sb_index = pos;
	unsigned long offset;

	offset = do_div(sb_index, blocksize);
	*pbh = sb_bread(sb, sb_index);
	if (!*pbh)
		return NULL;
	return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
}
int nilfs_store_magic_and_option(struct super_block *sb,
				 struct nilfs_super_block *sbp,
				 char *data)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	sb->s_magic = le16_to_cpu(sbp->s_magic);

	/* FS independent flags */
#ifdef NILFS_ATIME_DISABLE
	sb->s_flags |= SB_NOATIME;
#endif

	nilfs_set_default_options(sb, sbp);

	nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
	nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
	nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
	nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);

	return !parse_options(data, sb, 0) ? -EINVAL : 0;
}
int nilfs_check_feature_compatibility(struct super_block *sb,
				      struct nilfs_super_block *sbp)
{
	__u64 features;

	features = le64_to_cpu(sbp->s_feature_incompat) &
		~NILFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		nilfs_err(sb,
			  "couldn't mount because of unsupported optional features (%llx)",
			  (unsigned long long)features);
		return -EINVAL;
	}
	features = le64_to_cpu(sbp->s_feature_compat_ro) &
		~NILFS_FEATURE_COMPAT_RO_SUPP;
	if (!sb_rdonly(sb) && features) {
		nilfs_err(sb,
			  "couldn't mount RDWR because of unsupported optional features (%llx)",
			  (unsigned long long)features);
		return -EINVAL;
	}
	return 0;
}
static int nilfs_get_root_dentry(struct super_block *sb,
				 struct nilfs_root *root,
				 struct dentry **root_dentry)
{
	struct inode *inode;
	struct dentry *dentry;
	int ret = 0;

	inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		nilfs_err(sb, "error %d getting root inode", ret);
		goto out;
	}
	if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
		iput(inode);
		nilfs_err(sb, "corrupt root inode");
		ret = -EINVAL;
		goto out;
	}

	if (root->cno == NILFS_CPTREE_CURRENT_CNO) {
		dentry = d_find_alias(inode);
		if (!dentry) {
			dentry = d_make_root(inode);
			if (!dentry) {
				ret = -ENOMEM;
				goto failed_dentry;
			}
		} else {
			iput(inode);
		}
	} else {
		dentry = d_obtain_root(inode);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			goto failed_dentry;
		}
	}
	*root_dentry = dentry;
 out:
	return ret;

 failed_dentry:
	nilfs_err(sb, "error %d getting root dentry", ret);
	goto out;
}
static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
				 struct dentry **root_dentry)
{
	struct the_nilfs *nilfs = s->s_fs_info;
	struct nilfs_root *root;
	int ret;

	mutex_lock(&nilfs->ns_snapshot_mount_mutex);

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
	up_read(&nilfs->ns_segctor_sem);
	if (ret < 0) {
		ret = (ret == -ENOENT) ? -EINVAL : ret;
		goto out;
	} else if (!ret) {
		nilfs_err(s,
			  "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
			  (unsigned long long)cno);
		ret = -EINVAL;
		goto out;
	}

	ret = nilfs_attach_checkpoint(s, cno, false, &root);
	if (ret) {
		nilfs_err(s,
			  "error %d while loading snapshot (checkpoint number=%llu)",
			  ret, (unsigned long long)cno);
		goto out;
	}
	ret = nilfs_get_root_dentry(s, root, root_dentry);
	nilfs_put_root(root);
 out:
	mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
	return ret;
}
/**
 * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
 * @root_dentry: root dentry of the tree to be shrunk
 *
 * This function returns true if the tree is in use.
 */
static bool nilfs_tree_is_busy(struct dentry *root_dentry)
{
	shrink_dcache_parent(root_dentry);
	return d_count(root_dentry) > 1;
}
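/*
 * nilfs_checkpoint_is_mounted - check whether a checkpoint is busy
 *
 * Returns true if checkpoint @cno is recent enough to be protected, or
 * if a snapshot mount of it still has its root dentry in use.
 */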
int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root;
	struct inode *inode;
	struct dentry *dentry;
	int ret;

	if (cno > nilfs->ns_cno)
		return false;

	if (cno >= nilfs_last_cno(nilfs))
		return true;  /* protect recent checkpoints */

	ret = false;
	root = nilfs_lookup_root(nilfs, cno);
	if (root) {
		inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO);
		if (inode) {
			dentry = d_find_alias(inode);
			if (dentry) {
				ret = nilfs_tree_is_busy(dentry);
				dput(dentry);
			}
			iput(inode);
		}
		nilfs_put_root(root);
	}
	return ret;
}
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 *
 * This function is called exclusively under nilfs->ns_mount_mutex, so
 * the recovery process is protected from other simultaneous mounts.
 */
static int
nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct the_nilfs *nilfs;
	struct nilfs_root *fsroot;
	__u64 cno;
	int err;

	nilfs = alloc_nilfs(sb);
	if (!nilfs)
		return -ENOMEM;

	sb->s_fs_info = nilfs;

	err = init_nilfs(nilfs, sb, (char *)data);
	if (err)
		goto failed_nilfs;

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;
	sb->s_max_links = NILFS_LINK_MAX;

	sb->s_bdi = bdi_get(sb->s_bdev->bd_bdi);

	err = load_nilfs(nilfs, sb);
	if (err)
		goto failed_nilfs;

	cno = nilfs_last_cno(nilfs);
	err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
	if (err) {
		nilfs_err(sb,
			  "error %d while loading last checkpoint (checkpoint number=%llu)",
			  err, (unsigned long long)cno);
		goto failed_unload;
	}

	if (!sb_rdonly(sb)) {
		err = nilfs_attach_log_writer(sb, fsroot);
		if (err)
			goto failed_checkpoint;
	}

	err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
	if (err)
		goto failed_segctor;

	nilfs_put_root(fsroot);

	if (!sb_rdonly(sb)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sb, true);
		up_write(&nilfs->ns_sem);
	}

	return 0;

 failed_segctor:
	nilfs_detach_log_writer(sb);

 failed_checkpoint:
	nilfs_put_root(fsroot);

 failed_unload:
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

 failed_nilfs:
	destroy_nilfs(nilfs);
	return err;
}
static int nilfs_remount(struct super_block *sb, int *flags, char *data)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	unsigned long old_sb_flags;
	unsigned long old_mount_opt;
	int err;

	sync_filesystem(sb);
	old_sb_flags = sb->s_flags;
	old_mount_opt = nilfs->ns_mount_opt;

	if (!parse_options(data, sb, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL);

	err = -EINVAL;

	if (!nilfs_valid_fs(nilfs)) {
		nilfs_warn(sb,
			   "couldn't remount because the filesystem is in an incomplete recovery state");
		goto restore_opts;
	}

	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		goto out;
	if (*flags & SB_RDONLY) {
		/* Shutting down log writer */
		nilfs_detach_log_writer(sb);
		sb->s_flags |= SB_RDONLY;

		/*
		 * Remounting a valid RW partition RDONLY, so set
		 * the RDONLY flag and then mark the partition as valid again.
		 */
		down_write(&nilfs->ns_sem);
		nilfs_cleanup_super(sb);
		up_write(&nilfs->ns_sem);
	} else {
		__u64 features;
		struct nilfs_root *root;

		/*
		 * Mounting a RDONLY partition read-write, so reread and
		 * store the current valid flag.  (It may have been changed
		 * by fsck since we originally mounted the partition.)
		 */
		down_read(&nilfs->ns_sem);
		features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
			~NILFS_FEATURE_COMPAT_RO_SUPP;
		up_read(&nilfs->ns_sem);
		if (features) {
			nilfs_warn(sb,
				   "couldn't remount RDWR because of unsupported optional features (%llx)",
				   (unsigned long long)features);
			err = -EROFS;
			goto restore_opts;
		}

		sb->s_flags &= ~SB_RDONLY;

		root = NILFS_I(d_inode(sb->s_root))->i_root;
		err = nilfs_attach_log_writer(sb, root);
		if (err)
			goto restore_opts;

		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sb, true);
		up_write(&nilfs->ns_sem);
	}
 out:
	return 0;

 restore_opts:
	sb->s_flags = old_sb_flags;
	nilfs->ns_mount_opt = old_mount_opt;
	return err;
}
struct nilfs_super_data {
	struct block_device *bdev;
	__u64 cno;
	int flags;
};

static int nilfs_parse_snapshot_option(const char *option,
				       const substring_t *arg,
				       struct nilfs_super_data *sd)
{
	unsigned long long val;
	const char *msg = NULL;
	int err;

	if (!(sd->flags & SB_RDONLY)) {
		msg = "read-only option is not specified";
		goto parse_error;
	}

	err = kstrtoull(arg->from, 0, &val);
	if (err) {
		if (err == -ERANGE)
			msg = "too large checkpoint number";
		else
			msg = "malformed argument";
		goto parse_error;
	} else if (val == 0) {
		msg = "invalid checkpoint number 0";
		goto parse_error;
	}
	sd->cno = val;
	return 0;

 parse_error:
	nilfs_err(NULL, "invalid option \"%s\": %s", option, msg);
	return 1;
}
/**
 * nilfs_identify - pre-read mount options needed to identify mount instance
 * @data: mount options
 * @sd: nilfs_super_data
 */
static int nilfs_identify(char *data, struct nilfs_super_data *sd)
{
	char *p, *options = data;
	substring_t args[MAX_OPT_ARGS];
	int token;
	int ret = 0;

	do {
		p = strsep(&options, ",");
		if (p != NULL && *p) {
			token = match_token(p, tokens, args);
			if (token == Opt_snapshot)
				ret = nilfs_parse_snapshot_option(p, &args[0],
								  sd);
		}
		if (!options)
			break;
		BUG_ON(options == data);
		*(options - 1) = ',';
	} while (!ret);
	return ret;
}
static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}

static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}
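/*
 * nilfs_mount - mount entry point of the nilfs2 file system type
 *
 * Opens the block device, reuses an existing super block instance for
 * the device if one is present, and attaches either the current tree
 * or the snapshot given by the "cp=" option.
 */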
static struct dentry *
nilfs_mount(struct file_system_type *fs_type, int flags,
	    const char *dev_name, void *data)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	struct dentry *root_dentry;
	int err, s_new = false;

	if (!(flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(sd.bdev))
		return ERR_CAST(sd.bdev);

	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
	if (sd.bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
		err = -EBUSY;
		goto failed;
	}
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
		 sd.bdev);
	mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed;
	}

	if (!s->s_root) {
		s_new = true;

		/* New superblock instance created */
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (err)
			goto failed_super;

		s->s_flags |= SB_ACTIVE;
	} else if (!sd.cno) {
		if (nilfs_tree_is_busy(s->s_root)) {
			if ((flags ^ s->s_flags) & SB_RDONLY) {
				nilfs_err(s,
					  "the device already has a %s mount.",
					  sb_rdonly(s) ? "read-only" : "read/write");
				err = -EBUSY;
				goto failed_super;
			}
		} else {
			/*
			 * Try remount to setup mount states if the current
			 * tree is not mounted and only snapshots use this sb.
			 */
			err = nilfs_remount(s, &flags, data);
			if (err)
				goto failed_super;
		}
	}

	if (sd.cno) {
		err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
		if (err)
			goto failed_super;
	} else {
		root_dentry = dget(s->s_root);
	}

	if (!s_new)
		blkdev_put(sd.bdev, mode);

	return root_dentry;

 failed_super:
	deactivate_locked_super(s);

 failed:
	if (!s_new)
		blkdev_put(sd.bdev, mode);
	return ERR_PTR(err);
}
struct file_system_type nilfs_fs_type = {
	.owner    = THIS_MODULE,
	.name     = "nilfs2",
	.mount    = nilfs_mount,
	.kill_sb  = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("nilfs2");
static void nilfs_inode_init_once(void *obj)
{
	struct nilfs_inode_info *ii = obj;

	INIT_LIST_HEAD(&ii->i_dirty);
#ifdef CONFIG_NILFS_XATTR
	init_rwsem(&ii->xattr_sem);
#endif
	address_space_init_once(&ii->i_btnode_cache);
	ii->i_bmap = &ii->i_bmap_data;
	inode_init_once(&ii->vfs_inode);
}

static void nilfs_segbuf_init_once(void *obj)
{
	memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

static void nilfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(nilfs_inode_cachep);
	kmem_cache_destroy(nilfs_transaction_cachep);
	kmem_cache_destroy(nilfs_segbuf_cachep);
	kmem_cache_destroy(nilfs_btree_path_cache);
}

static int __init nilfs_init_cachep(void)
{
	nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
			sizeof(struct nilfs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
			nilfs_inode_init_once);
	if (!nilfs_inode_cachep)
		goto fail;

	nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache",
			sizeof(struct nilfs_transaction_info), 0,
			SLAB_RECLAIM_ACCOUNT, NULL);
	if (!nilfs_transaction_cachep)
		goto fail;

	nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache",
			sizeof(struct nilfs_segment_buffer), 0,
			SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once);
	if (!nilfs_segbuf_cachep)
		goto fail;

	nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache",
			sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX,
			0, 0, NULL);
	if (!nilfs_btree_path_cache)
		goto fail;

	return 0;

fail:
	nilfs_destroy_cachep();
	return -ENOMEM;
}
static int __init init_nilfs_fs(void)
{
	int err;

	err = nilfs_init_cachep();
	if (err)
		goto fail;

	err = nilfs_sysfs_init();
	if (err)
		goto free_cachep;

	err = register_filesystem(&nilfs_fs_type);
	if (err)
		goto deinit_sysfs_entry;

	printk(KERN_INFO "NILFS version 2 loaded\n");
	return 0;

deinit_sysfs_entry:
	nilfs_sysfs_exit();
free_cachep:
	nilfs_destroy_cachep();
fail:
	return err;
}

static void __exit exit_nilfs_fs(void)
{
	nilfs_destroy_cachep();
	nilfs_sysfs_exit();
	unregister_filesystem(&nilfs_fs_type);
}

module_init(init_nilfs_fs)
module_exit(exit_nilfs_fs)