// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif
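
/*
 * Dirty the VFS inode unless it is brand new (FI_NEW_INODE) or f2fs
 * already tracks it as dirty.
 */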
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}
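
/*
 * Device numbers for special inodes live in the first data block address
 * slots of the raw inode: the old encoding in slot 0, the new encoding in
 * slot 1 (both offset by any extra attribute area).
 */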
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}

	return;
}
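
/*
 * The inode checksum covers the whole on-disk inode block with the
 * i_inode_checksum field treated as zero, seeded from the per-sb checksum
 * seed, the inode number and i_generation.
 */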
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
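
/*
 * Validate the raw on-disk inode before it is exposed to the rest of the
 * filesystem; on any inconsistency, flag the superblock for fsck and
 * return false so the caller fails the read with -EFSCORRUPTED.
 */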
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				  "compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				  "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				  "log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}
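
/*
 * Fill the in-memory inode and f2fs_inode_info from the raw inode stored
 * in the node page, then sanity-check the result.
 */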
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	f2fs_init_extent_tree(inode, node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}
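
/*
 * Look up (or read in) an inode by number and wire up the inode,
 * file and address-space operations appropriate to its type.
 */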
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (ino == F2FS_COMPRESS_INO(sbi))
		goto make_now;
#endif

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
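
/* Same as f2fs_iget(), but retry on transient -ENOMEM failures. */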
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry;
		}
	}
	return inode;
}
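
/*
 * Copy the in-memory inode state back into the raw inode in @node_page
 * and dirty the page so the updated node gets written back.
 */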
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			ri->i_compress_flag =
				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}
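
/*
 * Fetch the inode's node page and write the in-memory inode into it,
 * retrying on -ENOMEM and stopping checkpointing on unexpected errors.
 */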
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}
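
/* ->write_inode callback: flush a dirty f2fs inode to its node page. */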
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent from producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* for the case f2fs_new_inode() failed, .i_ino is zero, skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode being left dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * and a following sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}