compress.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * f2fs compress support
  4. *
  5. * Copyright (c) 2019 Chao Yu <chao@kernel.org>
  6. */
  7. #include <linux/fs.h>
  8. #include <linux/f2fs_fs.h>
  9. #include <linux/writeback.h>
  10. #include <linux/backing-dev.h>
  11. #include <linux/lzo.h>
  12. #include <linux/lz4.h>
  13. #include <linux/zstd.h>
  14. #include <linux/pagevec.h>
  15. #include "f2fs.h"
  16. #include "node.h"
  17. #include "segment.h"
  18. #include <trace/events/f2fs.h>
  19. static struct kmem_cache *cic_entry_slab;
  20. static struct kmem_cache *dic_entry_slab;
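/*
 * Allocate/free an array of 'nr' page pointers for one cluster.  Requests
 * that fit within the per-filesystem page_array_slab come from that slab;
 * larger requests fall back to f2fs_kzalloc()/kfree().
 */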
  21. static void *page_array_alloc(struct inode *inode, int nr)
  22. {
  23. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  24. unsigned int size = sizeof(struct page *) * nr;
  25. if (likely(size <= sbi->page_array_slab_size))
  26. return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
  27. return f2fs_kzalloc(sbi, size, GFP_NOFS);
  28. }
  29. static void page_array_free(struct inode *inode, void *pages, int nr)
  30. {
  31. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  32. unsigned int size = sizeof(struct page *) * nr;
  33. if (!pages)
  34. return;
  35. if (likely(size <= sbi->page_array_slab_size))
  36. kmem_cache_free(sbi->page_array_slab, pages);
  37. else
  38. kfree(pages);
  39. }
  40. struct f2fs_compress_ops {
  41. int (*init_compress_ctx)(struct compress_ctx *cc);
  42. void (*destroy_compress_ctx)(struct compress_ctx *cc);
  43. int (*compress_pages)(struct compress_ctx *cc);
  44. int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
  45. void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
  46. int (*decompress_pages)(struct decompress_io_ctx *dic);
  47. };
  48. static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
  49. {
  50. return index & (cc->cluster_size - 1);
  51. }
  52. static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
  53. {
  54. return index >> cc->log_cluster_size;
  55. }
  56. static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
  57. {
  58. return cc->cluster_idx << cc->log_cluster_size;
  59. }
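/*
 * Return true if @page is one of the intermediate pages holding compressed
 * cluster data.  Such pages keep a pointer to their (de)compress io context
 * in page_private(), which begins with F2FS_COMPRESSED_PAGE_MAGIC.
 */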
  60. bool f2fs_is_compressed_page(struct page *page)
  61. {
  62. if (!PagePrivate(page))
  63. return false;
  64. if (!page_private(page))
  65. return false;
  66. if (page_private_nonpointer(page))
  67. return false;
  68. f2fs_bug_on(F2FS_M_SB(page->mapping),
  69. *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
  70. return true;
  71. }
  72. static void f2fs_set_compressed_page(struct page *page,
  73. struct inode *inode, pgoff_t index, void *data)
  74. {
  75. attach_page_private(page, (void *)data);
76. /* page->mapping and page->index are needed by fscrypt (i_crypto_info lookup and IV index) */
  77. page->index = index;
  78. page->mapping = inode->i_mapping;
  79. }
  80. static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
  81. {
  82. int i;
  83. for (i = 0; i < len; i++) {
  84. if (!cc->rpages[i])
  85. continue;
  86. if (unlock)
  87. unlock_page(cc->rpages[i]);
  88. else
  89. put_page(cc->rpages[i]);
  90. }
  91. }
  92. static void f2fs_put_rpages(struct compress_ctx *cc)
  93. {
  94. f2fs_drop_rpages(cc, cc->cluster_size, false);
  95. }
  96. static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
  97. {
  98. f2fs_drop_rpages(cc, len, true);
  99. }
  100. static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
  101. struct writeback_control *wbc, bool redirty, int unlock)
  102. {
  103. unsigned int i;
  104. for (i = 0; i < cc->cluster_size; i++) {
  105. if (!cc->rpages[i])
  106. continue;
  107. if (redirty)
  108. redirty_page_for_writepage(wbc, cc->rpages[i]);
  109. f2fs_put_page(cc->rpages[i], unlock);
  110. }
  111. }
  112. struct page *f2fs_compress_control_page(struct page *page)
  113. {
  114. return ((struct compress_io_ctx *)page_private(page))->rpages[0];
  115. }
  116. int f2fs_init_compress_ctx(struct compress_ctx *cc)
  117. {
  118. if (cc->rpages)
  119. return 0;
  120. cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
  121. return cc->rpages ? 0 : -ENOMEM;
  122. }
  123. void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
  124. {
  125. page_array_free(cc->inode, cc->rpages, cc->cluster_size);
  126. cc->rpages = NULL;
  127. cc->nr_rpages = 0;
  128. cc->nr_cpages = 0;
  129. if (!reuse)
  130. cc->cluster_idx = NULL_CLUSTER;
  131. }
  132. void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
  133. {
  134. unsigned int cluster_ofs;
  135. if (!f2fs_cluster_can_merge_page(cc, page->index))
  136. f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
  137. cluster_ofs = offset_in_cluster(cc, page->index);
  138. cc->rpages[cluster_ofs] = page;
  139. cc->nr_rpages++;
  140. cc->cluster_idx = cluster_idx(cc, page->index);
  141. }
  142. #ifdef CONFIG_F2FS_FS_LZO
  143. static int lzo_init_compress_ctx(struct compress_ctx *cc)
  144. {
  145. cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
  146. LZO1X_MEM_COMPRESS, GFP_NOFS);
  147. if (!cc->private)
  148. return -ENOMEM;
  149. cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
  150. return 0;
  151. }
  152. static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
  153. {
  154. kvfree(cc->private);
  155. cc->private = NULL;
  156. }
  157. static int lzo_compress_pages(struct compress_ctx *cc)
  158. {
  159. int ret;
  160. ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
  161. &cc->clen, cc->private);
  162. if (ret != LZO_E_OK) {
  163. printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
  164. KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
  165. return -EIO;
  166. }
  167. return 0;
  168. }
  169. static int lzo_decompress_pages(struct decompress_io_ctx *dic)
  170. {
  171. int ret;
  172. ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
  173. dic->rbuf, &dic->rlen);
  174. if (ret != LZO_E_OK) {
  175. printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
  176. KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
  177. return -EIO;
  178. }
  179. if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
  180. printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
  181. "expected:%lu\n", KERN_ERR,
  182. F2FS_I_SB(dic->inode)->sb->s_id,
  183. dic->rlen,
  184. PAGE_SIZE << dic->log_cluster_size);
  185. return -EIO;
  186. }
  187. return 0;
  188. }
  189. static const struct f2fs_compress_ops f2fs_lzo_ops = {
  190. .init_compress_ctx = lzo_init_compress_ctx,
  191. .destroy_compress_ctx = lzo_destroy_compress_ctx,
  192. .compress_pages = lzo_compress_pages,
  193. .decompress_pages = lzo_decompress_pages,
  194. };
  195. #endif
  196. #ifdef CONFIG_F2FS_FS_LZ4
  197. static int lz4_init_compress_ctx(struct compress_ctx *cc)
  198. {
  199. unsigned int size = LZ4_MEM_COMPRESS;
  200. #ifdef CONFIG_F2FS_FS_LZ4HC
  201. if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
  202. size = LZ4HC_MEM_COMPRESS;
  203. #endif
  204. cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
  205. if (!cc->private)
  206. return -ENOMEM;
207. /*
208. * We do not set cc->clen to LZ4_compressBound(inputsize) to cover the
209. * worst-case compressed size, because the lz4 compressor handles the
210. * output budget properly on its own.
211. */
  212. cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
  213. return 0;
  214. }
  215. static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
  216. {
  217. kvfree(cc->private);
  218. cc->private = NULL;
  219. }
  220. #ifdef CONFIG_F2FS_FS_LZ4HC
  221. static int lz4hc_compress_pages(struct compress_ctx *cc)
  222. {
  223. unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
  224. COMPRESS_LEVEL_OFFSET;
  225. int len;
  226. if (level)
  227. len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
  228. cc->clen, level, cc->private);
  229. else
  230. len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
  231. cc->clen, cc->private);
  232. if (!len)
  233. return -EAGAIN;
  234. cc->clen = len;
  235. return 0;
  236. }
  237. #endif
  238. static int lz4_compress_pages(struct compress_ctx *cc)
  239. {
  240. int len;
  241. #ifdef CONFIG_F2FS_FS_LZ4HC
  242. return lz4hc_compress_pages(cc);
  243. #endif
  244. len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
  245. cc->clen, cc->private);
  246. if (!len)
  247. return -EAGAIN;
  248. cc->clen = len;
  249. return 0;
  250. }
  251. static int lz4_decompress_pages(struct decompress_io_ctx *dic)
  252. {
  253. int ret;
  254. ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
  255. dic->clen, dic->rlen);
  256. if (ret < 0) {
  257. printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
  258. KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
  259. return -EIO;
  260. }
  261. if (ret != PAGE_SIZE << dic->log_cluster_size) {
  262. printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
  263. "expected:%lu\n", KERN_ERR,
  264. F2FS_I_SB(dic->inode)->sb->s_id, ret,
  265. PAGE_SIZE << dic->log_cluster_size);
  266. return -EIO;
  267. }
  268. return 0;
  269. }
  270. static const struct f2fs_compress_ops f2fs_lz4_ops = {
  271. .init_compress_ctx = lz4_init_compress_ctx,
  272. .destroy_compress_ctx = lz4_destroy_compress_ctx,
  273. .compress_pages = lz4_compress_pages,
  274. .decompress_pages = lz4_decompress_pages,
  275. };
  276. #endif
  277. #ifdef CONFIG_F2FS_FS_ZSTD
  278. #define F2FS_ZSTD_DEFAULT_CLEVEL 1
  279. static int zstd_init_compress_ctx(struct compress_ctx *cc)
  280. {
  281. ZSTD_parameters params;
  282. ZSTD_CStream *stream;
  283. void *workspace;
  284. unsigned int workspace_size;
  285. unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
  286. COMPRESS_LEVEL_OFFSET;
  287. if (!level)
  288. level = F2FS_ZSTD_DEFAULT_CLEVEL;
  289. params = ZSTD_getParams(level, cc->rlen, 0);
  290. workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
  291. workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
  292. workspace_size, GFP_NOFS);
  293. if (!workspace)
  294. return -ENOMEM;
  295. stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
  296. if (!stream) {
  297. printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
  298. KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
  299. __func__);
  300. kvfree(workspace);
  301. return -EIO;
  302. }
  303. cc->private = workspace;
  304. cc->private2 = stream;
  305. cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
  306. return 0;
  307. }
  308. static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
  309. {
  310. kvfree(cc->private);
  311. cc->private = NULL;
  312. cc->private2 = NULL;
  313. }
  314. static int zstd_compress_pages(struct compress_ctx *cc)
  315. {
  316. ZSTD_CStream *stream = cc->private2;
  317. ZSTD_inBuffer inbuf;
  318. ZSTD_outBuffer outbuf;
  319. int src_size = cc->rlen;
  320. int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
  321. int ret;
  322. inbuf.pos = 0;
  323. inbuf.src = cc->rbuf;
  324. inbuf.size = src_size;
  325. outbuf.pos = 0;
  326. outbuf.dst = cc->cbuf->cdata;
  327. outbuf.size = dst_size;
  328. ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
  329. if (ZSTD_isError(ret)) {
  330. printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
  331. KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
  332. __func__, ZSTD_getErrorCode(ret));
  333. return -EIO;
  334. }
  335. ret = ZSTD_endStream(stream, &outbuf);
  336. if (ZSTD_isError(ret)) {
  337. printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
  338. KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
  339. __func__, ZSTD_getErrorCode(ret));
  340. return -EIO;
  341. }
342. /*
343. * Compressed data remains in the intermediate buffer because there is
344. * no more space in cbuf.cdata.
345. */
  346. if (ret)
  347. return -EAGAIN;
  348. cc->clen = outbuf.pos;
  349. return 0;
  350. }
  351. static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
  352. {
  353. ZSTD_DStream *stream;
  354. void *workspace;
  355. unsigned int workspace_size;
  356. unsigned int max_window_size =
  357. MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
  358. workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
  359. workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
  360. workspace_size, GFP_NOFS);
  361. if (!workspace)
  362. return -ENOMEM;
  363. stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
  364. if (!stream) {
  365. printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
  366. KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
  367. __func__);
  368. kvfree(workspace);
  369. return -EIO;
  370. }
  371. dic->private = workspace;
  372. dic->private2 = stream;
  373. return 0;
  374. }
  375. static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
  376. {
  377. kvfree(dic->private);
  378. dic->private = NULL;
  379. dic->private2 = NULL;
  380. }
  381. static int zstd_decompress_pages(struct decompress_io_ctx *dic)
  382. {
  383. ZSTD_DStream *stream = dic->private2;
  384. ZSTD_inBuffer inbuf;
  385. ZSTD_outBuffer outbuf;
  386. int ret;
  387. inbuf.pos = 0;
  388. inbuf.src = dic->cbuf->cdata;
  389. inbuf.size = dic->clen;
  390. outbuf.pos = 0;
  391. outbuf.dst = dic->rbuf;
  392. outbuf.size = dic->rlen;
  393. ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
  394. if (ZSTD_isError(ret)) {
395. printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
  396. KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
  397. __func__, ZSTD_getErrorCode(ret));
  398. return -EIO;
  399. }
  400. if (dic->rlen != outbuf.pos) {
  401. printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
  402. "expected:%lu\n", KERN_ERR,
  403. F2FS_I_SB(dic->inode)->sb->s_id,
  404. __func__, dic->rlen,
  405. PAGE_SIZE << dic->log_cluster_size);
  406. return -EIO;
  407. }
  408. return 0;
  409. }
  410. static const struct f2fs_compress_ops f2fs_zstd_ops = {
  411. .init_compress_ctx = zstd_init_compress_ctx,
  412. .destroy_compress_ctx = zstd_destroy_compress_ctx,
  413. .compress_pages = zstd_compress_pages,
  414. .init_decompress_ctx = zstd_init_decompress_ctx,
  415. .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
  416. .decompress_pages = zstd_decompress_pages,
  417. };
  418. #endif
  419. #ifdef CONFIG_F2FS_FS_LZO
  420. #ifdef CONFIG_F2FS_FS_LZORLE
  421. static int lzorle_compress_pages(struct compress_ctx *cc)
  422. {
  423. int ret;
  424. ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
  425. &cc->clen, cc->private);
  426. if (ret != LZO_E_OK) {
  427. printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
  428. KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
  429. return -EIO;
  430. }
  431. return 0;
  432. }
  433. static const struct f2fs_compress_ops f2fs_lzorle_ops = {
  434. .init_compress_ctx = lzo_init_compress_ctx,
  435. .destroy_compress_ctx = lzo_destroy_compress_ctx,
  436. .compress_pages = lzorle_compress_pages,
  437. .decompress_pages = lzo_decompress_pages,
  438. };
  439. #endif
  440. #endif
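/* compression backends indexed by i_compress_algorithm; NULL if not built in */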
  441. static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
  442. #ifdef CONFIG_F2FS_FS_LZO
  443. &f2fs_lzo_ops,
  444. #else
  445. NULL,
  446. #endif
  447. #ifdef CONFIG_F2FS_FS_LZ4
  448. &f2fs_lz4_ops,
  449. #else
  450. NULL,
  451. #endif
  452. #ifdef CONFIG_F2FS_FS_ZSTD
  453. &f2fs_zstd_ops,
  454. #else
  455. NULL,
  456. #endif
  457. #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
  458. &f2fs_lzorle_ops,
  459. #else
  460. NULL,
  461. #endif
  462. };
  463. bool f2fs_is_compress_backend_ready(struct inode *inode)
  464. {
  465. if (!f2fs_compressed_file(inode))
  466. return true;
  467. return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
  468. }
  469. static mempool_t *compress_page_pool;
  470. static int num_compress_pages = 512;
  471. module_param(num_compress_pages, uint, 0444);
  472. MODULE_PARM_DESC(num_compress_pages,
  473. "Number of intermediate compress pages to preallocate");
  474. int f2fs_init_compress_mempool(void)
  475. {
  476. compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
  477. if (!compress_page_pool)
  478. return -ENOMEM;
  479. return 0;
  480. }
  481. void f2fs_destroy_compress_mempool(void)
  482. {
  483. mempool_destroy(compress_page_pool);
  484. }
  485. static struct page *f2fs_compress_alloc_page(void)
  486. {
  487. struct page *page;
  488. page = mempool_alloc(compress_page_pool, GFP_NOFS);
  489. lock_page(page);
  490. return page;
  491. }
  492. static void f2fs_compress_free_page(struct page *page)
  493. {
  494. if (!page)
  495. return;
  496. detach_page_private(page);
  497. page->mapping = NULL;
  498. unlock_page(page);
  499. mempool_free(page, compress_page_pool);
  500. }
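/*
 * Map an array of pages into one contiguous buffer with vm_map_ram(),
 * flushing stale vmap aliases and retrying a few times if virtual address
 * space is temporarily exhausted.
 */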
  501. #define MAX_VMAP_RETRIES 3
  502. static void *f2fs_vmap(struct page **pages, unsigned int count)
  503. {
  504. int i;
  505. void *buf = NULL;
  506. for (i = 0; i < MAX_VMAP_RETRIES; i++) {
  507. buf = vm_map_ram(pages, count, -1);
  508. if (buf)
  509. break;
  510. vm_unmap_aliases();
  511. }
  512. return buf;
  513. }
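/*
 * Compress one cluster: allocate destination pages, map the source and
 * destination arrays, run the algorithm's compress_pages() hook, then fill
 * in the compress header (clen, optional checksum) and free any destination
 * pages beyond the compressed size.  Returns -EAGAIN when the result would
 * not save at least one block, in which case the cluster is written raw.
 */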
  514. static int f2fs_compress_pages(struct compress_ctx *cc)
  515. {
  516. struct f2fs_inode_info *fi = F2FS_I(cc->inode);
  517. const struct f2fs_compress_ops *cops =
  518. f2fs_cops[fi->i_compress_algorithm];
  519. unsigned int max_len, new_nr_cpages;
  520. struct page **new_cpages;
  521. u32 chksum = 0;
  522. int i, ret;
  523. trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
  524. cc->cluster_size, fi->i_compress_algorithm);
  525. if (cops->init_compress_ctx) {
  526. ret = cops->init_compress_ctx(cc);
  527. if (ret)
  528. goto out;
  529. }
  530. max_len = COMPRESS_HEADER_SIZE + cc->clen;
  531. cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
  532. cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
  533. if (!cc->cpages) {
  534. ret = -ENOMEM;
  535. goto destroy_compress_ctx;
  536. }
  537. for (i = 0; i < cc->nr_cpages; i++) {
  538. cc->cpages[i] = f2fs_compress_alloc_page();
  539. if (!cc->cpages[i]) {
  540. ret = -ENOMEM;
  541. goto out_free_cpages;
  542. }
  543. }
  544. cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
  545. if (!cc->rbuf) {
  546. ret = -ENOMEM;
  547. goto out_free_cpages;
  548. }
  549. cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
  550. if (!cc->cbuf) {
  551. ret = -ENOMEM;
  552. goto out_vunmap_rbuf;
  553. }
  554. ret = cops->compress_pages(cc);
  555. if (ret)
  556. goto out_vunmap_cbuf;
  557. max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
  558. if (cc->clen > max_len) {
  559. ret = -EAGAIN;
  560. goto out_vunmap_cbuf;
  561. }
  562. cc->cbuf->clen = cpu_to_le32(cc->clen);
  563. if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
  564. chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
  565. cc->cbuf->cdata, cc->clen);
  566. cc->cbuf->chksum = cpu_to_le32(chksum);
  567. for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
  568. cc->cbuf->reserved[i] = cpu_to_le32(0);
  569. new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
  570. /* Now we're going to cut unnecessary tail pages */
  571. new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
  572. if (!new_cpages) {
  573. ret = -ENOMEM;
  574. goto out_vunmap_cbuf;
  575. }
  576. /* zero out any unused part of the last page */
  577. memset(&cc->cbuf->cdata[cc->clen], 0,
  578. (new_nr_cpages * PAGE_SIZE) -
  579. (cc->clen + COMPRESS_HEADER_SIZE));
  580. vm_unmap_ram(cc->cbuf, cc->nr_cpages);
  581. vm_unmap_ram(cc->rbuf, cc->cluster_size);
  582. for (i = 0; i < cc->nr_cpages; i++) {
  583. if (i < new_nr_cpages) {
  584. new_cpages[i] = cc->cpages[i];
  585. continue;
  586. }
  587. f2fs_compress_free_page(cc->cpages[i]);
  588. cc->cpages[i] = NULL;
  589. }
  590. if (cops->destroy_compress_ctx)
  591. cops->destroy_compress_ctx(cc);
  592. page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
  593. cc->cpages = new_cpages;
  594. cc->nr_cpages = new_nr_cpages;
  595. trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
  596. cc->clen, ret);
  597. return 0;
  598. out_vunmap_cbuf:
  599. vm_unmap_ram(cc->cbuf, cc->nr_cpages);
  600. out_vunmap_rbuf:
  601. vm_unmap_ram(cc->rbuf, cc->cluster_size);
  602. out_free_cpages:
  603. for (i = 0; i < cc->nr_cpages; i++) {
  604. if (cc->cpages[i])
  605. f2fs_compress_free_page(cc->cpages[i]);
  606. }
  607. page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
  608. cc->cpages = NULL;
  609. destroy_compress_ctx:
  610. if (cops->destroy_compress_ctx)
  611. cops->destroy_compress_ctx(cc);
  612. out:
  613. trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
  614. cc->clen, ret);
  615. return ret;
  616. }
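/*
 * Decompress one cluster.  Temporary pages (tpages) fill the slots of the
 * cluster that have no pagecache page, the compressed and raw page arrays
 * are mapped with f2fs_vmap(), and the algorithm's decompress_pages() hook
 * is run.  If COMPRESS_CHKSUM is enabled, the stored checksum is verified
 * and a mismatch flags the filesystem for fsck.
 */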
  617. void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
  618. {
  619. struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
  620. struct f2fs_inode_info *fi = F2FS_I(dic->inode);
  621. const struct f2fs_compress_ops *cops =
  622. f2fs_cops[fi->i_compress_algorithm];
  623. int ret;
  624. int i;
  625. trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
  626. dic->cluster_size, fi->i_compress_algorithm);
  627. if (dic->failed) {
  628. ret = -EIO;
  629. goto out_end_io;
  630. }
  631. dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
  632. if (!dic->tpages) {
  633. ret = -ENOMEM;
  634. goto out_end_io;
  635. }
  636. for (i = 0; i < dic->cluster_size; i++) {
  637. if (dic->rpages[i]) {
  638. dic->tpages[i] = dic->rpages[i];
  639. continue;
  640. }
  641. dic->tpages[i] = f2fs_compress_alloc_page();
  642. if (!dic->tpages[i]) {
  643. ret = -ENOMEM;
  644. goto out_end_io;
  645. }
  646. }
  647. if (cops->init_decompress_ctx) {
  648. ret = cops->init_decompress_ctx(dic);
  649. if (ret)
  650. goto out_end_io;
  651. }
  652. dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
  653. if (!dic->rbuf) {
  654. ret = -ENOMEM;
  655. goto out_destroy_decompress_ctx;
  656. }
  657. dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
  658. if (!dic->cbuf) {
  659. ret = -ENOMEM;
  660. goto out_vunmap_rbuf;
  661. }
  662. dic->clen = le32_to_cpu(dic->cbuf->clen);
  663. dic->rlen = PAGE_SIZE << dic->log_cluster_size;
  664. if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
  665. ret = -EFSCORRUPTED;
  666. goto out_vunmap_cbuf;
  667. }
  668. ret = cops->decompress_pages(dic);
  669. if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
  670. u32 provided = le32_to_cpu(dic->cbuf->chksum);
  671. u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
  672. if (provided != calculated) {
  673. if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
  674. set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
  675. printk_ratelimited(
  676. "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
  677. KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
  678. provided, calculated);
  679. }
  680. set_sbi_flag(sbi, SBI_NEED_FSCK);
  681. }
  682. }
  683. out_vunmap_cbuf:
  684. vm_unmap_ram(dic->cbuf, dic->nr_cpages);
  685. out_vunmap_rbuf:
  686. vm_unmap_ram(dic->rbuf, dic->cluster_size);
  687. out_destroy_decompress_ctx:
  688. if (cops->destroy_decompress_ctx)
  689. cops->destroy_decompress_ctx(dic);
  690. out_end_io:
  691. trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
  692. dic->clen, ret);
  693. f2fs_decompress_end_io(dic, ret);
  694. }
  695. /*
  696. * This is called when a page of a compressed cluster has been read from disk
  697. * (or failed to be read from disk). It checks whether this page was the last
  698. * page being waited on in the cluster, and if so, it decompresses the cluster
  699. * (or in the case of a failure, cleans up without actually decompressing).
  700. */
  701. void f2fs_end_read_compressed_page(struct page *page, bool failed,
  702. block_t blkaddr)
  703. {
  704. struct decompress_io_ctx *dic =
  705. (struct decompress_io_ctx *)page_private(page);
  706. struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
  707. dec_page_count(sbi, F2FS_RD_DATA);
  708. if (failed)
  709. WRITE_ONCE(dic->failed, true);
  710. else if (blkaddr)
  711. f2fs_cache_compressed_page(sbi, page,
  712. dic->inode->i_ino, blkaddr);
  713. if (atomic_dec_and_test(&dic->remaining_pages))
  714. f2fs_decompress_cluster(dic);
  715. }
  716. static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
  717. {
  718. if (cc->cluster_idx == NULL_CLUSTER)
  719. return true;
  720. return cc->cluster_idx == cluster_idx(cc, index);
  721. }
  722. bool f2fs_cluster_is_empty(struct compress_ctx *cc)
  723. {
  724. return cc->nr_rpages == 0;
  725. }
  726. static bool f2fs_cluster_is_full(struct compress_ctx *cc)
  727. {
  728. return cc->cluster_size == cc->nr_rpages;
  729. }
  730. bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
  731. {
  732. if (f2fs_cluster_is_empty(cc))
  733. return true;
  734. return is_page_in_cluster(cc, index);
  735. }
  736. static bool cluster_has_invalid_data(struct compress_ctx *cc)
  737. {
  738. loff_t i_size = i_size_read(cc->inode);
  739. unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
  740. int i;
  741. for (i = 0; i < cc->cluster_size; i++) {
  742. struct page *page = cc->rpages[i];
  743. f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
  744. /* beyond EOF */
  745. if (page->index >= nr_pages)
  746. return true;
  747. }
  748. return false;
  749. }
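/*
 * Count blocks in the cluster at @cluster_idx.  Returns 0 if the cluster is
 * not compressed (no COMPRESS_ADDR header), otherwise the header plus either
 * the valid compressed blocks (@compr == true) or all non-NULL blocks
 * (@compr == false).  A negative value is returned on lookup failure.
 */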
  750. static int __f2fs_cluster_blocks(struct inode *inode,
  751. unsigned int cluster_idx, bool compr)
  752. {
  753. struct dnode_of_data dn;
  754. unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
  755. unsigned int start_idx = cluster_idx <<
  756. F2FS_I(inode)->i_log_cluster_size;
  757. int ret;
  758. set_new_dnode(&dn, inode, NULL, NULL, 0);
  759. ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
  760. if (ret) {
  761. if (ret == -ENOENT)
  762. ret = 0;
  763. goto fail;
  764. }
  765. if (dn.data_blkaddr == COMPRESS_ADDR) {
  766. int i;
  767. ret = 1;
  768. for (i = 1; i < cluster_size; i++) {
  769. block_t blkaddr;
  770. blkaddr = data_blkaddr(dn.inode,
  771. dn.node_page, dn.ofs_in_node + i);
  772. if (compr) {
  773. if (__is_valid_data_blkaddr(blkaddr))
  774. ret++;
  775. } else {
  776. if (blkaddr != NULL_ADDR)
  777. ret++;
  778. }
  779. }
  780. f2fs_bug_on(F2FS_I_SB(inode),
  781. !compr && ret != cluster_size &&
  782. !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
  783. }
  784. fail:
  785. f2fs_put_dnode(&dn);
  786. return ret;
  787. }
  788. /* return # of compressed blocks in compressed cluster */
  789. static int f2fs_compressed_blocks(struct compress_ctx *cc)
  790. {
  791. return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
  792. }
  793. /* return # of valid blocks in compressed cluster */
  794. int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
  795. {
  796. return __f2fs_cluster_blocks(inode,
  797. index >> F2FS_I(inode)->i_log_cluster_size,
  798. false);
  799. }
  800. static bool cluster_may_compress(struct compress_ctx *cc)
  801. {
  802. if (!f2fs_need_compress_data(cc->inode))
  803. return false;
  804. if (f2fs_is_atomic_file(cc->inode))
  805. return false;
  806. if (!f2fs_cluster_is_full(cc))
  807. return false;
  808. if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
  809. return false;
  810. return !cluster_has_invalid_data(cc);
  811. }
  812. static void set_cluster_writeback(struct compress_ctx *cc)
  813. {
  814. int i;
  815. for (i = 0; i < cc->cluster_size; i++) {
  816. if (cc->rpages[i])
  817. set_page_writeback(cc->rpages[i]);
  818. }
  819. }
  820. static void set_cluster_dirty(struct compress_ctx *cc)
  821. {
  822. int i;
  823. for (i = 0; i < cc->cluster_size; i++)
  824. if (cc->rpages[i])
  825. set_page_dirty(cc->rpages[i]);
  826. }
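/*
 * write_begin() helper for a compressed cluster: pin and lock every page of
 * the cluster, read in any pages that are not yet uptodate, and return the
 * locked page array through *fsdata so f2fs_compress_write_end() can dirty
 * and release the whole cluster.
 */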
  827. static int prepare_compress_overwrite(struct compress_ctx *cc,
  828. struct page **pagep, pgoff_t index, void **fsdata)
  829. {
  830. struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
  831. struct address_space *mapping = cc->inode->i_mapping;
  832. struct page *page;
  833. sector_t last_block_in_bio;
  834. unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
  835. pgoff_t start_idx = start_idx_of_cluster(cc);
  836. int i, ret;
  837. retry:
  838. ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
  839. if (ret <= 0)
  840. return ret;
  841. ret = f2fs_init_compress_ctx(cc);
  842. if (ret)
  843. return ret;
  844. /* keep page reference to avoid page reclaim */
  845. for (i = 0; i < cc->cluster_size; i++) {
  846. page = f2fs_pagecache_get_page(mapping, start_idx + i,
  847. fgp_flag, GFP_NOFS);
  848. if (!page) {
  849. ret = -ENOMEM;
  850. goto unlock_pages;
  851. }
  852. if (PageUptodate(page))
  853. f2fs_put_page(page, 1);
  854. else
  855. f2fs_compress_ctx_add_page(cc, page);
  856. }
  857. if (!f2fs_cluster_is_empty(cc)) {
  858. struct bio *bio = NULL;
  859. ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
  860. &last_block_in_bio, false, true);
  861. f2fs_put_rpages(cc);
  862. f2fs_destroy_compress_ctx(cc, true);
  863. if (ret)
  864. goto out;
  865. if (bio)
  866. f2fs_submit_bio(sbi, bio, DATA);
  867. ret = f2fs_init_compress_ctx(cc);
  868. if (ret)
  869. goto out;
  870. }
  871. for (i = 0; i < cc->cluster_size; i++) {
  872. f2fs_bug_on(sbi, cc->rpages[i]);
  873. page = find_lock_page(mapping, start_idx + i);
  874. if (!page) {
  875. /* page can be truncated */
  876. goto release_and_retry;
  877. }
  878. f2fs_wait_on_page_writeback(page, DATA, true, true);
  879. f2fs_compress_ctx_add_page(cc, page);
  880. if (!PageUptodate(page)) {
  881. release_and_retry:
  882. f2fs_put_rpages(cc);
  883. f2fs_unlock_rpages(cc, i + 1);
  884. f2fs_destroy_compress_ctx(cc, true);
  885. goto retry;
  886. }
  887. }
  888. if (likely(!ret)) {
  889. *fsdata = cc->rpages;
  890. *pagep = cc->rpages[offset_in_cluster(cc, index)];
  891. return cc->cluster_size;
  892. }
  893. unlock_pages:
  894. f2fs_put_rpages(cc);
  895. f2fs_unlock_rpages(cc, i);
  896. f2fs_destroy_compress_ctx(cc, true);
  897. out:
  898. return ret;
  899. }
  900. int f2fs_prepare_compress_overwrite(struct inode *inode,
  901. struct page **pagep, pgoff_t index, void **fsdata)
  902. {
  903. struct compress_ctx cc = {
  904. .inode = inode,
  905. .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
  906. .cluster_size = F2FS_I(inode)->i_cluster_size,
  907. .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
  908. .rpages = NULL,
  909. .nr_rpages = 0,
  910. };
  911. return prepare_compress_overwrite(&cc, pagep, index, fsdata);
  912. }
  913. bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
  914. pgoff_t index, unsigned copied)
  915. {
  916. struct compress_ctx cc = {
  917. .inode = inode,
  918. .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
  919. .cluster_size = F2FS_I(inode)->i_cluster_size,
  920. .rpages = fsdata,
  921. };
  922. bool first_index = (index == cc.rpages[0]->index);
  923. if (copied)
  924. set_cluster_dirty(&cc);
  925. f2fs_put_rpages_wbc(&cc, NULL, false, 1);
  926. f2fs_destroy_compress_ctx(&cc, false);
  927. return first_index;
  928. }
  929. int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
  930. {
  931. void *fsdata = NULL;
  932. struct page *pagep;
  933. int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
  934. pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
  935. log_cluster_size;
  936. int err;
  937. err = f2fs_is_compressed_cluster(inode, start_idx);
  938. if (err < 0)
  939. return err;
  940. /* truncate normal cluster */
  941. if (!err)
  942. return f2fs_do_truncate_blocks(inode, from, lock);
  943. /* truncate compressed cluster */
  944. err = f2fs_prepare_compress_overwrite(inode, &pagep,
  945. start_idx, &fsdata);
  946. /* should not be a normal cluster */
  947. f2fs_bug_on(F2FS_I_SB(inode), err == 0);
  948. if (err <= 0)
  949. return err;
  950. if (err > 0) {
  951. struct page **rpages = fsdata;
  952. int cluster_size = F2FS_I(inode)->i_cluster_size;
  953. int i;
  954. for (i = cluster_size - 1; i >= 0; i--) {
  955. loff_t start = rpages[i]->index << PAGE_SHIFT;
  956. if (from <= start) {
  957. zero_user_segment(rpages[i], 0, PAGE_SIZE);
  958. } else {
  959. zero_user_segment(rpages[i], from - start,
  960. PAGE_SIZE);
  961. break;
  962. }
  963. }
  964. f2fs_compress_write_end(inode, fsdata, start_idx, true);
  965. }
  966. return 0;
  967. }
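/*
 * Write one compressed cluster.  The first block of the cluster is marked
 * COMPRESS_ADDR as the cluster header, the compressed pages are written out
 * of place, any now-unused blocks are invalidated, and i_compr_blocks is
 * updated.  Returns -EAGAIN so the caller falls back to writing the raw
 * pages if the cluster cannot be written compressed right now.
 */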
  968. static int f2fs_write_compressed_pages(struct compress_ctx *cc,
  969. int *submitted,
  970. struct writeback_control *wbc,
  971. enum iostat_type io_type)
  972. {
  973. struct inode *inode = cc->inode;
  974. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  975. struct f2fs_inode_info *fi = F2FS_I(inode);
  976. struct f2fs_io_info fio = {
  977. .sbi = sbi,
  978. .ino = cc->inode->i_ino,
  979. .type = DATA,
  980. .op = REQ_OP_WRITE,
  981. .op_flags = wbc_to_write_flags(wbc),
  982. .old_blkaddr = NEW_ADDR,
  983. .page = NULL,
  984. .encrypted_page = NULL,
  985. .compressed_page = NULL,
  986. .submitted = false,
  987. .io_type = io_type,
  988. .io_wbc = wbc,
  989. .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
  990. };
  991. struct dnode_of_data dn;
  992. struct node_info ni;
  993. struct compress_io_ctx *cic;
  994. pgoff_t start_idx = start_idx_of_cluster(cc);
  995. unsigned int last_index = cc->cluster_size - 1;
  996. loff_t psize;
  997. int i, err;
998. /* we should bypass data pages to let the kworker jobs proceed */
  999. if (unlikely(f2fs_cp_error(sbi))) {
  1000. mapping_set_error(cc->rpages[0]->mapping, -EIO);
  1001. goto out_free;
  1002. }
  1003. if (IS_NOQUOTA(inode)) {
  1004. /*
  1005. * We need to wait for node_write to avoid block allocation during
  1006. * checkpoint. This can only happen to quota writes which can cause
  1007. * the below discard race condition.
  1008. */
  1009. f2fs_down_read(&sbi->node_write);
  1010. } else if (!f2fs_trylock_op(sbi)) {
  1011. goto out_free;
  1012. }
  1013. set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
  1014. err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
  1015. if (err)
  1016. goto out_unlock_op;
  1017. for (i = 0; i < cc->cluster_size; i++) {
  1018. if (data_blkaddr(dn.inode, dn.node_page,
  1019. dn.ofs_in_node + i) == NULL_ADDR)
  1020. goto out_put_dnode;
  1021. }
  1022. psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
  1023. err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
  1024. if (err)
  1025. goto out_put_dnode;
  1026. fio.version = ni.version;
  1027. cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
  1028. if (!cic)
  1029. goto out_put_dnode;
  1030. cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
  1031. cic->inode = inode;
  1032. atomic_set(&cic->pending_pages, cc->nr_cpages);
  1033. cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
  1034. if (!cic->rpages)
  1035. goto out_put_cic;
  1036. cic->nr_rpages = cc->cluster_size;
  1037. for (i = 0; i < cc->nr_cpages; i++) {
  1038. f2fs_set_compressed_page(cc->cpages[i], inode,
  1039. cc->rpages[i + 1]->index, cic);
  1040. fio.compressed_page = cc->cpages[i];
  1041. fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
  1042. dn.ofs_in_node + i + 1);
  1043. /* wait for GCed page writeback via META_MAPPING */
  1044. f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
  1045. if (fio.encrypted) {
  1046. fio.page = cc->rpages[i + 1];
  1047. err = f2fs_encrypt_one_page(&fio);
  1048. if (err)
  1049. goto out_destroy_crypt;
  1050. cc->cpages[i] = fio.encrypted_page;
  1051. }
  1052. }
  1053. set_cluster_writeback(cc);
  1054. for (i = 0; i < cc->cluster_size; i++)
  1055. cic->rpages[i] = cc->rpages[i];
  1056. for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
  1057. block_t blkaddr;
  1058. blkaddr = f2fs_data_blkaddr(&dn);
  1059. fio.page = cc->rpages[i];
  1060. fio.old_blkaddr = blkaddr;
  1061. /* cluster header */
  1062. if (i == 0) {
  1063. if (blkaddr == COMPRESS_ADDR)
  1064. fio.compr_blocks++;
  1065. if (__is_valid_data_blkaddr(blkaddr))
  1066. f2fs_invalidate_blocks(sbi, blkaddr);
  1067. f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
  1068. goto unlock_continue;
  1069. }
  1070. if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
  1071. fio.compr_blocks++;
  1072. if (i > cc->nr_cpages) {
  1073. if (__is_valid_data_blkaddr(blkaddr)) {
  1074. f2fs_invalidate_blocks(sbi, blkaddr);
  1075. f2fs_update_data_blkaddr(&dn, NEW_ADDR);
  1076. }
  1077. goto unlock_continue;
  1078. }
  1079. f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
  1080. if (fio.encrypted)
  1081. fio.encrypted_page = cc->cpages[i - 1];
  1082. else
  1083. fio.compressed_page = cc->cpages[i - 1];
  1084. cc->cpages[i - 1] = NULL;
  1085. f2fs_outplace_write_data(&dn, &fio);
  1086. (*submitted)++;
  1087. unlock_continue:
  1088. inode_dec_dirty_pages(cc->inode);
  1089. unlock_page(fio.page);
  1090. }
  1091. if (fio.compr_blocks)
  1092. f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
  1093. f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
  1094. add_compr_block_stat(inode, cc->nr_cpages);
  1095. set_inode_flag(cc->inode, FI_APPEND_WRITE);
  1096. if (cc->cluster_idx == 0)
  1097. set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
  1098. f2fs_put_dnode(&dn);
  1099. if (IS_NOQUOTA(inode))
  1100. f2fs_up_read(&sbi->node_write);
  1101. else
  1102. f2fs_unlock_op(sbi);
  1103. spin_lock(&fi->i_size_lock);
  1104. if (fi->last_disk_size < psize)
  1105. fi->last_disk_size = psize;
  1106. spin_unlock(&fi->i_size_lock);
  1107. f2fs_put_rpages(cc);
  1108. page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
  1109. cc->cpages = NULL;
  1110. f2fs_destroy_compress_ctx(cc, false);
  1111. return 0;
  1112. out_destroy_crypt:
  1113. page_array_free(cc->inode, cic->rpages, cc->cluster_size);
  1114. for (--i; i >= 0; i--)
  1115. fscrypt_finalize_bounce_page(&cc->cpages[i]);
  1116. out_put_cic:
  1117. kmem_cache_free(cic_entry_slab, cic);
  1118. out_put_dnode:
  1119. f2fs_put_dnode(&dn);
  1120. out_unlock_op:
  1121. if (IS_NOQUOTA(inode))
  1122. f2fs_up_read(&sbi->node_write);
  1123. else
  1124. f2fs_unlock_op(sbi);
  1125. out_free:
  1126. for (i = 0; i < cc->nr_cpages; i++) {
  1127. if (!cc->cpages[i])
  1128. continue;
  1129. f2fs_compress_free_page(cc->cpages[i]);
  1130. cc->cpages[i] = NULL;
  1131. }
  1132. page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
  1133. cc->cpages = NULL;
  1134. return -EAGAIN;
  1135. }
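/*
 * Writeback completion for one compressed page.  Free the intermediate page
 * and, once the last page of the cluster has completed, end writeback on all
 * raw pages and free the compress_io_ctx.
 */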
  1136. void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
  1137. {
  1138. struct f2fs_sb_info *sbi = bio->bi_private;
  1139. struct compress_io_ctx *cic =
  1140. (struct compress_io_ctx *)page_private(page);
  1141. int i;
  1142. if (unlikely(bio->bi_status))
  1143. mapping_set_error(cic->inode->i_mapping, -EIO);
  1144. f2fs_compress_free_page(page);
  1145. dec_page_count(sbi, F2FS_WB_DATA);
  1146. if (atomic_dec_return(&cic->pending_pages))
  1147. return;
  1148. for (i = 0; i < cic->nr_rpages; i++) {
  1149. WARN_ON(!cic->rpages[i]);
  1150. clear_page_private_gcing(cic->rpages[i]);
  1151. end_page_writeback(cic->rpages[i]);
  1152. }
  1153. page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
  1154. kmem_cache_free(cic_entry_slab, cic);
  1155. }
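/*
 * Fallback path used when a cluster is not (or cannot be) written compressed:
 * redirty the cluster's pages and write each one as a normal data page.
 */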
  1156. static int f2fs_write_raw_pages(struct compress_ctx *cc,
  1157. int *submitted,
  1158. struct writeback_control *wbc,
  1159. enum iostat_type io_type)
  1160. {
  1161. struct address_space *mapping = cc->inode->i_mapping;
  1162. int _submitted, compr_blocks, ret, i;
  1163. compr_blocks = f2fs_compressed_blocks(cc);
  1164. for (i = 0; i < cc->cluster_size; i++) {
  1165. if (!cc->rpages[i])
  1166. continue;
  1167. redirty_page_for_writepage(wbc, cc->rpages[i]);
  1168. unlock_page(cc->rpages[i]);
  1169. }
  1170. if (compr_blocks < 0)
  1171. return compr_blocks;
  1172. for (i = 0; i < cc->cluster_size; i++) {
  1173. if (!cc->rpages[i])
  1174. continue;
  1175. retry_write:
  1176. lock_page(cc->rpages[i]);
  1177. if (cc->rpages[i]->mapping != mapping) {
  1178. continue_unlock:
  1179. unlock_page(cc->rpages[i]);
  1180. continue;
  1181. }
  1182. if (!PageDirty(cc->rpages[i]))
  1183. goto continue_unlock;
  1184. if (!clear_page_dirty_for_io(cc->rpages[i]))
  1185. goto continue_unlock;
  1186. ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
  1187. NULL, NULL, wbc, io_type,
  1188. compr_blocks, false);
  1189. if (ret) {
  1190. if (ret == AOP_WRITEPAGE_ACTIVATE) {
  1191. unlock_page(cc->rpages[i]);
  1192. ret = 0;
  1193. } else if (ret == -EAGAIN) {
1194. /*
1195. * For quota files, just redirty the remaining pages
1196. * to avoid a deadlock caused by a cluster-update race
1197. * with foreground operations.
1198. */
  1199. if (IS_NOQUOTA(cc->inode))
  1200. return 0;
  1201. ret = 0;
  1202. cond_resched();
  1203. congestion_wait(BLK_RW_ASYNC,
  1204. DEFAULT_IO_TIMEOUT);
  1205. goto retry_write;
  1206. }
  1207. return ret;
  1208. }
  1209. *submitted += _submitted;
  1210. }
  1211. f2fs_balance_fs(F2FS_M_SB(mapping), true);
  1212. return 0;
  1213. }
  1214. int f2fs_write_multi_pages(struct compress_ctx *cc,
  1215. int *submitted,
  1216. struct writeback_control *wbc,
  1217. enum iostat_type io_type)
  1218. {
  1219. int err;
  1220. *submitted = 0;
  1221. if (cluster_may_compress(cc)) {
  1222. err = f2fs_compress_pages(cc);
  1223. if (err == -EAGAIN) {
  1224. goto write;
  1225. } else if (err) {
  1226. f2fs_put_rpages_wbc(cc, wbc, true, 1);
  1227. goto destroy_out;
  1228. }
  1229. err = f2fs_write_compressed_pages(cc, submitted,
  1230. wbc, io_type);
  1231. if (!err)
  1232. return 0;
  1233. f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
  1234. }
  1235. write:
  1236. f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
  1237. err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
  1238. f2fs_put_rpages_wbc(cc, wbc, false, 0);
  1239. destroy_out:
  1240. f2fs_destroy_compress_ctx(cc, false);
  1241. return err;
  1242. }
  1243. static void f2fs_free_dic(struct decompress_io_ctx *dic);
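/*
 * Allocate and initialize a decompress_io_ctx for the cluster described by
 * @cc, including one intermediate page per compressed block to receive the
 * on-disk data.
 */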
  1244. struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
  1245. {
  1246. struct decompress_io_ctx *dic;
  1247. pgoff_t start_idx = start_idx_of_cluster(cc);
  1248. int i;
  1249. dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
  1250. if (!dic)
  1251. return ERR_PTR(-ENOMEM);
  1252. dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
  1253. if (!dic->rpages) {
  1254. kmem_cache_free(dic_entry_slab, dic);
  1255. return ERR_PTR(-ENOMEM);
  1256. }
  1257. dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
  1258. dic->inode = cc->inode;
  1259. atomic_set(&dic->remaining_pages, cc->nr_cpages);
  1260. dic->cluster_idx = cc->cluster_idx;
  1261. dic->cluster_size = cc->cluster_size;
  1262. dic->log_cluster_size = cc->log_cluster_size;
  1263. dic->nr_cpages = cc->nr_cpages;
  1264. refcount_set(&dic->refcnt, 1);
  1265. dic->failed = false;
  1266. dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
  1267. for (i = 0; i < dic->cluster_size; i++)
  1268. dic->rpages[i] = cc->rpages[i];
  1269. dic->nr_rpages = cc->cluster_size;
  1270. dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
  1271. if (!dic->cpages)
  1272. goto out_free;
  1273. for (i = 0; i < dic->nr_cpages; i++) {
  1274. struct page *page;
  1275. page = f2fs_compress_alloc_page();
  1276. if (!page)
  1277. goto out_free;
  1278. f2fs_set_compressed_page(page, cc->inode,
  1279. start_idx + i + 1, dic);
  1280. dic->cpages[i] = page;
  1281. }
  1282. return dic;
  1283. out_free:
  1284. f2fs_free_dic(dic);
  1285. return ERR_PTR(-ENOMEM);
  1286. }
  1287. static void f2fs_free_dic(struct decompress_io_ctx *dic)
  1288. {
  1289. int i;
  1290. if (dic->tpages) {
  1291. for (i = 0; i < dic->cluster_size; i++) {
  1292. if (dic->rpages[i])
  1293. continue;
  1294. if (!dic->tpages[i])
  1295. continue;
  1296. f2fs_compress_free_page(dic->tpages[i]);
  1297. }
  1298. page_array_free(dic->inode, dic->tpages, dic->cluster_size);
  1299. }
  1300. if (dic->cpages) {
  1301. for (i = 0; i < dic->nr_cpages; i++) {
  1302. if (!dic->cpages[i])
  1303. continue;
  1304. f2fs_compress_free_page(dic->cpages[i]);
  1305. }
  1306. page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
  1307. }
  1308. page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
  1309. kmem_cache_free(dic_entry_slab, dic);
  1310. }
  1311. static void f2fs_put_dic(struct decompress_io_ctx *dic)
  1312. {
  1313. if (refcount_dec_and_test(&dic->refcnt))
  1314. f2fs_free_dic(dic);
  1315. }
  1316. /*
  1317. * Update and unlock the cluster's pagecache pages, and release the reference to
  1318. * the decompress_io_ctx that was being held for I/O completion.
  1319. */
  1320. static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
  1321. {
  1322. int i;
  1323. for (i = 0; i < dic->cluster_size; i++) {
  1324. struct page *rpage = dic->rpages[i];
  1325. if (!rpage)
  1326. continue;
  1327. /* PG_error was set if verity failed. */
  1328. if (failed || PageError(rpage)) {
  1329. ClearPageUptodate(rpage);
  1330. /* will re-read again later */
  1331. ClearPageError(rpage);
  1332. } else {
  1333. SetPageUptodate(rpage);
  1334. }
  1335. unlock_page(rpage);
  1336. }
  1337. f2fs_put_dic(dic);
  1338. }
  1339. static void f2fs_verify_cluster(struct work_struct *work)
  1340. {
  1341. struct decompress_io_ctx *dic =
  1342. container_of(work, struct decompress_io_ctx, verity_work);
  1343. int i;
  1344. /* Verify the cluster's decompressed pages with fs-verity. */
  1345. for (i = 0; i < dic->cluster_size; i++) {
  1346. struct page *rpage = dic->rpages[i];
  1347. if (rpage && !fsverity_verify_page(rpage))
  1348. SetPageError(rpage);
  1349. }
  1350. __f2fs_decompress_end_io(dic, false);
  1351. }
  1352. /*
  1353. * This is called when a compressed cluster has been decompressed
  1354. * (or failed to be read and/or decompressed).
  1355. */
  1356. void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
  1357. {
  1358. if (!failed && dic->need_verity) {
  1359. /*
  1360. * Note that to avoid deadlocks, the verity work can't be done
  1361. * on the decompression workqueue. This is because verifying
  1362. * the data pages can involve reading metadata pages from the
  1363. * file, and these metadata pages may be compressed.
  1364. */
  1365. INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
  1366. fsverity_enqueue_verify_work(&dic->verity_work);
  1367. } else {
  1368. __f2fs_decompress_end_io(dic, failed);
  1369. }
  1370. }
  1371. /*
  1372. * Put a reference to a compressed page's decompress_io_ctx.
  1373. *
  1374. * This is called when the page is no longer needed and can be freed.
  1375. */
  1376. void f2fs_put_page_dic(struct page *page)
  1377. {
  1378. struct decompress_io_ctx *dic =
  1379. (struct decompress_io_ctx *)page_private(page);
  1380. f2fs_put_dic(dic);
  1381. }
  1382. const struct address_space_operations f2fs_compress_aops = {
  1383. .releasepage = f2fs_release_page,
  1384. .invalidatepage = f2fs_invalidate_page,
  1385. };
  1386. struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
  1387. {
  1388. return sbi->compress_inode->i_mapping;
  1389. }
  1390. void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
  1391. {
  1392. if (!sbi->compress_inode)
  1393. return;
  1394. invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
  1395. }
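/*
 * With the compress_cache mount option, keep a copy of the on-disk compressed
 * page in the compress inode's address space, keyed by block address, so a
 * later read of the same cluster can be served from memory.
 */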
  1396. void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
  1397. nid_t ino, block_t blkaddr)
  1398. {
  1399. struct page *cpage;
  1400. int ret;
  1401. if (!test_opt(sbi, COMPRESS_CACHE))
  1402. return;
  1403. if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
  1404. return;
  1405. if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
  1406. return;
  1407. cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
  1408. if (cpage) {
  1409. f2fs_put_page(cpage, 0);
  1410. return;
  1411. }
  1412. cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
  1413. if (!cpage)
  1414. return;
  1415. ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
  1416. blkaddr, GFP_NOFS);
  1417. if (ret) {
  1418. f2fs_put_page(cpage, 0);
  1419. return;
  1420. }
  1421. set_page_private_data(cpage, ino);
  1422. if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
  1423. goto out;
  1424. memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
  1425. SetPageUptodate(cpage);
  1426. out:
  1427. f2fs_put_page(cpage, 1);
  1428. }
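/*
 * Look up @blkaddr in the compressed page cache and copy the cached data into
 * @page on a hit.  Returns true if the page was served from the cache.
 */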
  1429. bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
  1430. block_t blkaddr)
  1431. {
  1432. struct page *cpage;
  1433. bool hitted = false;
  1434. if (!test_opt(sbi, COMPRESS_CACHE))
  1435. return false;
  1436. cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
  1437. blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
  1438. if (cpage) {
  1439. if (PageUptodate(cpage)) {
  1440. atomic_inc(&sbi->compress_page_hit);
  1441. memcpy(page_address(page),
  1442. page_address(cpage), PAGE_SIZE);
  1443. hitted = true;
  1444. }
  1445. f2fs_put_page(cpage, 1);
  1446. }
  1447. return hitted;
  1448. }
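/* Remove all cached compressed pages that belong to inode @ino. */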
  1449. void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
  1450. {
  1451. struct address_space *mapping = sbi->compress_inode->i_mapping;
  1452. struct pagevec pvec;
  1453. pgoff_t index = 0;
  1454. pgoff_t end = MAX_BLKADDR(sbi);
  1455. if (!mapping->nrpages)
  1456. return;
  1457. pagevec_init(&pvec);
  1458. do {
  1459. unsigned int nr_pages;
  1460. int i;
  1461. nr_pages = pagevec_lookup_range(&pvec, mapping,
  1462. &index, end - 1);
  1463. if (!nr_pages)
  1464. break;
  1465. for (i = 0; i < nr_pages; i++) {
  1466. struct page *page = pvec.pages[i];
  1467. if (page->index > end)
  1468. break;
  1469. lock_page(page);
  1470. if (page->mapping != mapping) {
  1471. unlock_page(page);
  1472. continue;
  1473. }
  1474. if (ino != get_page_private_data(page)) {
  1475. unlock_page(page);
  1476. continue;
  1477. }
  1478. generic_error_remove_page(mapping, page);
  1479. unlock_page(page);
  1480. }
  1481. pagevec_release(&pvec);
  1482. cond_resched();
  1483. } while (index < end);
  1484. }
  1485. int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
  1486. {
  1487. struct inode *inode;
  1488. if (!test_opt(sbi, COMPRESS_CACHE))
  1489. return 0;
  1490. inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
  1491. if (IS_ERR(inode))
  1492. return PTR_ERR(inode);
  1493. sbi->compress_inode = inode;
  1494. sbi->compress_percent = COMPRESS_PERCENT;
  1495. sbi->compress_watermark = COMPRESS_WATERMARK;
  1496. atomic_set(&sbi->compress_page_hit, 0);
  1497. return 0;
  1498. }
  1499. void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
  1500. {
  1501. if (!sbi->compress_inode)
  1502. return;
  1503. iput(sbi->compress_inode);
  1504. sbi->compress_inode = NULL;
  1505. }
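/*
 * Per-filesystem slab for cluster-sized arrays of page pointers, named after
 * the block device so multiple f2fs instances get separate caches.
 */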
  1506. int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
  1507. {
  1508. dev_t dev = sbi->sb->s_bdev->bd_dev;
  1509. char slab_name[32];
  1510. sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
  1511. sbi->page_array_slab_size = sizeof(struct page *) <<
  1512. F2FS_OPTION(sbi).compress_log_size;
  1513. sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
  1514. sbi->page_array_slab_size);
  1515. if (!sbi->page_array_slab)
  1516. return -ENOMEM;
  1517. return 0;
  1518. }
  1519. void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
  1520. {
  1521. kmem_cache_destroy(sbi->page_array_slab);
  1522. }
  1523. static int __init f2fs_init_cic_cache(void)
  1524. {
  1525. cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
  1526. sizeof(struct compress_io_ctx));
  1527. if (!cic_entry_slab)
  1528. return -ENOMEM;
  1529. return 0;
  1530. }
  1531. static void f2fs_destroy_cic_cache(void)
  1532. {
  1533. kmem_cache_destroy(cic_entry_slab);
  1534. }
  1535. static int __init f2fs_init_dic_cache(void)
  1536. {
  1537. dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
  1538. sizeof(struct decompress_io_ctx));
  1539. if (!dic_entry_slab)
  1540. return -ENOMEM;
  1541. return 0;
  1542. }
  1543. static void f2fs_destroy_dic_cache(void)
  1544. {
  1545. kmem_cache_destroy(dic_entry_slab);
  1546. }
  1547. int __init f2fs_init_compress_cache(void)
  1548. {
  1549. int err;
  1550. err = f2fs_init_cic_cache();
  1551. if (err)
  1552. goto out;
  1553. err = f2fs_init_dic_cache();
  1554. if (err)
  1555. goto free_cic;
  1556. return 0;
  1557. free_cic:
  1558. f2fs_destroy_cic_cache();
  1559. out:
  1560. return -ENOMEM;
  1561. }
  1562. void f2fs_destroy_compress_cache(void)
  1563. {
  1564. f2fs_destroy_dic_cache();
  1565. f2fs_destroy_cic_cache();
  1566. }