zdata.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * Since the pcluster size is variable with the big pcluster feature,
 * introduce per-size slab pools for the different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_pages, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}

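/*
 * Pick the smallest slab pool whose maxpages can hold @nrpages and
 * allocate a zeroed pcluster from it; a slab-backed pcluster therefore
 * carries at least as many compressed_pages[] slots as it needs.
 */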
static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclusterpages = nrpages;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pcl->pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

/*
 * a compressed_pages[] placeholder used to avoid the slot being
 * filled with file pages for in-place decompression.
 */
#define PAGE_UNALLOCATED	((void *)0x5F0E4B1D)

/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
	/*
	 * try to use cached I/O if page allocation succeeds or fall back
	 * to in-place I/O instead to avoid any direct reclaim.
	 */
	TRYALLOC,
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
}

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * no need to spawn too many threads; limiting the thread count
	 * could minimize scheduling overhead. Perhaps per-CPU threads
	 * would be better?
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
					    WQ_UNBOUND | WQ_HIGHPRI,
					    onlinecpus + onlinecpus / 4);
	return z_erofs_workqueue ? 0 : -ENOMEM;
}

int __init z_erofs_init_zip_subsystem(void)
{
	int err = z_erofs_create_pcluster_pool();

	if (err)
		return err;
	err = z_erofs_init_workqueue();
	if (err)
		z_erofs_destroy_pcluster_pool();
	return err;
}

enum z_erofs_collectmode {
	COLLECT_SECONDARY,
	COLLECT_PRIMARY,
	/*
	 * The current collection was the tail of an existing chain, and the
	 * previously processed chained collections have all been decided to
	 * be hooked up to it.
	 * A new chain will be created for the remaining collections which
	 * are not processed yet; therefore, unlike COLLECT_PRIMARY_FOLLOWED,
	 * the next collection cannot reuse the whole page safely in
	 * the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |        head (partial) page      |
	 * |   (belongs to the next cl)   |   (belongs to the current cl)   |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	COLLECT_PRIMARY_HOOKED,
	/*
	 * a weak form of COLLECT_PRIMARY_FOLLOWED; the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages. All related online pages cannot be reused for
	 * in-place I/O (or pagevec) since they can be decoded directly
	 * without I/O submission.
	 */
	COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked into the owned chain, and
	 * could also be linked with the remaining collections. This means
	 * that if the page being processed is the tail page of the
	 * collection, the current collection can safely use the whole page
	 * (since the previous collection is under control) for in-place
	 * I/O, as illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page           |
	 * |  (of the current cl) |      (of the previous collection)      |
	 * |  PRIMARY_FOLLOWED or |                                        |
	 * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used for in-place I/O.             ]
	 */
	COLLECT_PRIMARY_FOLLOWED,
};

struct z_erofs_collector {
	struct z_erofs_pagevec_ctor vector;

	struct z_erofs_pcluster *pcl, *tailpcl;
	struct z_erofs_collection *cl;
	/* a pointer used to pick up inplace I/O pages */
	struct page **icpage_ptr;
	z_erofs_next_pcluster_t owned_head;

	enum z_erofs_collectmode mode;
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;

	struct z_erofs_collector clt;
	struct erofs_map_blocks map;

	bool readahead;
	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
};

#define COLLECTOR_INIT() { \
	.owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = COLLECT_PRIMARY_FOLLOWED }

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .clt = COLLECTOR_INIT(), \
	.backmost = true, }

static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);

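/*
 * Populate compressed_pages[] of the current pcluster from the managed
 * cache: already-cached pages are grabbed with an extra reference, while
 * missing slots are left for in-place I/O, marked for delayed allocation,
 * or (for TRYALLOC) filled with freshly preallocated pages. If every page
 * is already cached, switch to COLLECT_PRIMARY_FOLLOWED_NOINPLACE so the
 * pcluster can go through the bypass queue without I/O.
 */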
static void preload_compressed_pages(struct z_erofs_collector *clt,
				     struct address_space *mc,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool)
{
	struct z_erofs_pcluster *pcl = clt->pcl;
	bool standalone = true;
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	struct page **pages;
	pgoff_t index;

	if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
		return;

	pages = pcl->compressed_pages;
	index = pcl->obj.index;
	for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
		struct page *page;
		compressed_page_t t;
		struct page *newpage = NULL;

		/* the compressed page was loaded before */
		if (READ_ONCE(*pages))
			continue;

		page = find_get_page(mc, index);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else {
			/* I/O is needed, it's not possible to decompress directly */
			standalone = false;
			switch (type) {
			case DELAYEDALLOC:
				t = tagptr_init(compressed_page_t,
						PAGE_UNALLOCATED);
				break;
			case TRYALLOC:
				newpage = erofs_allocpage(pagepool, gfp);
				if (!newpage)
					continue;
				set_page_private(newpage,
						 Z_EROFS_PREALLOCATED_PAGE);
				t = tag_compressed_page_justfound(newpage);
				break;
			default:	/* DONTALLOC */
				continue;
			}
		}

		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
			continue;

		if (page) {
			put_page(page);
		} else if (newpage) {
			set_page_private(newpage, 0);
			list_add(&newpage->lru, pagepool);
		}
	}

	/*
	 * don't do in-place I/O if all compressed pages are available in
	 * the managed cache, since the pcluster can be moved to the bypass
	 * queue instead.
	 */
	if (standalone)
		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	int i;

	/*
	 * the workgroup refcount is now frozen at 1, so there is no need
	 * to worry about concurrent decompression users.
	 */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page = pcl->compressed_pages[i];

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (page->mapping != mapping)
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_pages[i], NULL);
		detach_page_private(page);
		unlock_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	int ret = 0;	/* 0 - busy */

	if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
		unsigned int i;

		for (i = 0; i < pcl->pclusterpages; ++i) {
			if (pcl->compressed_pages[i] == page) {
				WRITE_ONCE(pcl->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&pcl->obj, 1);

		if (ret)
			detach_page_private(page);
	}
	return ret;
}

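/*
 * In-place I/O reuses a to-be-decompressed file page as a temporary
 * buffer for compressed data: scan compressed_pages[] backwards from the
 * end of the pcluster and claim the first still-empty slot for @page.
 */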
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
				   struct page *page)
{
	struct z_erofs_pcluster *const pcl = clt->pcl;

	while (clt->icpage_ptr > pcl->compressed_pages)
		if (!cmpxchg(--clt->icpage_ptr, NULL, page))
			return true;
	return false;
}

/* callers must hold the collection lock */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
			       struct page *page, enum z_erofs_page_type type,
			       bool pvec_safereuse)
{
	int ret;

	/* give priority to in-place I/O */
	if (clt->mode >= COLLECT_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    z_erofs_try_inplace_io(clt, page))
		return 0;

	ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
				      pvec_safereuse);
	clt->cl->vcnt += (unsigned int)ret;
	return ret ? 0 : -EAGAIN;
}

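/*
 * Try to attach the pcluster to the chain described by @owned_head and
 * return the collect mode the caller should use: PRIMARY_FOLLOWED if this
 * context now owns the chain, PRIMARY_HOOKED if the pcluster was hooked
 * onto the end of another open chain, or plain PRIMARY if the pcluster is
 * already owned (and submitted) elsewhere.
 */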
static enum z_erofs_collectmode
try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
		      z_erofs_next_pcluster_t *owned_head)
{
	/* let's claim these following types of pclusters */
retry:
	if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
		/* type 1, nil pcluster */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
			    *owned_head) != Z_EROFS_PCLUSTER_NIL)
			goto retry;

		*owned_head = &pcl->next;
		/* lucky, I am the followee :) */
		return COLLECT_PRIMARY_FOLLOWED;
	} else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain; be
		 * careful that its submission is governed by the original
		 * owned chain.
		 */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
			    *owned_head) != Z_EROFS_PCLUSTER_TAIL)
			goto retry;
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		return COLLECT_PRIMARY_HOOKED;
	}
	return COLLECT_PRIMARY;	/* :( better luck next time */
}

static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
				     struct inode *inode,
				     struct erofs_map_blocks *map)
{
	struct z_erofs_pcluster *pcl = clt->pcl;
	struct z_erofs_collection *cl;
	unsigned int length;

	/* to avoid unexpected loop formed by corrupted images */
	if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	cl = z_erofs_primarycollection(pcl);
	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	length = READ_ONCE(pcl->length);
	if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
		if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	} else {
		unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;

		if (map->m_flags & EROFS_MAP_FULL_MAPPED)
			llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;

		while (llen > length &&
		       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
			cpu_relax();
			length = READ_ONCE(pcl->length);
		}
	}
	mutex_lock(&cl->lock);
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;

	clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
	/* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = NULL;
	clt->cl = cl;
	return 0;
}

static int z_erofs_register_collection(struct z_erofs_collector *clt,
				       struct inode *inode,
				       struct erofs_map_blocks *map)
{
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	struct erofs_workgroup *grp;
	int err;

	/* no available pcluster, let's allocate one */
	pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	atomic_set(&pcl->obj.refcount, 1);
	pcl->obj.index = map->m_pa >> PAGE_SHIFT;

	pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
		(map->m_flags & EROFS_MAP_FULL_MAPPED ?
			Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

	if (map->m_flags & EROFS_MAP_ZIPPED)
		pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
	else
		pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = clt->owned_head;
	clt->mode = COLLECT_PRIMARY_FOLLOWED;

	cl = z_erofs_primarycollection(pcl);
	cl->pageofs = map->m_la & ~PAGE_MASK;

	/*
	 * lock all primary followed works before making them visible to
	 * others; mutex_trylock *never* fails for a freshly created pcluster.
	 */
	mutex_init(&cl->lock);
	DBG_BUGON(!mutex_trylock(&cl->lock));

	grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
	if (IS_ERR(grp)) {
		err = PTR_ERR(grp);
		goto err_out;
	}

	if (grp != &pcl->obj) {
		clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		err = -EEXIST;
		goto err_out;
	}
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->owned_head = &pcl->next;
	clt->pcl = pcl;
	clt->cl = cl;
	return 0;

err_out:
	mutex_unlock(&cl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

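/*
 * Set up the collector for the pcluster that covers map->m_pa: reuse a
 * registered workgroup if one already exists, otherwise register a new
 * one, then look up its primary collection and prepare the pagevec ctor
 * and the in-place I/O cursor.
 */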
static int z_erofs_collector_begin(struct z_erofs_collector *clt,
				   struct inode *inode,
				   struct erofs_map_blocks *map)
{
	struct erofs_workgroup *grp;
	int ret;

	DBG_BUGON(clt->cl);

	/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!PAGE_ALIGNED(map->m_pa)) {
		DBG_BUGON(1);
		return -EINVAL;
	}

	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
	if (grp) {
		clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
	} else {
		ret = z_erofs_register_collection(clt, inode, map);

		if (!ret)
			goto out;
		if (ret != -EEXIST)
			return ret;
	}

	ret = z_erofs_lookup_collection(clt, inode, map);
	if (ret) {
		erofs_workgroup_put(&clt->pcl->obj);
		return ret;
	}

out:
	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
				  clt->cl->pagevec, clt->cl->vcnt);
	/* since file-backed online pages are traversed in reverse order */
	clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages;
	return 0;
}

/*
 * Keep in mind that referenced pclusters are only freed after an RCU
 * grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_collection *const cl =
		container_of(head, struct z_erofs_collection, rcu);

	z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
					   primary_collection));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);

	call_rcu(&cl->rcu, z_erofs_rcu_callback);
}

static void z_erofs_collection_put(struct z_erofs_collection *cl)
{
	struct z_erofs_pcluster *const pcl =
		container_of(cl, struct z_erofs_pcluster, primary_collection);

	erofs_workgroup_put(&pcl->obj);
}

static bool z_erofs_collector_end(struct z_erofs_collector *clt)
{
	struct z_erofs_collection *cl = clt->cl;

	if (!cl)
		return false;

	z_erofs_pagevec_ctor_exit(&clt->vector, false);
	mutex_unlock(&cl->lock);

	/*
	 * if all pending pages have been added, don't hold the collection
	 * reference any longer unless the pcluster is hosted by ourselves.
	 */
	if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
		z_erofs_collection_put(cl);

	clt->cl = NULL;
	return true;
}

static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
				       unsigned int cachestrategy,
				       erofs_off_t la)
{
	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
		la < fe->headoffset;
}

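/*
 * Read one locked file page: walk it (in reverse) over the logical
 * extents returned by z_erofs_map_blocks_iter(), attaching each mapped
 * part to its collection (splitting the page if it spans pclusters) and
 * zeroing unmapped holes directly.
 */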
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page, struct list_head *pagepool)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct erofs_map_blocks *const map = &fe->map;
	struct z_erofs_collector *const clt = &fe->clt;
	const loff_t offset = page_offset(page);
	bool tight = true;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid collection previously (very rare) */
		if (!clt->cl)
			goto restart_now;
		goto hitted;
	}

	/* go ahead to the next map_blocks */
	erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_collector_end(clt))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = z_erofs_map_blocks_iter(inode, map, 0);
	if (err)
		goto err_out;

restart_now:
	if (!(map->m_flags & EROFS_MAP_MAPPED))
		goto hitted;

	err = z_erofs_collector_begin(clt, inode, map);
	if (err)
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la))
		cache_strategy = TRYALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(clt, MNGD_MAPPING(sbi),
				 cache_strategy, pagepool);

hitted:
	/*
	 * Ensure the current partial page belongs to this submission chain
	 * rather than another concurrent submission chain or the noio
	 * (bypass) chain, since those chains are handled asynchronously and
	 * the page therefore cannot be used for in-place I/O or the pagevec
	 * (which must be processed in strict order).
	 */
	tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
		  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);

	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive the page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);

retry:
	err = z_erofs_attach_page(clt, page, page_type,
				  clt->mode >= COLLECT_PRIMARY_FOLLOWED);
	/* should allocate an additional short-lived page for the pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
				alloc_page(GFP_NOFS | __GFP_NOFAIL);

		set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
		err = z_erofs_attach_page(clt, newpage,
					  Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
		if (!err)
			goto retry;
	}

	if (err)
		goto err_out;

	index = page->index - (map->m_la >> PAGE_SHIFT);

	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of split parts of a page */
	++spiltted;
	/* also update nr_pages */
	clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	z_erofs_onlinepage_endio(page);

	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		  __func__, page, spiltted, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}

static void z_erofs_decompressqueue_work(struct work_struct *work);

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       bool sync, int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (sync) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use workqueue and sync decompression for atomic contexts only */
	if (in_atomic() || irqs_disabled()) {
		queue_work(z_erofs_workqueue, &io->u.work);
		sbi->ctx.readahead_sync_decompress = true;
		return;
	}
	z_erofs_decompressqueue_work(&io->u.work);
}

static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page->mapping && !z_erofs_is_shortlived_page(page);
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (err)
			SetPageError(page);

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}

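/*
 * Decompress a whole pcluster: collect the decompressed (online) pages
 * from the pagevec, pair them with the compressed pages while detecting
 * in-place overlaps, call into the decompressor, then recycle short-lived
 * pages and end all online pages.
 */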
static int z_erofs_decompress_pcluster(struct super_block *sb,
				       struct z_erofs_pcluster *pcl,
				       struct list_head *pagepool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct z_erofs_pagevec_ctor ctor;
	unsigned int i, inputsize, outputsize, llen, nr_pages;
	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;

	enum z_erofs_page_type page_type;
	bool overlapped, partial;
	struct z_erofs_collection *cl;
	int err;

	might_sleep();
	cl = z_erofs_primarycollection(pcl);
	DBG_BUGON(!READ_ONCE(cl->nr_pages));

	mutex_lock(&cl->lock);
	nr_pages = cl->nr_pages;

	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
		pages = pages_onstack;
	} else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
		   mutex_trylock(&z_pagemap_global_lock)) {
		pages = z_pagemap_global;
	} else {
		gfp_t gfp_flags = GFP_KERNEL;

		if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
			gfp_flags |= __GFP_NOFAIL;

		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
				       gfp_flags);

		/* fall back to the global pagemap for the lowmem scenario */
		if (!pages) {
			mutex_lock(&z_pagemap_global_lock);
			pages = z_pagemap_global;
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	err = 0;
	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
				  cl->pagevec, 0);

	for (i = 0; i < cl->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (z_erofs_put_shortlivedpage(pagepool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);

		/*
		 * currently EROFS doesn't support multiref (dedup),
		 * so error out on a multiref page here.
		 */
		if (pages[pagenr]) {
			DBG_BUGON(1);
			SetPageError(pages[pagenr]);
			z_erofs_onlinepage_endio(pages[pagenr]);
			err = -EFSCORRUPTED;
		}
		pages[pagenr] = page;
	}
	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = pcl->compressed_pages;

	for (i = 0; i < pcl->pclusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (!z_erofs_is_shortlived_page(page)) {
			if (erofs_page_is_managed(sbi, page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}

			/*
			 * only non-head pages can be selected for
			 * in-place decompression
			 */
			pagenr = z_erofs_onlinepage_index(page);

			DBG_BUGON(pagenr >= nr_pages);
			if (pages[pagenr]) {
				DBG_BUGON(1);
				SetPageError(pages[pagenr]);
				z_erofs_onlinepage_endio(pages[pagenr]);
				err = -EFSCORRUPTED;
			}
			pages[pagenr] = page;

			overlapped = true;
		}

		/* PG_error needs checking for all non-managed pages */
		if (PageError(page)) {
			DBG_BUGON(PageUptodate(page));
			err = -EIO;
		}
	}

	if (err)
		goto out;

	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
		outputsize = llen;
		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
	} else {
		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
		partial = true;
	}

	inputsize = pcl->pclusterpages * PAGE_SIZE;
	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
					.sb = sb,
					.in = compressed_pages,
					.out = pages,
					.pageofs_out = cl->pageofs,
					.inputsize = inputsize,
					.outputsize = outputsize,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = partial
				 }, pagepool);

out:
	/* must handle all compressed pages before ending pages */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		page = compressed_pages[i];

		if (erofs_page_is_managed(sbi, page))
			continue;

		/* recycle all individual short-lived pages */
		(void)z_erofs_put_shortlivedpage(pagepool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (!page)
			continue;

		DBG_BUGON(z_erofs_page_is_invalidated(page));

		/* recycle all individual short-lived pages */
		if (z_erofs_put_shortlivedpage(pagepool, page))
			continue;

		if (err < 0)
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (pages != pages_onstack)
		kvfree(pages);

	cl->nr_pages = 0;
	cl->vcnt = 0;

	/* all cl locks MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);

	/* all cl locks SHOULD be released right now */
	mutex_unlock(&cl->lock);

	z_erofs_collection_put(cl);
	return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				     struct list_head *pagepool)
{
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
		struct z_erofs_pcluster *pcl;

		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(pcl->next);

		z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
	}
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	LIST_HEAD(pagepool);

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	z_erofs_decompress_queue(bgq, &pagepool);

	put_pages_list(&pagepool);
	kvfree(bgq);
}

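/*
 * Pick (or set up) the compressed page at slot @nr for bio submission.
 * Possible outcomes: a managed-cache page that is already up-to-date is
 * skipped (NULL is returned so the caller can bypass I/O for it); a valid
 * in-place or preallocated page is returned for reading; otherwise a new
 * page is allocated and, if possible, attached to the managed cache.
 */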
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct list_head *pagepool,
					       struct address_space *mc,
					       gfp_t gfp)
{
	const pgoff_t index = pcl->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated yet and a placeholder
	 * is out there, prepare it now.
	 */
	if (page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	/*
	 * preallocated cached pages, which are used to avoid direct
	 * reclaim; otherwise, the in-place I/O path would be taken instead.
	 */
	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
		WRITE_ONCE(pcl->compressed_pages[nr], page);
		set_page_private(page, 0);
		tocache = true;
		goto out_tocache;
	}
	mapping = READ_ONCE(page->mapping);

	/*
	 * file-backed online pages in the pcluster are all locked steadily,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	/* directly return for shortlived page as well */
	if (z_erofs_is_shortlived_page(page))
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in the managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_pages[nr], page);

		ClearPageError(page);
		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) under the
			 * current restriction if the page is already in
			 * compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated; it's unsafe to reuse
	 * this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
		list_add(&page->lru, pagepool);
		cond_resched();
		goto repeat;
	}
out_tocache:
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* turn into a temporary short-lived page if that fails (1 ref) */
		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
		goto out;
	}
	attach_page_private(page, pcl);
	/* drop a refcount added by allocpage (then we have 2 refs here) */
	put_page(page);

out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
	} else {
fg_out:
		q = fgq;
		init_waitqueue_head(&fgq->u.wait);
		atomic_set(&fgq->pending_bios, 0);
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *q[],
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	/*
	 * if the managed cache is enabled, the bypass jobqueue is needed;
	 * pclusters in that queue never need to be read from the device.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

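/*
 * Walk the owned pcluster chain, pick up the compressed pages of each
 * pcluster and batch physically contiguous ones into bios; pclusters
 * whose pages are all already cached are moved to the bypass jobqueue
 * instead of being submitted for I/O.
 */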
static void z_erofs_submit_queue(struct super_block *sb,
				 struct z_erofs_decompress_frontend *f,
				 struct list_head *pagepool,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	void *bi_private;
	z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
	/* bio is NULL initially, so no need to initialize last_index */
	pgoff_t last_index;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;

	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		/* 'owned_head' can never equal either of the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		cur = pcl->obj.index;
		end = cur + pcl->pclusterpages;

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);

		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++, pagepool,
							  MNGD_MAPPING(sbi),
							  GFP_NOFS);
			if (!page)
				continue;

			if (bio && cur != last_index + 1) {
submit_bio_retry:
				submit_bio(bio);
				bio = NULL;
			}

			if (!bio) {
				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

				bio->bi_end_io = z_erofs_decompressqueue_endio;
				bio_set_dev(bio, sb->s_bdev);
				bio->bi_iter.bi_sector = (sector_t)cur <<
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				bio->bi_opf = REQ_OP_READ;
				if (f->readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio)
		submit_bio(bio);

	/*
	 * although background decompression is preferred, nothing is
	 * pending for submission; don't kick off the workqueue, just free
	 * the submit queue directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
}

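/*
 * Submit all collected pclusters and then decompress: the bypass queue
 * (no I/O needed) is handled inline right away, while the submit queue is
 * either decompressed here after waiting for its bios (force_fg) or left
 * to the bio endio / workqueue path.
 */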
static void z_erofs_runqueue(struct super_block *sb,
			     struct z_erofs_decompress_frontend *f,
			     struct list_head *pagepool, bool force_fg)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	io_wait_event(io[JQ_SUBMIT].u.wait,
		      !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* handle synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}

static int z_erofs_readpage(struct file *file, struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_collector_end(&f.clt);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	unsigned int nr_pages = readahead_count(rac);
	bool sync = (sbi->ctx.readahead_sync_decompress &&
			nr_pages <= sbi->ctx.max_sync_decompress_pages);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *page, *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	f.readahead = true;
	f.headoffset = readahead_pos(rac);

	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);

		/*
		 * A pure asynchronous readahead is indicated if a
		 * PG_readahead marked page is hit first. Let's also do
		 * asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}

	(void)z_erofs_collector_end(&f.clt);

	z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.readpage = z_erofs_readpage,
	.readahead = z_erofs_readahead,
};