gc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */
/*
 * This file implements garbage collection. The procedure for garbage collection
 * is different depending on whether a LEB is an index LEB (contains index
 * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
 * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
 * nodes to the journal, at which point the garbage-collected LEB is free to be
 * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
 * dirty in the TNC, and after the next commit, the garbage-collected LEB is
 * free to be reused. Garbage collection will cause the number of dirty index
 * nodes to grow, however sufficient space is reserved for the index to ensure
 * the commit will never run out of space.
 *
 * Notes about the dead watermark. In the current UBIFS implementation we
 * assume that LEBs which have less than @c->dead_wm bytes of free + dirty
 * space are full and not worth garbage-collecting. The dead watermark is one
 * min. I/O unit size, or the min. UBIFS node size, whichever is greater.
 * Indeed, the UBIFS garbage collector has to synchronize the GC head's write
 * buffer before returning, so this is about wasting one min. I/O unit.
 * However, UBIFS GC could actually reclaim even very small pieces of dirty
 * space by garbage-collecting enough dirty LEBs, but we do not bother doing
 * this in the current implementation.
 *
 * Notes about the dark watermark. The results of GC work depend on how big the
 * UBIFS nodes GC deals with are. Large nodes make GC waste more space. Indeed,
 * if GC moves data from LEB A to LEB B and the nodes in LEB A are large, GC
 * would have to waste large pieces of free space at the end of LEB B, because
 * nodes from LEB A would not fit. The worst situation is when all nodes are of
 * maximum size. So the dark watermark is the amount of free + dirty space in a
 * LEB which is guaranteed to be reclaimable. If a LEB has less space, the GC
 * might be unable to reclaim it. So, LEBs with free + dirty greater than the
 * dark watermark are "good" LEBs from GC's point of view. The other LEBs are
 * not so good, and GC takes extra care when moving them.
 */
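
/*
 * Illustrative note (not from the original sources): a rough worked example
 * of the watermarks above, assuming a flash with a 2048-byte min. I/O unit.
 * The dead watermark is then about one min. I/O unit (2048 bytes): a LEB
 * with, say, 1500 bytes of free + dirty space is treated as full, since
 * syncing the GC head's write-buffer wastes about that much anyway. The dark
 * watermark is roughly the maximum node size aligned up to the min. I/O unit
 * (a maximum-size data node is about 4 KiB of data plus headers, so ~6 KiB
 * after alignment here): only a LEB with at least that much free + dirty
 * space is guaranteed reclaimable, because in the worst case every surviving
 * node is of maximum size and must fit at the end of the GC head LEB.
 */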
#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list_sort.h>
#endif
#include "ubifs.h"

#ifndef __UBOOT__
/*
 * GC may need to move more than one LEB to make progress. The below constants
 * define "soft" and "hard" limits on the number of LEBs the garbage collector
 * may move.
 */
#define SOFT_LEBS_LIMIT 4
#define HARD_LEBS_LIMIT 32
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the next LEB which is reserved in
 * @c->gc_lnum. Returns %0 in case of success, %-EAGAIN if commit is required,
 * and other negative error codes in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'c->gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
/**
 * data_nodes_cmp - compare 2 data nodes.
 * @priv: UBIFS file-system description object
 * @a: first data node
 * @b: second data node
 *
 * This function compares data nodes @a and @b. Returns %1 if @a has a greater
 * inode or block number, and %-1 otherwise.
 */
static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
	ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
	ubifs_assert(sa->type == UBIFS_DATA_NODE);
	ubifs_assert(sb->type == UBIFS_DATA_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		unsigned int blka = key_block(c, &sa->key);
		unsigned int blkb = key_block(c, &sb->key);

		if (blka <= blkb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}
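
/*
 * Illustrative note (not from the original sources): sorting with
 * 'data_nodes_cmp()' yields (inode, block) lexicographic order. E.g. the
 * keys (ino 5, blk 7), (ino 2, blk 9), (ino 2, blk 1) end up as:
 *
 *	(ino 2, blk 1) -> (ino 2, blk 9) -> (ino 5, blk 7)
 *
 * so data belonging to the same inode becomes contiguous and block-ordered,
 * which is the on-flash layout bulk-read benefits from.
 */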
/*
 * nondata_nodes_cmp - compare 2 non-data nodes.
 * @priv: UBIFS file-system description object
 * @a: first node
 * @b: second node
 *
 * This function compares nodes @a and @b. It makes sure that inode nodes go
 * first and are sorted by length in descending order. Directory entry nodes go
 * after inode nodes and are sorted in ascending hash value order.
 */
static int nondata_nodes_cmp(void *priv, struct list_head *a,
			     struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
		     key_type(c, &sb->key) != UBIFS_DATA_KEY);
	ubifs_assert(sa->type != UBIFS_DATA_NODE &&
		     sb->type != UBIFS_DATA_NODE);

	/* Inodes go before directory entries */
	if (sa->type == UBIFS_INO_NODE) {
		if (sb->type == UBIFS_INO_NODE)
			return sb->len - sa->len;
		return -1;
	}
	if (sb->type == UBIFS_INO_NODE)
		return 1;

	ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sa->key) == UBIFS_XENT_KEY);
	ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sb->key) == UBIFS_XENT_KEY);
	ubifs_assert(sa->type == UBIFS_DENT_NODE ||
		     sa->type == UBIFS_XENT_NODE);
	ubifs_assert(sb->type == UBIFS_DENT_NODE ||
		     sb->type == UBIFS_XENT_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		uint32_t hasha = key_hash(c, &sa->key);
		uint32_t hashb = key_hash(c, &sb->key);

		if (hasha <= hashb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}
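
/*
 * Illustrative note (not from the original sources): with
 * 'nondata_nodes_cmp()' a mixed list such as
 *
 *	dent(ino 9, hash 0x30), ino(len 160), dent(ino 9, hash 0x10), ino(len 200)
 *
 * sorts to
 *
 *	ino(len 200) -> ino(len 160) -> dent(ino 9, hash 0x10) -> dent(ino 9, hash 0x30)
 *
 * i.e. inode nodes first, largest first (so big nodes are placed while the GC
 * head LEB still has room), then direntry nodes in (parent inode, name hash)
 * order, which should help 'readdir()'.
 */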
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes nodes to sort and contains the result on exit
 * @nondata: contains non-data nodes on exit
 * @min: minimum node size is returned here
 *
 * This function sorts the list of nodes to garbage collect. First of all, it
 * kills obsolete nodes and separates data and non-data nodes into the
 * @sleb->nodes and @nondata lists correspondingly.
 *
 * Data nodes are then sorted in block number order - this is important for
 * bulk-read; data nodes with lower inode numbers go before data nodes with
 * higher inode numbers, and data nodes with lower block numbers go before data
 * nodes with higher block numbers.
 *
 * Non-data nodes are sorted as follows.
 * o First go inode nodes - they are sorted in descending length order.
 * o Then go directory entry nodes - they are sorted in hash order, which
 *   should supposedly optimize 'readdir()'. Direntry nodes with lower parent
 *   inode numbers go before direntry nodes with higher parent inode numbers,
 *   and direntry nodes with lower name hash values go before direntry nodes
 *   with higher name hash values.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
/**
 * move_node - move a node.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 * @snod: the node to move
 * @wbuf: write-buffer to move node to
 *
 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
 * destroys @snod. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	cond_resched();
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		return err;

	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
				snod->offs, new_lnum, new_offs,
				snod->len);
	list_del(&snod->list);
	kfree(snod);
	return err;
}
/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from the data LEB described by @sleb to the
 * GC journal head. It returns zero in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of other failures.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in the LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
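
/*
 * Illustrative note (not from the original sources): suppose the GC head LEB
 * has 3 KiB left, the data list holds a 4 KiB node followed by a 2 KiB node,
 * and the non-data list holds a 3.5 KiB inode node with data plus a 200-byte
 * direntry node. The data pass stops at the 4 KiB node, so even the 2 KiB
 * node behind it is not moved yet - data nodes are never reordered, to
 * preserve the bulk-read friendly ordering. The non-data pass skips the
 * oversized inode node but still places the 200-byte direntry; only then is
 * the remainder of the head LEB wasted and 'switch_gc_head()' called.
 */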
/**
 * gc_sync_wbufs - sync write-buffers for GC.
 * @c: UBIFS file-system description object
 *
 * We must guarantee that obsoleting nodes are on flash. Unfortunately they may
 * be in a write-buffer instead. That is, a node could be written to a
 * write-buffer, obsoleting another node in a LEB that is GC'd. If that LEB is
 * erased before the write-buffer is sync'd and then there is an unclean
 * unmount, then an existing node is lost. To avoid this, we sync all
 * write-buffers.
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int gc_sync_wbufs(struct ubifs_info *c)
{
	int err, i;

	for (i = 0; i < c->jhead_cnt; i++) {
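		/*
		 * Added note: the GC head's own write-buffer is skipped here;
		 * it is synchronized separately by the GC code (see
		 * 'switch_gc_head()' and 'ubifs_garbage_collect()').
		 */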
		if (i == GCHD)
			continue;
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			return err;
	}
	return 0;
}
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the %LEB_FREED,
 * %LEB_RETAINED, etc positive codes in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of failures.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);
	if (lp->free + lp->dirty == c->leb_size) {
		/* Special case - a free LEB */
		dbg_gc("LEB %d is free, return it", lp->lnum);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));

		if (lp->free != c->leb_size) {
			/*
			 * Write buffers must be sync'd before unmapping
			 * freeable LEBs, because one of them may contain data
			 * which obsoletes something in 'lp->lnum'.
			 */
			err = gc_sync_wbufs(c);
			if (err)
				return err;
			err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			return err;

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			return LEB_RETAINED;
		}

		return LEB_FREED;
	}
	/*
	 * We scan the entire LEB even though we only really need to scan up to
	 * (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only after
		 * the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if there are free LEBs
 *
 * This function does out-of-place garbage collection. The return codes are:
 * o positive LEB number if the LEB has been freed and may be used;
 * o %-EAGAIN if the caller has to run commit;
 * o %-ENOSPC if GC failed to make any progress;
 * o other negative error codes in case of other errors.
 *
 * Garbage collector writes data to the journal when GC'ing data LEBs, and just
 * marks indexing nodes dirty when GC'ing indexing LEBs. Thus, at some point a
 * commit may be required. But commit cannot be run from inside GC, because the
 * caller might be holding the commit lock, so %-EAGAIN is returned instead.
 * This error code means that the caller has to run commit, and re-run GC if
 * there is still no free space.
 *
 * There are many reasons why this function may return %-EAGAIN:
 * o the log is full and there is no space to write an LEB reference for
 *   @c->gc_lnum;
 * o the journal is too large and exceeds size limitations;
 * o GC moved indexing LEBs, but they can be used only after the commit;
 * o the shrinker fails to find clean znodes to free and requests the commit;
 * o etc.
 *
 * Note, if the file-system is close to being full, this function may return
 * %-EAGAIN indefinitely, so the caller has to limit the number of
 * re-invocations of the function. E.g., this happens if the limits on the
 * journal size are too tough and GC writes too much to the journal before a
 * LEB is freed. This might also mean that the journal is too large and the
 * TNC becomes too big, so that the shrinker is constantly called, finds no
 * clean znodes to free, and requests the commit. This may also happen if the
 * journal is all right, but another kernel process consumes too much memory.
 * Anyway, infinite %-EAGAIN can happen, but only in some
 * extreme/misconfiguration cases.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before, space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations. Indexing LEBs were
			 * moved and will be available after the commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs and have not made
			 * progress, give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Empty and freeable LEBs can turn up while we waited for
		 * the wbuf lock, or while we have been running GC. In that
		 * case, we should just return one of those instead of
		 * continuing to GC dirty LEBs. Hence we request
		 * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
		       lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
		       min_space);

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * This is not an error, so we have to return
				 * the LEB to lprops. But if 'ubifs_return_leb()'
				 * fails, its failure code is propagated to the
				 * caller instead of the original '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB and it cannot be
			 * immediately used. And instead of requesting the
			 * commit straight away, we try to garbage collect some
			 * more.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, keep working */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");
		/*
		 * GC moved a LEB but has not made any progress. This means
		 * that the previous GC head LEB contained too little free
		 * space and the LEB which was GC'ed contained only large
		 * nodes which did not fit that space.
		 *
		 * We can do 2 things:
		 * 1. pick another LEB in the hope it'll contain a small node
		 *    which will fit the space we have at the end of the
		 *    current GC head LEB, but there is no guarantee, so we
		 *    try this out unless we have already been working for
		 *    too long;
		 * 2. request an LEB with more dirty space, which will force
		 *    'ubifs_find_dirty_leb()' to start scanning the lprops
		 *    table, instead of just picking one from the heap
		 *    (previously it already picked the dirtiest LEB).
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}
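
/*
 * Illustrative sketch (not from the original sources): a caller that needs a
 * free LEB would typically loop on the %-EAGAIN contract described above,
 * running the commit between attempts and bounding the number of retries
 * (the real retry logic lives in the budgeting code, which also holds the
 * commit semaphore as required by 'ubifs_assert_cmt_locked()'). Here
 * 'MAX_GC_RETRIES' is a hypothetical bound:
 *
 *	for (i = 0; i < MAX_GC_RETRIES; i++) {
 *		lnum = ubifs_garbage_collect(c, 0);
 *		if (lnum >= 0)
 *			return lnum;         // a LEB was freed for use
 *		if (lnum == -EAGAIN) {
 *			err = ubifs_run_commit(c); // makes GC'ed index LEBs usable
 *			if (err)
 *				return err;
 *			continue;
 *		}
 *		return lnum;                 // -ENOSPC or other hard error
 *	}
 */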
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If a LEB has only dirty and free space, then we may safely unmap it and make
 * it free. Note, we cannot do this with indexing LEBs because dirty space may
 * correspond to index nodes that are required for recovery. In that case, the
 * LEB cannot be unmapped until after the next commit.
 *
 * This function returns %0 upon success and a negative error code upon failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs. Note that recovery requires that all
	 * wbufs are sync'd before this, which is done in 'do_commit()'.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Don't release the LEB until after the next commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
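		/*
		 * Added note: the assertion above guarantees LPROPS_INDEX is
		 * set, so the XOR clears it, while the OR sets LPROPS_TAKEN.
		 */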
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
						  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
  824. #endif
/**
 * ubifs_destroy_idx_gc - destroy idx_gc list.
 * @c: UBIFS file-system description object
 *
 * This function destroys the @c->idx_gc list. It is called when unmounting,
 * so locks are not needed.
 */
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
	while (!list_empty(&c->idx_gc)) {
		struct ubifs_gced_idx_leb *idx_gc;

		idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
				    list);
		c->idx_gc_cnt -= 1;
		list_del(&idx_gc->list);
		kfree(idx_gc);
	}
}
#ifndef __UBOOT__
/**
 * ubifs_get_idx_gc_leb - get a LEB from the GC'd index LEB list.
 * @c: UBIFS file-system description object
 *
 * Called during start commit, so locks are not needed.
 */
int ubifs_get_idx_gc_leb(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	int lnum;

	if (list_empty(&c->idx_gc))
		return -ENOSPC;
	idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
	lnum = idx_gc->lnum;
	/* c->idx_gc_cnt is updated by the caller when lprops are updated */
	list_del(&idx_gc->list);
	kfree(idx_gc);
	return lnum;
}
#endif