// SPDX-License-Identifier: GPL-2.0+
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of the
 * journal.
 */

#ifdef __UBOOT__
#include <linux/err.h>
#endif
#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in case
 * of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is not one.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;
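
	/*
	 * The log is used as a circular buffer: if the head is ahead of the
	 * tail, the free space wraps around the end of the log; if the head
	 * is behind the tail, the free space is the gap between them; and if
	 * head and tail coincide, the log is either completely full or
	 * completely empty, which is told apart by the LEB numbers.
	 */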
	if (h > t)
		return c->log_bytes - h + t;
	else if (h != t)
		return t - h;
	else if (c->lhead_lnum != c->ltail_lnum)
		return 0;
	else
		return c->log_bytes;
}

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we account this space now, before
	 * any data has been written to it, because this is needed to
	 * guarantee a fixed mount time, and this bud will anyway be read and
	 * scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds the bud to the buds tree. It also makes sure that the log size does
 * not exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if commit is required, and a negative error code in case of
 * failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All changes to @c->bud_bytes
	 * take place while both @c->log_mutex and @c->buds_lock are held.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);
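
	/*
	 * If there is no room left for a reference node in the current log
	 * head LEB, move the log head to the next log LEB.
	 */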
	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers to an empty
		 * LEB to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk referring to an LEB with garbage in
		 * case of an unclean reboot, because the target LEB might
		 * have been unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d", c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				wbuf->offs - bud->start, c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				c->leb_size - bud->start, c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will
			 * need to replay the journal, in which case the old
			 * buds must be unchanged. Do not release them until
			 * post commit, i.e. do not allow them to be garbage
			 * collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing a "commit start" node to the log,
 * along with reference nodes for all journal heads, which will define the new
 * journal after the commit has finished. The commit start and reference nodes
 * are written in one go to the nearest empty log LEB (hence, when the commit
 * is finished, UBIFS may safely unmap all the previous log LEBs). This
 * function returns zero in case of success and a negative error code in case
 * of failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */

	len = UBIFS_CS_NODE_SZ;
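	/*
	 * Add a reference node for every journal head which still points into
	 * a bud; these references, together with the CS node, define the
	 * journal after the commit.
	 */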
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	/* Must ensure next LEB has been unmapped */
	err = ubifs_leb_unmap(c, c->lhead_lnum);
	if (err)
		goto out;

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
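	/* The head LEB is now full - move the log head to the next log LEB. */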
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves the
 * log tail to the new position and updates the master node so that it stores
 * the new log tail LEB number. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only the short "commit start" phase
	 * during which writers are blocked.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);
	if (err)
		goto out;

	err = ubifs_write_master(c);

out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed
 * for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;
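
	/* Release the old buds now that the commit has completed. */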
	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
	struct rb_node rb;
	int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and a
 * negative error code in case of failure.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}
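
	/* Not seen before - record this LEB so it is only done once. */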
	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}

/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct done_ref *dr, *n;

	rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
		kfree(dr);
}

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write, passed and returned here
 * @offs: offset at which to write, passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;
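
	/*
	 * If the node does not fit into the remaining space of the current
	 * LEB, pad the buffer up to the min. I/O unit, write it out, and
	 * continue in the next log LEB.
	 */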
	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}

/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1 LEB
 * is needed for commit. This function rewrites the reference nodes in the log,
 * omitting duplicates and failed CS nodes, and leaving no gaps.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
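	/*
	 * Walk the log from the tail to the head, copying the first CS node
	 * and the non-duplicate reference nodes into a fresh, gapless layout.
	 */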
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
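	/* Flush the last, partially filled buffer, padded to the min. I/O unit. */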
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err(c, "log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}

/**
 * dbg_check_bud_bytes - make sure bud bytes calculations are all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL in
 * case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
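	/* Recompute the space taken by all buds and compare with @c->bud_bytes */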
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}