build.c

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mm.h> /* kvfree() */
#include "nodelist.h"

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
                struct jffs2_inode_cache *, struct jffs2_full_dirent **);

static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
        for (; *i < c->inocache_hashsize; (*i)++) {
                if (c->inocache_list[*i])
                        return c->inocache_list[*i];
        }
        return NULL;
}

static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
        /* More in this chain? */
        if (ic->next)
                return ic->next;
        (*i)++;
        return first_inode_chain(i, c);
}

#define for_each_inode(i, c, ic)                                \
        for (i = 0, ic = first_inode_chain(&i, (c));            \
             ic;                                                \
             ic = next_inode(&i, ic, (c)))
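
/*
 * Illustrative usage sketch (not part of the original file): for_each_inode()
 * walks every inocache in the hash table, chain by chain, which is exactly
 * how the build passes below iterate. do_something_with() is a hypothetical
 * placeholder, shown only to make the calling convention clear:
 *
 *      int i;
 *      struct jffs2_inode_cache *ic;
 *
 *      for_each_inode(i, c, ic)
 *              do_something_with(ic);
 */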

static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
                                    struct jffs2_inode_cache *ic,
                                    int *dir_hardlinks)
{
        struct jffs2_full_dirent *fd;

        dbg_fsbuild("building directory inode #%u\n", ic->ino);

        /* For each child, increase nlink */
        for (fd = ic->scan_dents; fd; fd = fd->next) {
                struct jffs2_inode_cache *child_ic;

                if (!fd->ino)
                        continue;

                /* we can get high latency here with huge directories */
                child_ic = jffs2_get_ino_cache(c, fd->ino);
                if (!child_ic) {
                        dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
                                    fd->name, fd->ino, ic->ino);
                        jffs2_mark_node_obsolete(c, fd->raw);
                        /* Clear the ic/raw union so it doesn't cause problems later. */
                        fd->ic = NULL;
                        continue;
                }

                /* From this point, fd->raw is no longer used so we can set fd->ic */
                fd->ic = child_ic;
                child_ic->pino_nlink++;

                /* If we appear (at this stage) to have hard-linked directories,
                 * set a flag to trigger a scan later */
                if (fd->type == DT_DIR) {
                        child_ic->flags |= INO_FLAGS_IS_DIR;
                        if (child_ic->pino_nlink > 1)
                                *dir_hardlinks = 1;
                }

                dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
                /* Can't free scan_dents so far. We might need them in pass 2 */
        }
}
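
/*
 * Worked example (illustrative, not from the original source): if dirents in
 * two different directories both name child ino #42 with fd->type == DT_DIR,
 * the second increment takes child_ic->pino_nlink to 2, so *dir_hardlinks is
 * set and jffs2_build_filesystem() below schedules the extra clean-up of
 * directory pino_nlink values after pass 2a.
 */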

/* Scan plan:
   - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
   - Scan directory tree from top down, setting nlink in inocaches
   - Scan inocaches for inodes with nlink==0
*/
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
        int ret, i, dir_hardlinks = 0;
        struct jffs2_inode_cache *ic;
        struct jffs2_full_dirent *fd;
        struct jffs2_full_dirent *dead_fds = NULL;

        dbg_fsbuild("build FS data structures\n");

        /* First, scan the medium and build all the inode caches with
           lists of physical nodes */
        c->flags |= JFFS2_SB_FLAG_SCANNING;
        ret = jffs2_scan_medium(c);
        c->flags &= ~JFFS2_SB_FLAG_SCANNING;
        if (ret)
                goto exit;

        dbg_fsbuild("scanned flash completely\n");
        jffs2_dbg_dump_block_lists_nolock(c);

        dbg_fsbuild("pass 1 starting\n");
        c->flags |= JFFS2_SB_FLAG_BUILDING;
        /* Now scan the directory tree, increasing nlink according to every dirent found. */
        for_each_inode(i, c, ic) {
                if (ic->scan_dents) {
                        jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
                        cond_resched();
                }
        }

        dbg_fsbuild("pass 1 complete\n");

        /* Next, scan for inodes with nlink == 0 and remove them. If
           they were directories, then decrement the nlink of their
           children too, and repeat the scan. As that's going to be
           a fairly uncommon occurrence, it's not so evil to do it this
           way. Recursion bad. */
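        /*
         * Illustrative sketch of the worklist (not from the original source):
         * if an unlinked directory a/ still holds a dirent for b/, which in
         * turn holds one for c/, removing a/ drops b's nlink to zero and
         * pushes b's dirent onto dead_fds; pass 2a then pops it, removes b/,
         * and pushes c's dirent the same way, unwinding the chain iteratively
         * rather than by recursion.
         */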
        dbg_fsbuild("pass 2 starting\n");

        for_each_inode(i, c, ic) {
                if (ic->pino_nlink)
                        continue;

                jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
                cond_resched();
        }

        dbg_fsbuild("pass 2a starting\n");

        while (dead_fds) {
                fd = dead_fds;
                dead_fds = fd->next;

                ic = jffs2_get_ino_cache(c, fd->ino);

                if (ic)
                        jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
                jffs2_free_full_dirent(fd);
        }

        dbg_fsbuild("pass 2a complete\n");

        if (dir_hardlinks) {
                /* If we detected directory hardlinks earlier, *hopefully*
                 * they are gone now because some of the links were from
                 * dead directories which still had some old dirents lying
                 * around and not yet garbage-collected, but which have
                 * been discarded above. So clear the pino_nlink field
                 * in each directory, so that the final scan below can
                 * print appropriate warnings. */
                for_each_inode(i, c, ic) {
                        if (ic->flags & INO_FLAGS_IS_DIR)
                                ic->pino_nlink = 0;
                }
        }

        dbg_fsbuild("freeing temporary data structures\n");

        /* Finally, we can scan again and free the dirent structs */
        for_each_inode(i, c, ic) {
                while (ic->scan_dents) {
                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;
                        /* We do use the pino_nlink field to count nlink of
                         * directories during fs build, so now set it to the
                         * parent ino#, since there should (hopefully) be
                         * only one parent left. */
                        if (fd->type == DT_DIR) {
                                if (!fd->ic) {
                                        /* We'll have complained about it and marked the
                                           corresponding raw node obsolete already. Just skip it. */
                                        continue;
                                }
                                /* We *have* to have set this in jffs2_build_inode_pass1() */
                                BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));

                                /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
                                 * is set. Otherwise, we know this should never trigger anyway, so
                                 * we don't do the check. And ic->pino_nlink still contains the nlink
                                 * value (which is 1). */
                                if (dir_hardlinks && fd->ic->pino_nlink) {
                                        JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
                                                    fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
                                        /* Should we unlink it from its previous parent? */
                                }

                                /* For directories, ic->pino_nlink holds that parent inode # */
                                fd->ic->pino_nlink = ic->ino;
                        }
                        jffs2_free_full_dirent(fd);
                }
                ic->scan_dents = NULL;
                cond_resched();
        }
        jffs2_build_xattr_subsystem(c);
        c->flags &= ~JFFS2_SB_FLAG_BUILDING;

        dbg_fsbuild("FS build complete\n");

        /* Rotate the lists by some number to ensure wear levelling */
        jffs2_rotate_lists(c);

        ret = 0;

exit:
        if (ret) {
                for_each_inode(i, c, ic) {
                        while (ic->scan_dents) {
                                fd = ic->scan_dents;
                                ic->scan_dents = fd->next;
                                jffs2_free_full_dirent(fd);
                        }
                }
                jffs2_clear_xattr_subsystem(c);
        }

        return ret;
}

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
                                              struct jffs2_inode_cache *ic,
                                              struct jffs2_full_dirent **dead_fds)
{
        struct jffs2_raw_node_ref *raw;
        struct jffs2_full_dirent *fd;

        dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);

        raw = ic->nodes;
        while (raw != (void *)ic) {
                struct jffs2_raw_node_ref *next = raw->next_in_ino;
                dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
                jffs2_mark_node_obsolete(c, raw);
                raw = next;
        }

        if (ic->scan_dents) {
                int whinged = 0;
                dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);

                while (ic->scan_dents) {
                        struct jffs2_inode_cache *child_ic;

                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;

                        if (!fd->ino) {
                                /* It's a deletion dirent. Ignore it */
                                dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
                                jffs2_free_full_dirent(fd);
                                continue;
                        }
                        if (!whinged)
                                whinged = 1;

                        dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);

                        child_ic = jffs2_get_ino_cache(c, fd->ino);
                        if (!child_ic) {
                                dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
                                            fd->name, fd->ino);
                                jffs2_free_full_dirent(fd);
                                continue;
                        }

                        /* Reduce nlink of the child. If it's now zero, stick it on the
                           dead_fds list to be cleaned up later. Else just free the fd */
                        child_ic->pino_nlink--;
                        if (!child_ic->pino_nlink) {
                                dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
                                            fd->ino, fd->name);
                                fd->next = *dead_fds;
                                *dead_fds = fd;
                        } else {
                                dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
                                            fd->ino, fd->name, child_ic->pino_nlink);
                                jffs2_free_full_dirent(fd);
                        }
                }
        }

        /*
           We don't delete the inocache from the hash list and free it yet.
           The erase code will do that, when all the nodes are completely gone.
        */
}

static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
        uint32_t size;

        /* Deletion should almost _always_ be allowed. We're fairly
           buggered once we stop allowing people to delete stuff
           because there's not enough free space... */
        c->resv_blocks_deletion = 2;

        /* Be conservative about how much space we need before we allow writes.
           On top of that which is required for deletia, require an extra 2%
           of the medium to be available, for overhead caused by nodes being
           split across blocks, etc. */
        size = c->flash_size / 50;      /* 2% of flash size */
        size += c->nr_blocks * 100;     /* And 100 bytes per eraseblock */
        size += c->sector_size - 1;     /* ... and round up */

        c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);

        /* When do we let the GC thread run in the background */
        c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

        /* When do we allow garbage collection to merge nodes to make
           long-term progress at the expense of short-term space exhaustion? */
        c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

        /* When do we allow garbage collection to eat from bad blocks rather
           than actually making progress? */
        c->resv_blocks_gcbad = 0; /* c->resv_blocks_deletion + 2; */

        /* What number of 'very dirty' eraseblocks do we allow before we
           trigger the GC thread even if we don't _need_ the space. When we
           can't mark nodes obsolete on the medium, the old dirty nodes cause
           performance problems because we have to inspect and discard them. */
        c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
        if (jffs2_can_mark_obsolete(c))
                c->vdirty_blocks_gctrigger *= 10;

        /* If there's less than this amount of dirty space, don't bother
           trying to GC to make more space. It'll be a fruitless task */
        c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
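
        /*
         * Worked example (illustrative numbers, not from the original source):
         * assume a hypothetical 16 MiB flash with 64 KiB eraseblocks, i.e.
         * flash_size = 16777216, sector_size = 65536, nr_blocks = 256:
         *
         *      size                    = 16777216/50 + 256*100 + 65535 = 426679
         *      resv_blocks_write       = 2 + 426679/65536 = 2 + 6      = 8
         *      resv_blocks_gctrigger   = 8 + 1                         = 9
         *      resv_blocks_gcmerge     = 2 + 1                         = 3
         *      nospc_dirty_size        = 65536 + 16777216/100          = 233308 bytes
         *      vdirty_blocks_gctrigger = 9 (or 90 if nodes can be
         *                                marked obsolete in place)
         */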

        dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
                    c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
        dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
                    c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
        dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
                    c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
        dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
                    c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
        dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
                    c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
        dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
                    c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
        dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
                    c->nospc_dirty_size);
        dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
                    c->vdirty_blocks_gctrigger);
}

int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
        int ret;
        int i;
        int size;

        c->free_size = c->flash_size;
        c->nr_blocks = c->flash_size / c->sector_size;
        size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
#ifndef __ECOS
        if (jffs2_blocks_use_vmalloc(c))
                c->blocks = vzalloc(size);
        else
#endif
                c->blocks = kzalloc(size, GFP_KERNEL);
        if (!c->blocks)
                return -ENOMEM;

        for (i = 0; i < c->nr_blocks; i++) {
                INIT_LIST_HEAD(&c->blocks[i].list);
                c->blocks[i].offset = i * c->sector_size;
                c->blocks[i].free_size = c->sector_size;
        }

        INIT_LIST_HEAD(&c->clean_list);
        INIT_LIST_HEAD(&c->very_dirty_list);
        INIT_LIST_HEAD(&c->dirty_list);
        INIT_LIST_HEAD(&c->erasable_list);
        INIT_LIST_HEAD(&c->erasing_list);
        INIT_LIST_HEAD(&c->erase_checking_list);
        INIT_LIST_HEAD(&c->erase_pending_list);
        INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
        INIT_LIST_HEAD(&c->erase_complete_list);
        INIT_LIST_HEAD(&c->free_list);
        INIT_LIST_HEAD(&c->bad_list);
        INIT_LIST_HEAD(&c->bad_used_list);

        c->highest_ino = 1;
        c->summary = NULL;

        ret = jffs2_sum_init(c);
        if (ret)
                goto out_free;

        if (jffs2_build_filesystem(c)) {
                dbg_fsbuild("build_fs failed\n");
                jffs2_free_ino_caches(c);
                jffs2_free_raw_node_refs(c);
                ret = -EIO;
                goto out_sum_exit;
        }

        jffs2_calc_trigger_levels(c);

        return 0;

 out_sum_exit:
        jffs2_sum_exit(c);
 out_free:
        kvfree(c->blocks);

        return ret;
}