malloc.c

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: malloc.c,v 1.1.1.1 2007/06/12 07:27:13 eyryu Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"

/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;

#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
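
/*
 * Create the slab caches for JFFS2's fixed-size node structures. Called
 * once at initialisation; if any cache cannot be created, everything
 * allocated so far is torn down and -ENOMEM is returned.
 */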
int __init jffs2_create_slab_caches(void)
{
	full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
					    sizeof(struct jffs2_full_dnode),
					    0, 0, NULL, NULL);
	if (!full_dnode_slab)
		goto err;

	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
					    sizeof(struct jffs2_raw_dirent),
					    0, 0, NULL, NULL);
	if (!raw_dirent_slab)
		goto err;

	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
					   sizeof(struct jffs2_raw_inode),
					   0, 0, NULL, NULL);
	if (!raw_inode_slab)
		goto err;

	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
						sizeof(struct jffs2_tmp_dnode_info),
						0, 0, NULL, NULL);
	if (!tmp_dnode_info_slab)
		goto err;

	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
					      sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
					      0, 0, NULL, NULL);
	if (!raw_node_ref_slab)
		goto err;

	node_frag_slab = kmem_cache_create("jffs2_node_frag",
					   sizeof(struct jffs2_node_frag),
					   0, 0, NULL, NULL);
	if (!node_frag_slab)
		goto err;

	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
					     sizeof(struct jffs2_inode_cache),
					     0, 0, NULL, NULL);
	if (!inode_cache_slab)
		goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
	xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
					      sizeof(struct jffs2_xattr_datum),
					      0, 0, NULL, NULL);
	if (!xattr_datum_cache)
		goto err;

	xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
					    sizeof(struct jffs2_xattr_ref),
					    0, 0, NULL, NULL);
	if (!xattr_ref_cache)
		goto err;
#endif

	return 0;
 err:
	jffs2_destroy_slab_caches();
	return -ENOMEM;
}
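
/*
 * Tear down the caches. Each pointer is checked before being destroyed,
 * so this is safe to call from the error path above when only some of
 * the caches have been created.
 */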
void jffs2_destroy_slab_caches(void)
{
	if (full_dnode_slab)
		kmem_cache_destroy(full_dnode_slab);
	if (raw_dirent_slab)
		kmem_cache_destroy(raw_dirent_slab);
	if (raw_inode_slab)
		kmem_cache_destroy(raw_inode_slab);
	if (tmp_dnode_info_slab)
		kmem_cache_destroy(tmp_dnode_info_slab);
	if (raw_node_ref_slab)
		kmem_cache_destroy(raw_node_ref_slab);
	if (node_frag_slab)
		kmem_cache_destroy(node_frag_slab);
	if (inode_cache_slab)
		kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
	if (xattr_datum_cache)
		kmem_cache_destroy(xattr_datum_cache);
	if (xattr_ref_cache)
		kmem_cache_destroy(xattr_ref_cache);
#endif
}
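
/*
 * Full dirents carry the name inline at the end of the structure, so
 * they are sized per-allocation with kmalloc() rather than coming from
 * a fixed-size slab cache.
 */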
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
	struct jffs2_full_dirent *ret;

	ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kfree(x);
}

struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
	struct jffs2_full_dnode *ret;

	ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(full_dnode_slab, x);
}

struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
	struct jffs2_raw_dirent *ret;

	ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_dirent_slab, x);
}

struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
	struct jffs2_raw_inode *ret;

	ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_inode_slab, x);
}

struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
	struct jffs2_tmp_dnode_info *ret;

	ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(tmp_dnode_info_slab, x);
}
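
/*
 * A refblock holds REFS_PER_BLOCK raw node refs plus one extra entry.
 * The ordinary refs start out marked REF_EMPTY_NODE; the final entry is
 * a REF_LINK_NODE sentinel whose next_in_ino pointer is used to chain
 * refblocks together.
 */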
static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
	struct jffs2_raw_node_ref *ret;

	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
	if (ret) {
		int i = 0;

		for (i = 0; i < REFS_PER_BLOCK; i++) {
			ret[i].flash_offset = REF_EMPTY_NODE;
			ret[i].next_in_ino = NULL;
		}
		ret[i].flash_offset = REF_LINK_NODE;
		ret[i].next_in_ino = NULL;
	}
	return ret;
}
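
/*
 * Make sure at least 'nr' empty raw node refs are available for the
 * eraseblock 'jeb', walking on from jeb->last_node and allocating and
 * chaining new refblocks as required. The reservation is recorded in
 * jeb->allocated_refs.
 */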
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb, int nr)
{
	struct jffs2_raw_node_ref **p, *ref;
	int i = nr;

	dbg_memalloc("%d\n", nr);

	p = &jeb->last_node;
	ref = *p;

	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

	/* If jeb->last_node is really a valid node then skip over it */
	if (ref && ref->flash_offset != REF_EMPTY_NODE)
		ref++;

	while (i) {
		if (!ref) {
			dbg_memalloc("Allocating new refblock linked from %p\n", p);
			ref = *p = jffs2_alloc_refblock();
			if (!ref)
				return -ENOMEM;
		}
		if (ref->flash_offset == REF_LINK_NODE) {
			p = &ref->next_in_ino;
			ref = *p;
			continue;
		}
		i--;
		ref++;
	}
	jeb->allocated_refs = nr;

	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
		     jeb->last_node->next_in_ino);

	return 0;
}

void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_node_ref_slab, x);
}

struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
	struct jffs2_node_frag *ret;

	ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(node_frag_slab, x);
}

struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ret;

	ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(inode_cache_slab, x);
}
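
/*
 * Unlike the allocators above, the xattr allocators zero the new object
 * and pre-initialise its 'class' and 'node' fields before returning it.
 */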
#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
	struct jffs2_xattr_datum *xd;

	xd = kmem_cache_alloc(xattr_datum_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", xd);
	if (!xd)	/* don't memset() a failed allocation */
		return NULL;

	memset(xd, 0, sizeof(struct jffs2_xattr_datum));
	xd->class = RAWNODE_CLASS_XATTR_DATUM;
	xd->node = (void *)xd;
	INIT_LIST_HEAD(&xd->xindex);
	return xd;
}

void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
	dbg_memalloc("%p\n", xd);
	kmem_cache_free(xattr_datum_cache, xd);
}

struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
	struct jffs2_xattr_ref *ref;

	ref = kmem_cache_alloc(xattr_ref_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", ref);
	if (!ref)	/* don't memset() a failed allocation */
		return NULL;

	memset(ref, 0, sizeof(struct jffs2_xattr_ref));
	ref->class = RAWNODE_CLASS_XATTR_REF;
	ref->node = (void *)ref;
	return ref;
}

void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
	dbg_memalloc("%p\n", ref);
	kmem_cache_free(xattr_ref_cache, ref);
}
#endif