// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed. To avoid repeatedly decompressing
 * recently accessed data Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache implementation used for both caches,
 * plus functions layered on top of the generic cache implementation to
 * access the metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc the cache
 * uses sequences of kmalloced PAGE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way. The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access. Because metadata and fragments are packed
 * together into blocks (to gain greater compression) the read of a particular
 * piece of metadata or fragment will retrieve other metadata/fragments which
 * have been packed with it; because of locality-of-reference these may be read
 * in the near future. Temporarily caching them ensures they are available for
 * near-future access without requiring an additional read and decompress.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "page_actor.h"

/*
 * Look-up block in cache, and increment usage count. If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
        struct squashfs_cache *cache, u64 block, int length)
{
        int i, n;
        struct squashfs_cache_entry *entry;

        spin_lock(&cache->lock);

        while (1) {
                for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
                        if (cache->entry[i].block == block) {
                                cache->curr_blk = i;
                                break;
                        }
                        i = (i + 1) % cache->entries;
                }

                if (n == cache->entries) {
                        /*
                         * Block not in cache, if all cache entries are used
                         * go to sleep waiting for one to become available.
                         */
                        if (cache->unused == 0) {
                                cache->num_waiters++;
                                spin_unlock(&cache->lock);
                                wait_event(cache->wait_queue, cache->unused);
                                spin_lock(&cache->lock);
                                cache->num_waiters--;
                                continue;
                        }

                        /*
                         * At least one unused cache entry. A simple
                         * round-robin strategy is used to choose the entry to
                         * be evicted from the cache.
                         */
                        i = cache->next_blk;
                        for (n = 0; n < cache->entries; n++) {
                                if (cache->entry[i].refcount == 0)
                                        break;
                                i = (i + 1) % cache->entries;
                        }

                        cache->next_blk = (i + 1) % cache->entries;
                        entry = &cache->entry[i];

                        /*
                         * Initialise chosen cache entry, and fill it in from
                         * disk.
                         */
                        cache->unused--;
                        entry->block = block;
                        entry->refcount = 1;
                        entry->pending = 1;
                        entry->num_waiters = 0;
                        entry->error = 0;
                        spin_unlock(&cache->lock);

                        entry->length = squashfs_read_data(sb, block, length,
                                &entry->next_index, entry->actor);

                        spin_lock(&cache->lock);

                        if (entry->length < 0)
                                entry->error = entry->length;

                        entry->pending = 0;

                        /*
                         * While filling this entry one or more other processes
                         * have looked it up in the cache, and have slept
                         * waiting for it to become available.
                         */
                        if (entry->num_waiters) {
                                spin_unlock(&cache->lock);
                                wake_up_all(&entry->wait_queue);
                        } else
                                spin_unlock(&cache->lock);

                        goto out;
                }

                /*
                 * Block already in cache. Increment refcount so it doesn't
                 * get reused until we're finished with it, if it was
                 * previously unused there's one less cache entry available
                 * for reuse.
                 */
                entry = &cache->entry[i];
                if (entry->refcount == 0)
                        cache->unused--;
                entry->refcount++;

                /*
                 * If the entry is currently being filled in by another process
                 * go to sleep waiting for it to become available.
                 */
                if (entry->pending) {
                        entry->num_waiters++;
                        spin_unlock(&cache->lock);
                        wait_event(entry->wait_queue, !entry->pending);
                } else
                        spin_unlock(&cache->lock);

                goto out;
        }

out:
        TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
                cache->name, i, entry->block, entry->refcount, entry->error);

        if (entry->error)
                ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
                        block);
        return entry;
}
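
/*
 * A minimal usage sketch (added commentary, not part of the original file):
 * every squashfs_cache_get() must be paired with a squashfs_cache_put(),
 * including on error, because the entry's refcount is taken before the read
 * is attempted. The caller context and variables below are hypothetical;
 * squashfs_read_metadata() later in this file follows the same pattern.
 *
 *	struct squashfs_cache_entry *entry;
 *
 *	entry = squashfs_cache_get(sb, cache, block, length);
 *	if (entry->error) {
 *		int err = entry->error;
 *
 *		squashfs_cache_put(entry);
 *		return err;
 *	}
 *	// ... read decompressed data via entry->data or squashfs_copy_data() ...
 *	squashfs_cache_put(entry);
 */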

/*
 * Release cache entry, once usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
        struct squashfs_cache *cache = entry->cache;

        spin_lock(&cache->lock);
        entry->refcount--;
        if (entry->refcount == 0) {
                cache->unused++;
                /*
                 * If there are any processes waiting for a block to become
                 * available, wake one up.
                 */
                if (cache->num_waiters) {
                        spin_unlock(&cache->lock);
                        wake_up(&cache->wait_queue);
                        return;
                }
        }
        spin_unlock(&cache->lock);
}

/*
 * Delete cache reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
        int i, j;

        if (cache == NULL)
                return;

        for (i = 0; i < cache->entries; i++) {
                if (cache->entry[i].data) {
                        for (j = 0; j < cache->pages; j++)
                                kfree(cache->entry[i].data[j]);
                        kfree(cache->entry[i].data);
                }
                kfree(cache->entry[i].actor);
        }

        kfree(cache->entry);
        kfree(cache);
}

/*
 * Initialise cache allocating the specified number of entries, each of
 * size block_size. To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
        int block_size)
{
        int i, j;
        struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

        if (cache == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                return NULL;
        }

        cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
        if (cache->entry == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                goto cleanup;
        }

        cache->curr_blk = 0;
        cache->next_blk = 0;
        cache->unused = entries;
        cache->entries = entries;
        cache->block_size = block_size;
        cache->pages = block_size >> PAGE_SHIFT;
        cache->pages = cache->pages ? cache->pages : 1;
        cache->name = name;
        cache->num_waiters = 0;
        spin_lock_init(&cache->lock);
        init_waitqueue_head(&cache->wait_queue);

        for (i = 0; i < entries; i++) {
                struct squashfs_cache_entry *entry = &cache->entry[i];

                init_waitqueue_head(&cache->entry[i].wait_queue);
                entry->cache = cache;
                entry->block = SQUASHFS_INVALID_BLK;
                entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
                if (entry->data == NULL) {
                        ERROR("Failed to allocate %s cache entry\n", name);
                        goto cleanup;
                }

                for (j = 0; j < cache->pages; j++) {
                        entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (entry->data[j] == NULL) {
                                ERROR("Failed to allocate %s buffer\n", name);
                                goto cleanup;
                        }
                }

                entry->actor = squashfs_page_actor_init(entry->data,
                        cache->pages, 0);
                if (entry->actor == NULL) {
                        ERROR("Failed to allocate %s cache entry\n", name);
                        goto cleanup;
                }
        }

        return cache;

cleanup:
        squashfs_cache_delete(cache);
        return NULL;
}
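
/*
 * A brief illustration (added commentary, not part of the original file):
 * the metadata and fragment caches are created at mount time roughly as
 * sketched below. The exact call sites live in squashfs's super.c; the
 * constants shown are as I recall them and may differ between kernel
 * versions.
 *
 *	msblk->block_cache = squashfs_cache_init("metadata",
 *		SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
 *	msblk->fragment_cache = squashfs_cache_init("fragment",
 *		SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
 *
 * On failure squashfs_cache_init() returns NULL after releasing any partial
 * allocations via squashfs_cache_delete().
 */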

/*
 * Copy up to length bytes from cache entry to buffer starting at offset bytes
 * into the cache entry. If fewer than length bytes are available then copy
 * the number of bytes available. In all cases return the number of bytes
 * copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
                int offset, int length)
{
        int remaining = length;

        if (length == 0)
                return 0;
        else if (buffer == NULL)
                return min(length, entry->length - offset);

        while (offset < entry->length) {
                void *buff = entry->data[offset / PAGE_SIZE]
                                + (offset % PAGE_SIZE);
                int bytes = min_t(int, entry->length - offset,
                                PAGE_SIZE - (offset % PAGE_SIZE));

                if (bytes >= remaining) {
                        memcpy(buffer, buff, remaining);
                        remaining = 0;
                        break;
                }

                memcpy(buffer, buff, bytes);
                buffer += bytes;
                remaining -= bytes;
                offset += bytes;
        }

        return length - remaining;
}
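
/*
 * Example of the copy semantics (added commentary; the entry and sizes are
 * hypothetical): with an entry holding 100 decompressed bytes, a request for
 * 80 bytes starting at offset 50 yields only the 50 bytes that exist past
 * the offset. Passing a NULL buffer performs the same length calculation
 * without copying anything.
 *
 *	int n;
 *
 *	n = squashfs_copy_data(buf, entry, 50, 80);	// n == 50, bytes copied
 *	n = squashfs_copy_data(NULL, entry, 50, 80);	// n == 50, no copy
 */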

/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed). Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
                u64 *block, int *offset, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int bytes, res = length;
        struct squashfs_cache_entry *entry;

        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

        if (unlikely(length < 0))
                return -EIO;

        while (length) {
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
                        res = entry->error;
                        goto error;
                } else if (*offset >= entry->length) {
                        res = -EIO;
                        goto error;
                }

                bytes = squashfs_copy_data(buffer, entry, *offset, length);
                if (buffer)
                        buffer += bytes;
                length -= bytes;
                *offset += bytes;
                if (*offset == entry->length) {
                        *block = entry->next_index;
                        *offset = 0;
                }

                squashfs_cache_put(entry);
        }

        return res;

error:
        squashfs_cache_put(entry);
        return res;
}
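
/*
 * Illustrative call (added commentary): <*block, *offset> acts as a cursor
 * that the function advances across packed metadata blocks, so consecutive
 * items can be read with repeated calls. The variable names below are
 * hypothetical.
 *
 *	u64 block = inode_table_start;	// start of a compressed block on disk
 *	int offset = 0;			// offset into it once decompressed
 *	int err;
 *
 *	err = squashfs_read_metadata(sb, &header, &block, &offset,
 *		sizeof(header));
 *	// on success, block/offset now point just past the bytes read;
 *	// passing a NULL buffer skips length bytes without copying
 */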

/*
 * Look-up in the fragment cache the fragment located at <start_block> in the
 * filesystem. If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
                length);
}

/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem. The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}

/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 */
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
        int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int i, res;
        void *table, *buffer, **data;
        struct squashfs_page_actor *actor;

        table = buffer = kmalloc(length, GFP_KERNEL);
        if (table == NULL)
                return ERR_PTR(-ENOMEM);

        data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (data == NULL) {
                res = -ENOMEM;
                goto failed;
        }

        actor = squashfs_page_actor_init(data, pages, length);
        if (actor == NULL) {
                res = -ENOMEM;
                goto failed2;
        }

        for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
                data[i] = buffer;

        res = squashfs_read_data(sb, block, length |
                SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);

        kfree(data);
        kfree(actor);

        if (res < 0)
                goto failed;

        return table;

failed2:
        kfree(data);
failed:
        kfree(table);
        return ERR_PTR(res);
}
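
/*
 * Usage note (added commentary): callers receive either a kmalloced buffer
 * of exactly length bytes or an ERR_PTR(), so the result should be checked
 * with IS_ERR() and eventually released with kfree(). A hypothetical sketch:
 *
 *	table = squashfs_read_table(sb, table_start, table_length);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	// ... use table ...
 *	kfree(table);
 */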