blkcache.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 */
#include <common.h>
#include <blk.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>
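
/*
 * One cached span of blocks. Entries are keyed by interface type,
 * device number and block size, and kept in MRU order on block_cache.
 */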
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};
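
/* On M68K the cache list is set up at run time by blkcache_init() */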
#ifndef CONFIG_M68K
static LIST_HEAD(block_cache);
#else
static struct list_head block_cache;
#endif

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
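
/* Called early on M68K, where the list head is not statically initialized */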
#ifdef CONFIG_M68K
int blkcache_init(void)
{
	INIT_LIST_HEAD(&block_cache);

	return 0;
}
#endif
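
/*
 * Find a cached node that fully covers [start, start + blkcnt) for the
 * given device and block size, and promote it to the head (MRU) of the
 * list. Returns NULL if no entry covers the request.
 */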
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}
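
/*
 * Satisfy a read from the cache if possible. Returns 1 and copies the
 * blocks into @buffer on a hit, 0 on a miss; the hit/miss counters feed
 * blkcache_stats().
 */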
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;
		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
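
/*
 * Add a freshly-read span of blocks to the cache. Oversized requests
 * are not cached; when the cache is full the LRU entry (list tail) is
 * recycled, reusing its buffer when it is already large enough.
 */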
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = list_entry(block_cache.prev,
				  struct block_cache_node, lh);
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* keep the old buffer only if it can hold the new span */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
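
/*
 * Drop every cached entry belonging to one device, e.g. after a write
 * or a media change makes its cached blocks stale.
 */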
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = list_entry(entry, struct block_cache_node, lh);
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
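
/*
 * Change the cache geometry. Any change of the limits empties the
 * cache, since existing entries may violate the new bounds; the
 * hit/miss counters are reset either way.
 */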
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = list_entry(block_cache.next,
					  struct block_cache_node, lh);
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
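
/* Copy out the current statistics and reset the hit/miss counters */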
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}
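
/*
 * Usage sketch (illustrative only, not part of this file): a block
 * device's read path would typically try the cache first and fill it
 * after a successful device read, roughly as U-Boot's blk uclass does.
 * The read_blocks()/device_read() names and desc fields below are
 * assumptions made for this example.
 *
 *	ulong read_blocks(struct blk_desc *desc, lbaint_t start,
 *			  lbaint_t blkcnt, void *buffer)
 *	{
 *		// serve the request from the cache when possible
 *		if (blkcache_read(desc->if_type, desc->devnum,
 *				  start, blkcnt, desc->blksz, buffer))
 *			return blkcnt;
 *
 *		// fall back to the hardware, then populate the cache
 *		if (device_read(desc, start, blkcnt, buffer) != blkcnt)
 *			return 0;
 *		blkcache_fill(desc->if_type, desc->devnum,
 *			      start, blkcnt, desc->blksz, buffer);
 *		return blkcnt;
 *	}
 */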