blkcache.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

#ifdef CONFIG_NEEDS_MANUAL_RELOC
DECLARE_GLOBAL_DATA_PTR;
#endif
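
/*
 * One cache entry: a contiguous run of blkcnt blocks starting at block
 * "start" on the device identified by (iftype, devnum).  The lh member
 * must stay first so that list pointers can be cast back to the node.
 */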
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};
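
/*
 * Entries are kept in MRU order: the head of block_cache is the most
 * recently used entry, the tail the least recently used.  Defaults:
 * entries of up to 8 blocks each, at most 32 entries.
 */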
static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
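
/*
 * With manual relocation, the statically initialized list head still
 * points at its own pre-relocation address; adjust both pointers by
 * the relocation offset before the cache is first used.
 */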
#ifdef CONFIG_NEEDS_MANUAL_RELOC
int blkcache_init(void)
{
	struct list_head *head = &block_cache;

	head->next = (void *)((uintptr_t)head->next + gd->reloc_off);
	head->prev = (void *)((uintptr_t)head->prev + gd->reloc_off);

	return 0;
}
#endif
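
/*
 * Find a cache entry that fully covers the requested block range and
 * move it to the front of the list (MRU position).  Returns NULL if
 * no entry matches the device, block size and range.
 */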
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}
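
/*
 * Satisfy a read from the cache if possible: on a hit, copy the blocks
 * into "buffer" and return 1; on a miss, return 0.
 */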
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);

	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
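
/*
 * Store a block range in the cache after a successful device read.
 * Ranges larger than max_blocks_per_entry are not cached.  When the
 * cache is full, the least recently used entry (the list tail) is
 * evicted and its buffer reused if it is large enough for the new
 * range.
 */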
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
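
/*
 * Drop all cache entries belonging to one device, e.g. after a write
 * or a media change.
 */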
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
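
/*
 * Change the cache limits.  Any change empties the cache, since
 * existing entries may no longer fit the new limits; the hit/miss
 * counters are reset either way.
 */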
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = (struct block_cache_node *)block_cache.next;
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
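
/* Copy out the current statistics, then reset the hit/miss counters. */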
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}
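
/*
 * Illustrative sketch of how an interface driver's read path would use
 * this cache; hw_read and its arguments are placeholders, not part of
 * this file:
 *
 *	if (blkcache_read(iftype, devnum, start, blkcnt, blksz, buffer))
 *		return blkcnt;
 *	n = hw_read(dev, start, blkcnt, buffer);
 *	if (n == blkcnt)
 *		blkcache_fill(iftype, devnum, start, blkcnt, blksz, buffer);
 *	return n;
 */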