cacheinfo.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/cacheinfo.h>
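
/*
 * Optional hook registered by platform code; cache_get_priv_group() uses it
 * to supply an extra attribute group for a cacheinfo leaf.
 */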
static struct riscv_cacheinfo_ops *rv_cache_ops;

void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
        rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
        if (rv_cache_ops && rv_cache_ops->get_priv_group)
                return rv_cache_ops->get_priv_group(this_leaf);
        return NULL;
}
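
/*
 * Look up the cacheinfo leaf matching @level and @type on the current CPU.
 */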
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
        /*
         * Using raw_smp_processor_id() elides a preemptibility check, but this
         * is really indicative of a larger problem: the cacheinfo UABI assumes
         * that cores have a homogeneous view of the cache hierarchy. That
         * happens to be the case for the current set of RISC-V systems, but
         * likely won't be true in general. Since there's no way to provide
         * correct information for these systems via the current UABI we're
         * just eliding the check for now.
         */
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
        struct cacheinfo *this_leaf;
        int index;

        for (index = 0; index < this_cpu_ci->num_leaves; index++) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level == level && this_leaf->type == type)
                        return this_leaf;
        }

        return NULL;
}
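
/*
 * Return the size of the cache at @level of @type, or 0 if no such cache is
 * described.
 */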
uintptr_t get_cache_size(u32 level, enum cache_type type)
{
        struct cacheinfo *this_leaf = get_cacheinfo(level, type);

        return this_leaf ? this_leaf->size : 0;
}
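
/*
 * Return the cache geometry packed as (ways of associativity << 16) |
 * coherency line size, or 0 if no such cache is described.
 */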
uintptr_t get_cache_geometry(u32 level, enum cache_type type)
{
        struct cacheinfo *this_leaf = get_cacheinfo(level, type);

        return this_leaf ? (this_leaf->ways_of_associativity << 16 |
                            this_leaf->coherency_line_size) :
                           0;
}
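
/*
 * Fill in one cacheinfo leaf from the raw devicetree cache properties.
 */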
static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
                         unsigned int level, unsigned int size,
                         unsigned int sets, unsigned int line_size)
{
        this_leaf->level = level;
        this_leaf->type = type;
        this_leaf->size = size;
        this_leaf->number_of_sets = sets;
        this_leaf->coherency_line_size = line_size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (sets == 1)
                return;

        /*
         * Set the number of ways for an n-way set-associative cache, making
         * sure all properties are greater than zero.
         */
        if (sets > 0 && size > 0 && line_size > 0)
                this_leaf->ways_of_associativity = (size / sets) / line_size;
}
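
/*
 * Create leaves for the unified, instruction and data caches described by
 * @node at @level, advancing *this_leaf past each leaf that is filled in.
 */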
static void fill_cacheinfo(struct cacheinfo **this_leaf,
                           struct device_node *node, unsigned int level)
{
        unsigned int size, sets, line_size;

        if (!of_property_read_u32(node, "cache-size", &size) &&
            !of_property_read_u32(node, "cache-block-size", &line_size) &&
            !of_property_read_u32(node, "cache-sets", &sets)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
        }

        if (!of_property_read_u32(node, "i-cache-size", &size) &&
            !of_property_read_u32(node, "i-cache-sets", &sets) &&
            !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
        }

        if (!of_property_read_u32(node, "d-cache-size", &size) &&
            !of_property_read_u32(node, "d-cache-sets", &sets) &&
            !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
        }
}
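
/*
 * Count the cache levels and leaves described in the devicetree so that the
 * generic cacheinfo code knows how many entries to allocate for this CPU.
 */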
int init_cache_level(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *prev = NULL;
        int levels = 0, leaves = 0, level;

        if (of_property_read_bool(np, "cache-size"))
                ++leaves;
        if (of_property_read_bool(np, "i-cache-size"))
                ++leaves;
        if (of_property_read_bool(np, "d-cache-size"))
                ++leaves;
        if (leaves > 0)
                levels = 1;

        prev = np;
        while ((np = of_find_next_cache_node(np))) {
                of_node_put(prev);
                prev = np;

                if (!of_device_is_compatible(np, "cache"))
                        break;
                if (of_property_read_u32(np, "cache-level", &level))
                        break;
                if (level <= levels)
                        break;

                if (of_property_read_bool(np, "cache-size"))
                        ++leaves;
                if (of_property_read_bool(np, "i-cache-size"))
                        ++leaves;
                if (of_property_read_bool(np, "d-cache-size"))
                        ++leaves;
                levels = level;
        }
        of_node_put(np);

        this_cpu_ci->num_levels = levels;
        this_cpu_ci->num_leaves = leaves;

        return 0;
}
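
/*
 * Fill the cacheinfo leaves for @cpu: level 1 caches come from the CPU node
 * itself, higher levels from walking the next-level-cache chain.
 */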
int populate_cache_leaves(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *prev = NULL;
        int levels = 1, level = 1;

        /* Level 1 caches in cpu node */
        fill_cacheinfo(&this_leaf, np, level);

        /* Next level caches in cache nodes */
        prev = np;
        while ((np = of_find_next_cache_node(np))) {
                of_node_put(prev);
                prev = np;

                if (!of_device_is_compatible(np, "cache"))
                        break;
                if (of_property_read_u32(np, "cache-level", &level))
                        break;
                if (level <= levels)
                        break;

                fill_cacheinfo(&this_leaf, np, level);
                levels = level;
        }
        of_node_put(np);

        return 0;
}