// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 Google, Inc
 *
 * Memory Type Range Registers - these are used to tell the CPU whether
 * memory is cacheable and if so the cache write mode to use.
 *
 * These can speed up booting. See the mtrr command.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */

/*
 * Note that any console output (e.g. debug()) in this file will likely fail
 * since the MTRR registers are sometimes in flux.
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <sort.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>

DECLARE_GLOBAL_DATA_PTR;

/* Prepare to adjust MTRRs */
void mtrr_open(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	if (do_caches) {
		state->enable_cache = dcache_status();

		if (state->enable_cache)
			disable_caches();
	}
	state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
}

/* Clean up after adjusting MTRRs, and enable them */
void mtrr_close(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
	if (do_caches && state->enable_cache)
		enable_caches();
}

/*
 * Program one variable MTRR; the mask encoding means @size must be a power
 * of two and @start must be aligned to it
 */
static void set_var_mtrr(uint reg, uint type, uint64_t start, uint64_t size)
{
	u64 mask;

	wrmsrl(MTRR_PHYS_BASE_MSR(reg), start | type);
	mask = ~(size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask | MTRR_PHYS_MASK_VALID);
}
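
/*
 * Worked example (illustrative, assuming CONFIG_CPU_ADDR_BITS = 36):
 * set_var_mtrr(0, MTRR_TYPE_WRBACK, 0, 0x10000000) marks 256MB at 0 as
 * write-back. It writes base = 0 | 6 = 0x6 and mask = (~0xfffffff &
 * 0xfffffffff) = 0xff0000000, with MTRR_PHYS_MASK_VALID (bit 11) set in
 * the mask MSR.
 */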

void mtrr_read_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	int i;

	for (i = 0; i < reg_count; i++) {
		info->mtrr[i].base = native_read_msr(MTRR_PHYS_BASE_MSR(i));
		info->mtrr[i].mask = native_read_msr(MTRR_PHYS_MASK_MSR(i));
	}
}

void mtrr_write_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	struct mtrr_state state;
	int i;

	for (i = 0; i < reg_count; i++) {
		mtrr_open(&state, true);
		wrmsrl(MTRR_PHYS_BASE_MSR(i), info->mtrr[i].base);
		wrmsrl(MTRR_PHYS_MASK_MSR(i), info->mtrr[i].mask);
		mtrr_close(&state, true);
	}
}

static void write_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_write_all(info);
}

static void read_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_read_all(info);
}

/**
 * mtrr_copy_to_aps() - Copy the MTRRs from the boot CPU to other CPUs
 *
 * @return 0 on success, -ve on failure
 */
static int mtrr_copy_to_aps(void)
{
	struct mtrr_info info;
	int ret;

	ret = mp_run_on_cpus(MP_SELECT_BSP, read_mtrrs, &info);
	/* -ENXIO means multiprocessing is not enabled, so nothing to copy */
	if (ret == -ENXIO)
		return 0;
	else if (ret)
		return log_msg_ret("bsp", ret);

	ret = mp_run_on_cpus(MP_SELECT_APS, write_mtrrs, &info);
	if (ret)
		return log_msg_ret("aps", ret);

	return 0;
}

static int h_comp_mtrr(const void *p1, const void *p2)
{
	const struct mtrr_request *req1 = p1;
	const struct mtrr_request *req2 = p2;

	/* Compare via a ternary so the 64-bit difference is not truncated */
	s64 diff = req1->start - req2->start;

	return diff < 0 ? -1 : diff > 0 ? 1 : 0;
}

int mtrr_commit(bool do_caches)
{
	struct mtrr_request *req = gd->arch.mtrr_req;
	struct mtrr_state state;
	int ret;
	int i;

	debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
	      gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	debug("open\n");
	mtrr_open(&state, do_caches);
	debug("open done\n");
	qsort(req, gd->arch.mtrr_req_count, sizeof(*req), h_comp_mtrr);
	for (i = 0; i < gd->arch.mtrr_req_count; i++, req++)
		set_var_mtrr(i, req->type, req->start, req->size);

	/* Clear the ones that are unused */
	debug("clear\n");
	for (; i < mtrr_get_var_count(); i++)
		wrmsrl(MTRR_PHYS_MASK_MSR(i), 0);
	debug("close\n");
	mtrr_close(&state, do_caches);
	debug("mtrr done\n");

	if (gd->flags & GD_FLG_RELOC) {
		ret = mtrr_copy_to_aps();
		if (ret)
			return log_msg_ret("copy", ret);
	}

	return 0;
}

int mtrr_add_request(int type, uint64_t start, uint64_t size)
{
	struct mtrr_request *req;
	uint64_t mask;

	debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
		return -ENOSPC;
	req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
	req->type = type;
	req->start = start;
	req->size = size;
	debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
	      req->type, req->start, req->size);
	mask = ~(req->size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	mask |= MTRR_PHYS_MASK_VALID;
	debug(" %016llx %016llx\n", req->start | req->type, mask);

	return 0;
}
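
/*
 * Typical usage (a sketch; the actual callers live outside this file):
 * queue up regions during early boot, then program them all in one pass:
 *
 *	mtrr_add_request(MTRR_TYPE_WRBACK, 0, gd->ram_size);
 *	mtrr_commit(true);
 */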

int mtrr_get_var_count(void)
{
	return msr_read(MSR_MTRR_CAP_MSR).lo & MSR_MTRR_CAP_VCNT;
}
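
/*
 * Note: MTRRcap VCNT is commonly 8 on Intel CPUs, which is where the 0-7
 * register range mentioned for struct mtrr_oper below comes from, but the
 * value is CPU-specific, so callers should rely on mtrr_get_var_count().
 */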

static int get_free_var_mtrr(void)
{
	struct msr_t maskm;
	int vcnt;
	int i;

	vcnt = mtrr_get_var_count();

	/* Identify the first var mtrr which is not valid */
	for (i = 0; i < vcnt; i++) {
		maskm = msr_read(MTRR_PHYS_MASK_MSR(i));
		if ((maskm.lo & MTRR_PHYS_MASK_VALID) == 0)
			return i;
	}

	/* No free var mtrr */
	return -ENOSPC;
}

int mtrr_set_next_var(uint type, uint64_t start, uint64_t size)
{
	int mtrr;

	mtrr = get_free_var_mtrr();
	if (mtrr < 0)
		return mtrr;

	set_var_mtrr(mtrr, type, start, size);
	debug("MTRR %x: start=%x, size=%x\n", mtrr, (uint)start, (uint)size);

	return 0;
}
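
/*
 * Example (illustrative addresses, not from this file): mark a 16MB boot
 * ROM at the top of 4GB as write-protect in the next free slot, if any:
 *
 *	mtrr_set_next_var(MTRR_TYPE_WRPROT, 0xff000000, SZ_16M);
 */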

/** enum mtrr_opcode - supported operations for mtrr_do_oper() */
enum mtrr_opcode {
	MTRR_OP_SET,
	MTRR_OP_SET_VALID,
};

/**
 * struct mtrr_oper - An MTRR operation to perform on a CPU
 *
 * @opcode: Indicates operation to perform
 * @reg: MTRR reg number to select (0-7, -1 = all)
 * @valid: Valid value to write for MTRR_OP_SET_VALID
 * @base: Base value to write for MTRR_OP_SET
 * @mask: Mask value to write for MTRR_OP_SET
 */
struct mtrr_oper {
	enum mtrr_opcode opcode;
	int reg;
	bool valid;
	u64 base;
	u64 mask;
};

static void mtrr_do_oper(void *arg)
{
	struct mtrr_oper *oper = arg;
	u64 mask;

	switch (oper->opcode) {
	case MTRR_OP_SET_VALID:
		mask = native_read_msr(MTRR_PHYS_MASK_MSR(oper->reg));
		if (oper->valid)
			mask |= MTRR_PHYS_MASK_VALID;
		else
			mask &= ~MTRR_PHYS_MASK_VALID;
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), mask);
		break;
	case MTRR_OP_SET:
		wrmsrl(MTRR_PHYS_BASE_MSR(oper->reg), oper->base);
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), oper->mask);
		break;
	}
}

static int mtrr_start_op(int cpu_select, struct mtrr_oper *oper)
{
	struct mtrr_state state;
	int ret;

	mtrr_open(&state, true);
	ret = mp_run_on_cpus(cpu_select, mtrr_do_oper, oper);
	mtrr_close(&state, true);
	if (ret)
		return log_msg_ret("run", ret);

	return 0;
}

int mtrr_set_valid(int cpu_select, int reg, bool valid)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET_VALID;
	oper.reg = reg;
	oper.valid = valid;

	return mtrr_start_op(cpu_select, &oper);
}

int mtrr_set(int cpu_select, int reg, u64 base, u64 mask)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET;
	oper.reg = reg;
	oper.base = base;
	oper.mask = mask;

	return mtrr_start_op(cpu_select, &oper);
}
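
/*
 * Example (illustrative values): these helpers let callers such as the
 * mtrr command update one register on every CPU at once, e.g. to disable
 * variable MTRR 2 everywhere:
 *
 *	mtrr_set_valid(MP_SELECT_ALL, 2, false);
 *
 * or reprogram it with mtrr_set(MP_SELECT_ALL, 2, base, mask).
 */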