paging_tmpl.h

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #else
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
        #define PT_MAX_FULL_LEVELS 2
#else
        #error Invalid PTTYPE value
#endif
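
/*
 * A sketch of how this template is meant to be instantiated (the actual
 * include site lives in mmu.c and may differ in detail):
 *
 *      #define PTTYPE 64
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 *      #define PTTYPE 32
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 */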

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;                              /* current level of the walk */
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];    /* gfn of the guest page table at each level */
        pt_element_t *table;                    /* kmap'ed guest page table being walked */
        pt_element_t *ptep;                     /* final guest pte found by the walk */
        pt_element_t inherited_ar;              /* access rights accumulated from higher levels */
        gfn_t gfn;                              /* gfn the translation resolves to */
        u32 error_code;                         /* page fault error code on failure */
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr,
                            int write_fault, int user_fault, int fetch_fault)
{
        hpa_t hpa;
        struct kvm_memory_slot *slot;
        pt_element_t *ptep;
        pt_element_t root;
        gfn_t table_gfn;

        pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
        walker->level = vcpu->mmu.root_level;
        walker->table = NULL;
        root = vcpu->cr3;
#if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
                /* PAE: the four page directory pointers are cached in vcpu->pdptrs */
                walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
                root = *walker->ptep;
                if (!(root & PT_PRESENT_MASK))
                        goto not_present;
                --walker->level;
        }
#endif
        table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
        walker->table_gfn[walker->level - 1] = table_gfn;
        pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                 walker->level - 1, table_gfn);
        slot = gfn_to_memslot(vcpu->kvm, table_gfn);
        hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
        walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

        walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

        for (;;) {
                int index = PT_INDEX(addr, walker->level);
                hpa_t paddr;

                ptep = &walker->table[index];
                ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
                       ((unsigned long)ptep & PAGE_MASK));

                if (!is_present_pte(*ptep))
                        goto not_present;

                if (write_fault && !is_writeble_pte(*ptep))
                        if (user_fault || is_write_protection(vcpu))
                                goto access_error;

                if (user_fault && !(*ptep & PT_USER_MASK))
                        goto access_error;

#if PTTYPE == 64
                if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
                        goto access_error;
#endif

                if (!(*ptep & PT_ACCESSED_MASK)) {
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        *ptep |= PT_ACCESSED_MASK;
                }

                if (walker->level == PT_PAGE_TABLE_LEVEL) {
                        walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        break;
                }

                if (walker->level == PT_DIRECTORY_LEVEL
                    && (*ptep & PT_PAGE_SIZE_MASK)
                    && (PTTYPE == 64 || is_pse(vcpu))) {
                        /* large (directory level) guest page */
                        walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
                        break;
                }

                /* access rights are accumulated by ANDing each level's pte */
                if (walker->level != 3 || is_long_mode(vcpu))
                        walker->inherited_ar &= walker->table[index];
                table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
                paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
                kunmap_atomic(walker->table, KM_USER0);
                walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
                                            KM_USER0);
                --walker->level;
                walker->table_gfn[walker->level - 1] = table_gfn;
                pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                         walker->level - 1, table_gfn);
        }
        walker->ptep = ptep;
        pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
        return 1;

not_present:
        walker->error_code = 0;
        goto err;

access_error:
        walker->error_code = PFERR_PRESENT_MASK;

err:
        if (write_fault)
                walker->error_code |= PFERR_WRITE_MASK;
        if (user_fault)
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault)
                walker->error_code |= PFERR_FETCH_MASK;
        return 0;
}
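
/*
 * Drop the kmap_atomic() mapping of the last guest page table page
 * taken by walk_addr() above.
 */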
static void FNAME(release_walker)(struct guest_walker *walker)
{
        if (walker->table)
                kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
                                        struct guest_walker *walker)
{
        mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}
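
/*
 * set_pte/set_pde below propagate a guest pte/pde into a freshly
 * allocated shadow pte: the PT_PTE_COPY_MASK bits are copied verbatim,
 * and the remaining setup is delegated to set_pte_common() in mmu.c
 * (whose exact responsibilities are not spelled out here).
 */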
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
                           u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pte;
        *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
        set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
                       guest_pte & PT_DIRTY_MASK, access_bits, gfn);
}
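
/*
 * For 32-bit guests with the PSE-36 extension, a few low bits of a 4MB
 * pde supply physical address bits above bit 31; the PT32_DIR_PSE36
 * shift in set_pde() below reconstructs them (assuming the usual
 * PSE-36 layout, where bit 13 of the pde becomes physical bit 32).
 */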
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
                           u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
        gpa_t gaddr;

        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pde;
        gaddr = (gpa_t)gfn << PAGE_SHIFT;
        if (PTTYPE == 32 && is_cpuid_PSE36())
                gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
                        (32 - PT32_DIR_PSE36_SHIFT);
        *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
        set_pte_common(vcpu, shadow_pte, gaddr,
                       guest_pde & PT_DIRTY_MASK, access_bits, gfn);
}
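
/*
 * Note on fetch() below: when the guest uses a large (directory level)
 * page, the shadow hierarchy still goes down to a last-level page
 * table.  Such a shadow page does not shadow any guest page table,
 * which is what the "metaphysical" flag passed to kvm_mmu_get_page()
 * appears to mark.
 */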
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *walker)
{
        hpa_t shadow_addr;
        int level;
        u64 *prev_shadow_ent = NULL;
        pt_element_t *guest_ent = walker->ptep;

        if (!is_present_pte(*guest_ent))
                return NULL;

        shadow_addr = vcpu->mmu.root_hpa;
        level = vcpu->mmu.shadow_root_level;
        if (level == PT32E_ROOT_LEVEL) {
                shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
                --level;
        }

        for (; ; level--) {
                u32 index = SHADOW_PT_INDEX(addr, level);
                u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                struct kvm_mmu_page *shadow_page;
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;

                if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
                                return shadow_ent;
                        shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
                        prev_shadow_ent = shadow_ent;
                        continue;
                }

                if (level == PT_PAGE_TABLE_LEVEL) {
                        if (walker->level == PT_DIRECTORY_LEVEL) {
                                if (prev_shadow_ent)
                                        *prev_shadow_ent |= PT_SHADOW_PS_MARK;
                                FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
                                               walker->inherited_ar,
                                               walker->gfn);
                        } else {
                                ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
                                FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
                                               walker->inherited_ar,
                                               walker->gfn);
                        }
                        return shadow_ent;
                }

                if (level - 1 == PT_PAGE_TABLE_LEVEL
                    && walker->level == PT_DIRECTORY_LEVEL) {
                        metaphysical = 1;
                        table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                } else {
                        metaphysical = 0;
                        table_gfn = walker->table_gfn[level - 2];
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
                                               metaphysical, shadow_ent);
                shadow_addr = shadow_page->page_hpa;
                shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
                *shadow_ent = shadow_pte;
                prev_shadow_ent = shadow_ent;
        }
}

/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
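/*
 * Return value, as read from the function body: nonzero when the write
 * can now proceed through the shadow pte, 0 when it cannot (either a
 * guest-visible fault, or a write to a shadowed guest page table, in
 * which case *write_pt is set so that the access ends up emulated).
 */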
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
                               u64 *shadow_ent,
                               struct guest_walker *walker,
                               gva_t addr,
                               int user,
                               int *write_pt)
{
        pt_element_t *guest_ent;
        int writable_shadow;
        gfn_t gfn;
        struct kvm_mmu_page *page;

        if (is_writeble_pte(*shadow_ent))
                return !user || (*shadow_ent & PT_USER_MASK);

        writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
        if (user) {
                /*
                 * User mode access.  Fail if it's a kernel page or a read-only
                 * page.
                 */
                if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
                        return 0;
                ASSERT(*shadow_ent & PT_USER_MASK);
        } else
                /*
                 * Kernel mode access.  Fail if it's a read-only page and
                 * supervisor write protection is enabled.
                 */
                if (!writable_shadow) {
                        if (is_write_protection(vcpu))
                                return 0;
                        *shadow_ent &= ~PT_USER_MASK;
                }

        guest_ent = walker->ptep;

        if (!is_present_pte(*guest_ent)) {
                *shadow_ent = 0;
                return 0;
        }

        gfn = walker->gfn;
        if (user) {
                /*
                 * Usermode page faults won't be for page table updates.
                 */
                while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
                        pgprintk("%s: zap %lx %x\n",
                                 __FUNCTION__, gfn, page->role.word);
                        kvm_mmu_zap_page(vcpu, page);
                }
        } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
                pgprintk("%s: found shadow page for %lx, marking ro\n",
                         __FUNCTION__, gfn);
                mark_page_dirty(vcpu->kvm, gfn);
                FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
                *guest_ent |= PT_DIRTY_MASK;
                *write_pt = 1;
                return 0;
        }
        mark_page_dirty(vcpu->kvm, gfn);
        *shadow_ent |= PT_WRITABLE_MASK;
        FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
        *guest_ent |= PT_DIRTY_MASK;
        rmap_add(vcpu, shadow_ent);

        return 1;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                             u32 error_code)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        int fetch_fault = error_code & PFERR_FETCH_MASK;
        struct guest_walker walker;
        u64 *shadow_pte;
        int fixed;
        int write_pt = 0;
        int r;

        pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the shadow pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, walker.error_code);
                FNAME(release_walker)(&walker);
                return 0;
        }

        shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
        pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
                 shadow_pte, *shadow_pte);

        /*
         * Update the shadow pte.
         */
        if (write_fault)
                fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
                                            user_fault, &write_pt);
        else
                fixed = fix_read_pf(shadow_pte);

        pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
                 shadow_pte, *shadow_pte);

        FNAME(release_walker)(&walker);

        /*
         * mmio: emulate if accessible, otherwise it's a guest fault.
         */
        if (is_io_pte(*shadow_pte))
                return 1;

        ++kvm_stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");

        return write_pt;
}
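
/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest page tables (read access, no fault injection).
 * Returns UNMAPPED_GVA if the address is not mapped by the guest.
 */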
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
        if (r) {
                gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
                gpa |= vaddr & ~PAGE_MASK;
        }
        FNAME(release_walker)(&walker);

        return gpa;
}
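
/*
 * Undo the per-PTTYPE definitions so that this file can be included a
 * second time with a different PTTYPE.
 */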
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS