// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>

union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g	 : 1; /* Subspace Group Control */
		unsigned long p	 : 1; /* Private Space Control */
		unsigned long s	 : 1; /* Storage-Alteration-Event Control */
		unsigned long x	 : 1; /* Space-Switch-Event Control */
		unsigned long r	 : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
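
/*
 * The designation type selects the topmost table of a DAT walk: an
 * ASCE_TYPE_REGION1 walk passes through all four table levels, while an
 * ASCE_TYPE_SEGMENT walk starts directly at the segment table. The first
 * switch in guest_translate() below checks that the index bits of the
 * unused higher levels are zero in the latter cases.
 */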

union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p	 : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i	 : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p	 : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i	 : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};

struct region3_table_entry_fc0 {
	unsigned long sto : 52; /* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p	 : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i	 : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f	 : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p	 : 1; /* DAT-Protection Bit */
	unsigned long iep : 1; /* Instruction-Execution-Protection */
	unsigned long	 : 2;
	unsigned long i	 : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i	 : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};

struct segment_entry_fc0 {
	unsigned long pto : 53; /* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p	 : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i	 : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f	 : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p	 : 1; /* DAT-Protection Bit */
	unsigned long iep : 1; /* Instruction-Execution-Protection */
	unsigned long	 : 2;
	unsigned long i	 : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i	 : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};

enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z	 : 1; /* Zero Bit */
		unsigned long i	 : 1; /* Page-Invalid Bit */
		unsigned long p	 : 1; /* DAT-Protection Bit */
		unsigned long iep : 1; /* Instruction-Execution-Protection */
		unsigned long	 : 8;
	};
};

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
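
/*
 * Bit layout, derived from the field widths above: a 64-bit virtual address
 * splits into rfx(11) | rsx(11) | rtx(11) | sx(11) | px(8) | bx(12).
 * The rfx01/rsx01/rtx01/sx01 fields alias the two most significant bits of
 * each index; these are what get compared against the 2-bit table-length
 * and table-offset fields during the walk.
 */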

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p : 1;
		u32 alesn : 8;
		u32 alen : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32	: 1;
		u32 alo	: 24;
		u32 all	: 7;
	};
};

struct ale {
	unsigned long i	    : 1;  /* ALEN-Invalid Bit */
	unsigned long	    : 5;
	unsigned long fo    : 1;  /* Fetch-Only Bit */
	unsigned long p	    : 1;  /* Private Bit */
	unsigned long alesn : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
	unsigned long	    : 32;
	unsigned long	    : 1;
	unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long	    : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i	    : 1;  /* ASX-Invalid Bit */
	unsigned long ato   : 29; /* Authority-Table Origin */
	unsigned long	    : 1;
	unsigned long b	    : 1;  /* Base-Space Bit */
	unsigned long ax    : 16; /* Authorization Index */
	unsigned long atl   : 12; /* Authority-Table Length */
	unsigned long	    : 2;
	unsigned long ca    : 1;  /* Controlled-ASN Bit */
	unsigned long ra    : 1;  /* Reusable-ASN Bit */
	unsigned long asce  : 64; /* Address-Space-Control Element */
	unsigned long ald   : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};

int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII) {
		int rc;

		read_lock(&vcpu->kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
		read_unlock(&vcpu->kvm->arch.sca_lock);
		return rc;
	}
	return vcpu->kvm->arch.ipte_lock_count != 0;
}

static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}

static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}

void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}
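
/*
 * Typical usage: code that walks guest DAT tables brackets the walk with
 * the IPTE lock, so the guest cannot invalidate entries concurrently, e.g.
 * (sketch, as done by check_gva_range() further down):
 *
 *	ipte_lock(vcpu);
 *	rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
 *	ipte_unlock(vcpu);
 */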

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;
			authority_table_addr = aste.ato * 4 + eax / 4;
			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;
			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
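
/*
 * For reference: access register 0 and ALET 0 always designate the primary
 * space (CR1), and ALET 1 the secondary space (CR7); only other ALET values
 * go through the access-list lookup above. An access register thus selects
 * an address space indirectly, via ALE and ASTE, rather than holding an
 * ASCE itself.
 */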

struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;	 /* Access Exception Fetch/Store Indication */
	unsigned long	   : 2;
	unsigned long b56  : 1;
	unsigned long	   : 3;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2;	 /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2	 /* Exception was due to fetch operation */
};

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
};

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     u8 ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_IEP:
			tec->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			tec->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			tec->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}
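
/*
 * Example: a store that hits DAT protection is reported as
 *
 *	trans_exc(vcpu, PGM_PROTECTION, gva, ar, GACC_STORE, PROT_TYPE_DAT);
 *
 * which sets bit 61 in the translation-exception code, records the page
 * address, FSI_STORE and the address-space identifier via the fallthrough
 * cases, and returns PGM_PROTECTION to the caller.
 */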

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
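
/*
 * Worked example (sketch): with a segment-table ASCE (asce.dt ==
 * ASCE_TYPE_SEGMENT) the walk above visits just two tables. The segment
 * table entry is fetched from asce.origin * PAGE_SIZE + vaddr.sx * 8, the
 * page table entry from ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8, and
 * the result is pte.pfra combined with the byte index bx, which the
 * raddress union keeps from the original gva.
 */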

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}
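
/*
 * The mask works because 0x11ff sets exactly bits 0-8 and bit 12: clearing
 * those bits must leave zero, which holds precisely for 0x0..0x1ff (0..511)
 * and 0x1000..0x11ff (4096..4607).
 */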

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}

static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	int lap_enabled, rc = 0;
	enum prot_type prot;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		ga &= PAGE_MASK;
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				rc = PGM_ADDRESSING;
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!pages)
		return -ENOMEM;
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
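
/*
 * Callers normally reach this function through the read_guest() and
 * write_guest() wrappers in gaccess.h, which pass GACC_FETCH or GACC_STORE
 * respectively, e.g. (sketch):
 *
 *	rc = read_guest(vcpu, ga, ar, &operand, sizeof(operand));
 *
 * The nr_pages computation above counts the pages an access crosses: an
 * 8-byte read at page offset 4090 spans two pages and is therefore split
 * into two kvm_read_guest() calls.
 */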

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned long _len, gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		/* GACC_FETCH is zero, so any other mode requests a store */
		if (mode)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
		len -= _len;
		gra += _len;
		data += _len;
	}
	return rc;
}

/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
			    unsigned long *gpa, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	enum prot_type prot;
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (mode == GACC_STORE)
			return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
					 mode, PROT_TYPE_LA);
	}

	if (psw_bits(*psw).dat && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
		if (rc > 0)
			return trans_exc(vcpu, rc, gva, 0, mode, prot);
	} else {
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			return trans_exc(vcpu, PGM_ADDRESSING, gva, 0, mode,
					 PROT_TYPE_LA);
	}

	return rc;
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @ar: access register
 * @length: length of the range in bytes
 * @mode: access mode
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode)
{
	unsigned long gpa;
	unsigned long currlen;
	int rc = 0;

	ipte_lock(vcpu);
	while (length > 0 && !rc) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
		gva += currlen;
		length -= currlen;
	}
	ipte_unlock(vcpu);

	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: set to 1 if a DAT-protection bit was encountered anywhere
 *	 during the walk
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
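
/*
 * Note on *fake: when the guest uses a real-space ASCE or a huge frame
 * (EDAT-1/EDAT-2 format control), there is no lower-level guest table to
 * read. The walk above then fabricates table entries that simply point into
 * the contiguous guest memory block, so the shadow hierarchy can still be
 * built down to 4k page granularity.
 */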

/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	mmap_read_unlock(sg->mm);
	return rc;
}