book3s_xics.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */
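
/*
 * A sketch of the lock-free ICP update pattern used throughout this
 * file (illustrative only; see icp_try_update() and its callers for
 * the real thing):
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state from old_state ...
 *	} while (!icp_try_update(icp, old_state, new_state, change_self));
 *
 * icp_try_update() recomputes the interrupt output condition (out_ee)
 * and fails, forcing a retry, if another update raced with ours.
 */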

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Take other values the same as 1, consistent with original code.
	 * maybe WARN here?
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting already set LSI ... */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0, this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}

int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);

/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);

	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xics_debug);

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;
	/* If PENDING, set P in case P is not saved because of old code */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xics_release(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;

	pr_devel("Releasing xics device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device.  Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xics->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that execution is
		 * excluded for the vcpu until the ICP was freed. When the vcpu
		 * can execute again, vcpu->arch.icp and vcpu->arch.irq_type
		 * have been cleared and the vcpu will not be going into the
		 * XICS code anymore.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xics_free_icp(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++) {
		kfree(xics->ics[i]);
		xics->ics[i] = NULL;
	}
	/*
	 * A reference of the kvmppc_xics pointer is now kept under
	 * the xics_device pointer of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}

static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
{
	struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
	struct kvmppc_xics *xics = *kvm_xics_device;

	if (!xics) {
		xics = kzalloc(sizeof(*xics), GFP_KERNEL);
		*kvm_xics_device = xics;
	} else {
		memset(xics, 0, sizeof(*xics));
	}

	return xics;
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xics for partition\n");

	/* Already there ? */
	if (kvm->arch.xics)
		return -EEXIST;

	xics = kvmppc_xics_get_device(kvm);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;
	kvm->arch.xics = xics;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.release = kvmppc_xics_release,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);