link.c

// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.

#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"

#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22 /* Each SPA is 4 MB */

#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))

#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000

struct pe_data {
	struct mm_struct *mm;
	/* callback to trigger when a translation fault occurs */
	void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
	/* opaque pointer to be passed to the above callback */
	void *xsl_err_data;
	struct rcu_head rcu;
};
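
/*
 * The Shared Process Area (SPA) for a link: the table of process
 * elements shared with the device, plus everything needed to service
 * the translation fault ("xsl") interrupt for that link.
 */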
struct spa {
	struct ocxl_process_element *spa_mem;
	int spa_order;
	struct mutex spa_lock;
	struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
	char *irq_name;
	int virq;
	void __iomem *reg_dsisr;
	void __iomem *reg_dar;
	void __iomem *reg_tfc;
	void __iomem *reg_pe_handle;
	/*
	 * The following fields are used by the memory fault
	 * interrupt handler. We can only have one interrupt at a
	 * time. The NPU won't raise another interrupt until the
	 * previous one has been ack'd by writing to the TFC register
	 */
	struct xsl_fault {
		struct work_struct fault_work;
		u64 pe;
		u64 dsisr;
		u64 dar;
		struct pe_data pe_data;
	} xsl_fault;
};

/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed
 */
struct ocxl_link {
	struct list_head list;
	struct kref ref;
	int domain;
	int bus;
	int dev;
	atomic_t irq_available;
	struct spa *spa;
	void *platform_data;
};

static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

enum xsl_response {
	CONTINUE,
	ADDRESS_ERROR,
	RESTART,
};

static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
	u64 reg;

	*dsisr = in_be64(spa->reg_dsisr);
	*dar = in_be64(spa->reg_dar);
	reg = in_be64(spa->reg_pe_handle);
	*pe = reg & SPA_PE_MASK;
}

static void ack_irq(struct spa *spa, enum xsl_response r)
{
	u64 reg = 0;

	/* continue is not supported */
	if (r == RESTART)
		reg = PPC_BIT(31);
	else if (r == ADDRESS_ERROR)
		reg = PPC_BIT(30);
	else
		WARN(1, "Invalid irq response %d\n", r);

	if (reg) {
		trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
				spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
		out_be64(spa->reg_tfc, reg);
	}
}
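
/*
 * Bottom half of the translation fault interrupt: resolves the fault
 * with copro_handle_mm_fault() in process context, then acks the
 * interrupt so the NPU can raise the next one. Releases the mm_users
 * reference taken by the top half.
 */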
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
	vm_fault_t flt = 0;
	unsigned long access, flags, inv_flags = 0;
	enum xsl_response r;
	struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
					fault_work);
	struct spa *spa = container_of(fault, struct spa, xsl_fault);
	int rc;

	/*
	 * We must release a reference on mm_users whenever exiting this
	 * function (taken in the memory fault interrupt handler)
	 */
	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
				&flt);
	if (rc) {
		pr_debug("copro_handle_mm_fault failed: %d\n", rc);
		if (fault->pe_data.xsl_err_cb) {
			fault->pe_data.xsl_err_cb(
				fault->pe_data.xsl_err_data,
				fault->dar, fault->dsisr);
		}
		r = ADDRESS_ERROR;
		goto ack;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash
		 * since current->trap is not a 0x400 or 0x300, so
		 * just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (fault->dsisr & SPA_XSL_S)
			access |= _PAGE_WRITE;

		if (get_region_id(fault->dar) != USER_REGION_ID)
			access |= _PAGE_PRIVILEGED;

		local_irq_save(flags);
		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
			inv_flags);
		local_irq_restore(flags);
	}
	r = RESTART;
ack:
	mmput(fault->pe_data.mm);
	ack_irq(spa, r);
}
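
/*
 * Top half of the translation fault interrupt: reads the XSL fault
 * registers, looks up the faulting context in the radix tree and, if
 * an mm is attached, takes a reference on mm_users and schedules the
 * bottom half. Anything it can't handle is acked as an address error.
 */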
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
	struct ocxl_link *link = (struct ocxl_link *) data;
	struct spa *spa = link->spa;
	u64 dsisr, dar, pe_handle;
	struct pe_data *pe_data;
	struct ocxl_process_element *pe;
	int pid;
	bool schedule = false;

	read_irq(spa, &dsisr, &dar, &pe_handle);
	trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

	WARN_ON(pe_handle > SPA_PE_MASK);
	pe = spa->spa_mem + pe_handle;
	pid = be32_to_cpu(pe->pid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and fail silently, so it should be ok.
	 */
	if (!(dsisr & SPA_XSL_TF)) {
		WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	rcu_read_lock();
	pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		/*
		 * Could only happen if the driver didn't notify the
		 * AFU about PASID termination before removing the PE,
		 * or the AFU didn't wait for all memory access to
		 * have completed.
		 *
		 * Either way, we fail early, but we shouldn't log an
		 * error message, as it is a valid (if unexpected)
		 * scenario
		 */
		rcu_read_unlock();
		pr_debug("Unknown mm context for xsl interrupt\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	if (!pe_data->mm) {
		/*
		 * translation fault from a kernel context - an OpenCAPI
		 * device tried to access a bad kernel address
		 */
		rcu_read_unlock();
		pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}
	WARN_ON(pe_data->mm->context.id != pid);

	if (mmget_not_zero(pe_data->mm)) {
		spa->xsl_fault.pe = pe_handle;
		spa->xsl_fault.dar = dar;
		spa->xsl_fault.dsisr = dsisr;
		spa->xsl_fault.pe_data = *pe_data;
		schedule = true;
		/* mm_users count released by bottom half */
	}
	rcu_read_unlock();
	if (schedule)
		schedule_work(&spa->xsl_fault.fault_work);
	else
		ack_irq(spa, ADDRESS_ERROR);
	return IRQ_HANDLED;
}

static void unmap_irq_registers(struct spa *spa)
{
	pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
				spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
	return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
				&spa->reg_tfc, &spa->reg_pe_handle);
}
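
/*
 * Set up the translation fault interrupt for the link: map the XSL
 * registers, create the interrupt mapping and register
 * xsl_fault_handler() on it.
 */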
static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
{
	struct spa *spa = link->spa;
	int rc;
	int hwirq;

	rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
	if (rc)
		return rc;

	rc = map_irq_registers(dev, spa);
	if (rc)
		return rc;

	spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
				link->domain, link->bus, link->dev);
	if (!spa->irq_name) {
		dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
		rc = -ENOMEM;
		goto err_xsl;
	}
	/*
	 * At some point, we'll need to look into allowing a higher
	 * number of interrupts. Could we have an IRQ domain per link?
	 */
	spa->virq = irq_create_mapping(NULL, hwirq);
	if (!spa->virq) {
		dev_err(&dev->dev,
			"irq_create_mapping failed for translation interrupt\n");
		rc = -EINVAL;
		goto err_name;
	}

	dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

	rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
			link);
	if (rc) {
		dev_err(&dev->dev,
			"request_irq failed for translation interrupt: %d\n",
			rc);
		rc = -EINVAL;
		goto err_mapping;
	}
	return 0;

err_mapping:
	irq_dispose_mapping(spa->virq);
err_name:
	kfree(spa->irq_name);
err_xsl:
	unmap_irq_registers(spa);
	return rc;
}

static void release_xsl_irq(struct ocxl_link *link)
{
	struct spa *spa = link->spa;

	if (spa->virq) {
		free_irq(spa->virq, link);
		irq_dispose_mapping(spa->virq);
	}
	kfree(spa->irq_name);
	unmap_irq_registers(spa);
}

static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
{
	struct spa *spa;

	spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
	if (!spa)
		return -ENOMEM;

	mutex_init(&spa->spa_lock);
	INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
	INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

	spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
	spa->spa_mem = (struct ocxl_process_element *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
	if (!spa->spa_mem) {
		dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
		kfree(spa);
		return -ENOMEM;
	}
	pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
		link->dev, spa->spa_mem);

	link->spa = spa;
	return 0;
}

static void free_spa(struct ocxl_link *link)
{
	struct spa *spa = link->spa;

	pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
		link->dev);

	if (spa && spa->spa_mem) {
		free_pages((unsigned long) spa->spa_mem, spa->spa_order);
		kfree(spa);
		link->spa = NULL;
	}
}

static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
{
	struct ocxl_link *link;
	int rc;

	link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	kref_init(&link->ref);
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

	rc = alloc_spa(dev, link);
	if (rc)
		goto err_free;

	rc = setup_xsl_irq(dev, link);
	if (rc)
		goto err_spa;

	/* platform specific hook */
	rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
				&link->platform_data);
	if (rc)
		goto err_xsl_irq;

	*out_link = link;
	return 0;

err_xsl_irq:
	release_xsl_irq(link);
err_spa:
	free_spa(link);
err_free:
	kfree(link);
	return rc;
}

static void free_link(struct ocxl_link *link)
{
	release_xsl_irq(link);
	free_spa(link);
	kfree(link);
}
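
/*
 * ocxl_link_setup() - get the link for an opencapi device
 *
 * All PCI functions of a device share the same link, so this either
 * takes a reference on the existing link for the slot or allocates a
 * new one. Callers are expected to balance it with ocxl_link_release(),
 * along these lines (sketch only, arguments illustrative):
 *
 *	void *link_handle;
 *	int rc;
 *
 *	rc = ocxl_link_setup(dev, PE_mask, &link_handle);
 *	if (!rc) {
 *		// ... ocxl_link_add_pe() / ocxl_link_remove_pe() ...
 *		ocxl_link_release(dev, link_handle);
 *	}
 */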
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
	int rc = 0;
	struct ocxl_link *link;

	mutex_lock(&links_list_lock);
	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
			link->bus == dev->bus->number &&
			link->dev == PCI_SLOT(dev->devfn)) {
			kref_get(&link->ref);
			*link_handle = link;
			goto unlock;
		}
	}
	rc = alloc_link(dev, PE_mask, &link);
	if (rc)
		goto unlock;

	list_add(&link->list, &links_list);
	*link_handle = link;
unlock:
	mutex_unlock(&links_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);

static void release_xsl(struct kref *ref)
{
	struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);

	list_del(&link->list);
	/* call platform code before releasing data */
	pnv_ocxl_spa_release(link->platform_data);
	free_link(link);
}
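
/*
 * ocxl_link_release() - drop a reference taken by ocxl_link_setup().
 * The last reference tears the link down through release_xsl() above.
 */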
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;

	mutex_lock(&links_list_lock);
	kref_put(&link->ref, release_xsl);
	mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
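
/*
 * Build the config_state word for a process element: translation mode
 * (hash or radix), 64-bit and problem-state bits, based on the current
 * MSR/LPCR settings.
 */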
static u64 calculate_cfg_state(bool kernel)
{
	u64 state;

	state = SPA_CFG_DR;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		state |= SPA_CFG_TC;
	if (radix_enabled())
		state |= SPA_CFG_XLAT_ror;
	else
		state |= SPA_CFG_XLAT_hpt;
	state |= SPA_CFG_HV;
	if (kernel) {
		if (mfmsr() & MSR_SF)
			state |= SPA_CFG_SF;
	} else {
		state |= SPA_CFG_PR;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			state |= SPA_CFG_SF;
	}
	return state;
}
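
/*
 * ocxl_link_add_pe() - fill in and validate the process element for a
 * PASID in the SPA, so the device can start using that context. For a
 * kernel context, pidr is 0 and no mm is attached. The matching call
 * is ocxl_link_remove_pe().
 */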
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
		u64 amr, struct mm_struct *mm,
		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
		void *xsl_err_data)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc = 0;
	struct pe_data *pe_data;

	BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	mutex_lock(&spa->spa_lock);
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	if (pe->software_state) {
		rc = -EBUSY;
		goto unlock;
	}

	pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
	if (!pe_data) {
		rc = -ENOMEM;
		goto unlock;
	}

	pe_data->mm = mm;
	pe_data->xsl_err_cb = xsl_err_cb;
	pe_data->xsl_err_data = xsl_err_data;

	memset(pe, 0, sizeof(struct ocxl_process_element));
	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	pe->pid = cpu_to_be32(pidr);
	pe->tid = cpu_to_be32(tidr);
	pe->amr = cpu_to_be64(amr);
	pe->software_state = cpu_to_be32(SPA_PE_VALID);

	/*
	 * For user contexts, register a copro so that TLBIs are seen
	 * by the nest MMU. If we have a kernel context, TLBIs are
	 * already global.
	 */
	if (mm)
		mm_context_add_copro(mm);
	/*
	 * Barrier is to make sure PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLBI
	 * invalidation
	 */
	mb();
	radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

	/*
	 * The mm must stay valid for as long as the device uses it. We
	 * lower the count when the context is removed from the SPA.
	 *
	 * We grab mm_count (and not mm_users), as we don't want to
	 * end up in a circular dependency if a process mmaps its
	 * mmio, therefore incrementing the file ref count when
	 * calling mmap(), and forgets to unmap before exiting. In
	 * that scenario, when the kernel handles the death of the
	 * process, the file is not cleaned because unmap was not
	 * called, and the mm wouldn't be freed because we would still
	 * have a reference on mm_users. Incrementing mm_count solves
	 * the problem.
	 */
	if (mm)
		mmgrab(mm);
	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
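
/*
 * ocxl_link_update_pe() - update the thread id of a process element,
 * then flush the NPU context cache so the old entry cannot be reloaded.
 */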
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	pe->tid = cpu_to_be32(tid);

	/*
	 * The barrier makes sure the PE is updated
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	mutex_unlock(&spa->spa_lock);
	return rc;
}
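
/*
 * ocxl_link_remove_pe() - clear the process element for a PASID, flush
 * it from the NPU context cache and release what ocxl_link_add_pe()
 * took (copro registration and mm_count reference).
 */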
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	struct pe_data *pe_data;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	/*
	 * About synchronization with our memory fault handler:
	 *
	 * Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
	 * sure the PASID is no longer in use, including pending
	 * interrupts. However, there's no way to be sure...
	 *
	 * We clear the PE and remove the context from our radix
	 * tree. From that point on, any new interrupt for that
	 * context will fail silently, which is ok. As mentioned
	 * above, that's not expected, but it could happen if the
	 * driver or AFU didn't do the right thing.
	 *
	 * There could still be a bottom half running, but we don't
	 * need to wait/flush, as it is managing a reference count on
	 * the mm it reads from the radix tree.
	 */
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
		rc = -EINVAL;
		goto unlock;
	}

	trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
				be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));

	memset(pe, 0, sizeof(struct ocxl_process_element));
	/*
	 * The barrier makes sure the PE is removed from the SPA
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		WARN(1, "Couldn't find pe data when removing PE\n");
	} else {
		if (pe_data->mm) {
			mm_context_remove_copro(pe_data->mm);
			mmdrop(pe_data->mm);
		}
		kfree_rcu(pe_data, rcu);
	}
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
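
/*
 * ocxl_link_irq_alloc() - allocate a hardware interrupt from the XIVE
 * controller, within the per-link budget (MAX_IRQ_PER_LINK).
 */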
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;
	int irq;

	if (atomic_dec_if_positive(&link->irq_available) < 0)
		return -ENOSPC;

	irq = xive_native_alloc_irq();
	if (!irq) {
		atomic_inc(&link->irq_available);
		return -ENXIO;
	}

	*hw_irq = irq;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
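
/*
 * ocxl_link_free_irq() - free an interrupt allocated with
 * ocxl_link_irq_alloc() and return the slot to the link's budget.
 */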
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
	struct ocxl_link *link = (struct ocxl_link *) link_handle;

	xive_native_free_irq(hw_irq);
	atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);