msm_iommu.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}
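
/*
 * Clock helpers: the interface clock (pclk) must be enabled before the
 * optional core clock (clk); __disable_clocks reverses that order.
 */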
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}
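
/*
 * Bring the global registers and every context bank (0..ncb-1) into a
 * known, disabled state before the hardware is used.
 */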
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}
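
/*
 * io-pgtable tlb_flush_all hook: invalidate the whole TLB for every
 * context bank attached to this domain, with the clocks held across
 * the register writes.
 */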
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}
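
/*
 * Invalidate a range by virtual address, one granule at a time.  Each
 * IOVA written to TLBIVA is tagged with the ASID programmed into the
 * target context bank.
 */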
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};
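
/*
 * Context-bank allocator: find a clear bit in the context map and claim
 * it atomically; retry if another CPU grabs the same bit first.
 */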
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
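
/*
 * Route each of the master's stream IDs (MIDs) to its context bank:
 * clear the M2V mapping, point the MID at the bank, and mark its
 * transactions non-secure with VMID 0 and ASID == context number.
 */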
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}
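
/*
 * Program a context bank from the ARM short-descriptor io-pgtable
 * configuration (TTBR0, TTBCR, PRRR/NMRR), enable hardware table walks
 * and fault reporting, then turn the MMU on for that context.
 */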
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
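
/*
 * Allocate an ARM v7s (short-descriptor) io-pgtable for the domain and
 * publish the page sizes it actually supports back into msm_iommu_ops.
 */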
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}
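
/*
 * Attach: set up the domain's page table, then for every IOMMU whose
 * first registered master matches the device's of_node, claim a context
 * bank per master, program the MID routing and the context registers,
 * and link that IOMMU into the domain's attached list.
 */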
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
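
/*
 * map/unmap simply forward to the io-pgtable ops under the page-table
 * lock; TLB maintenance is handled by iotlb_sync_map and the flush ops
 * rather than inline here.
 */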
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}
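
/*
 * Resolve an IOVA by asking the hardware: invalidate the context TLB,
 * kick off a V2P (virtual-to-physical) lookup and read the result back
 * from PAR, handling both section and supersection results.
 */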
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");
	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
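
/*
 * Record a stream ID from an "iommus" specifier against the master
 * bound to @dev, allocating the master on first use and ignoring
 * duplicate IDs.  Called with msm_iommu_lock held.
 */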
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}
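
/*
 * of_xlate: match the IOMMU referenced by a device's "iommus" phandle
 * and record the stream ID carried in the specifier.  Illustrative DT
 * fragment only (node and label names here are made up, not taken from
 * a real board file); the single specifier cell is the MID:
 *
 *	gfx_iommu: iommu@7500000 {
 *		compatible = "qcom,apq8064-iommu";
 *		...
 *	};
 *
 *	gpu {
 *		iommus = <&gfx_iommu 1>;
 *	};
 */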
static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}
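
/*
 * Context fault interrupt handler: scan the fault status register of
 * every context bank, dump the interesting registers for any bank that
 * faulted, then clear its FSR.
 */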
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the tlb sync operation is implicitly
	 * taken care when the iommu client does a writel before
	 * kick starting the other master.
	 */
	.iotlb_sync = NULL,
	.iotlb_sync_map = msm_iommu_sync_map,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};
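
/*
 * Probe: acquire and prepare the clocks, map the register space, read
 * the number of context banks from "qcom,ncb", sanity-check the
 * hardware with a V2P translation of address 0, request the fault IRQ
 * and register the instance with the IOMMU core.
 */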
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);