/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/pgtable.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake tahoe party for the folks in
	 * Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to fuck this up for anybody...
	 */
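
	/* The magic constants stored through ASI_{D,I}MMU_DEMAP below
	 * are demap-operation addresses.  As used here (encoding per
	 * the UltraSPARC-I/II manuals): bit 6 (0x40) selects "demap
	 * context" instead of "demap page", and bits 5:4 pick the
	 * context register (0x00 primary, 0x10 secondary, 0x20
	 * nucleus), so e.g. 0x50 means "demap the whole secondary
	 * context".
	 */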
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:		/* 19 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	 mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	andn	%o1, 1, %o3
	be,pn	%icc, 1f
	 or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	 wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop
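
	/* For __flush_tlb_pending below, each vaddrs[] entry is a
	 * page-aligned virtual address with bit 0 abused as an "also
	 * demap the I-TLB" flag; the loop strips that bit and ORs in
	 * 0x10 so the demap hits the secondary context.
	 */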
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	 or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	 wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	 sub	%o1, %o0, %o3
	srlx	%o3, 18, %o4
	brnz,pn	%o4, __spitfire_flush_tlb_kernel_range_slow
	 sethi	%hi(PAGE_SIZE), %o4
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	 sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
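
	/* Slow path for oversized kernel-range flushes: walk all 64
	 * entries of the I- and D-TLB by index and clear every entry
	 * whose lock bit (0x40, _PAGE_L_4U) is not set, leaving locked
	 * translations alone.
	 */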
__spitfire_flush_tlb_kernel_range_slow:
	mov	63 * 8, %o4
1:	ldxa	[%o4] ASI_ITLB_DATA_ACCESS, %o3
	andcc	%o3, 0x40, %g0			/* _PAGE_L_4U */
	bne,pn	%xcc, 2f
	 mov	TLB_TAG_ACCESS, %o3
	stxa	%g0, [%o3] ASI_IMMU
	stxa	%g0, [%o4] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:	ldxa	[%o4] ASI_DTLB_DATA_ACCESS, %o3
	andcc	%o3, 0x40, %g0
	bne,pn	%xcc, 2f
	 mov	TLB_TAG_ACCESS, %o3
	stxa	%g0, [%o3] ASI_DMMU
	stxa	%g0, [%o4] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:	sub	%o4, 8, %o4
	brgez,pt %o4, 1b
	 nop
	retl
	 nop

__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	 wrpr	%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	 flush	%o0 + %g2
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	 andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	 nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	 sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	 sllx	%o0, 11, %o0
	retl
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.previous

	/* Cheetah specific versions, patched at boot time. */
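	/* Cheetah cannot demap the I-TLB from the secondary context
	 * (see the comment at the top of this file), so these variants
	 * run at trap level 1, where instruction fetches use the
	 * nucleus context, and temporarily install the victim context
	 * in PRIMARY_CONTEXT while preserving the nucleus page-size
	 * fields.
	 */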
__cheetah_flush_tlb_mm:	/* 19 insns */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	be,pn	%icc, 1f
	 andn	%o1, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	 andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate
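
	/* Cheetah also has a "demap all" operation (demap address
	 * 0x80, per the UltraSPARC-III manuals), which lets the range
	 * flush below retire oversized ranges with two stores instead
	 * of a page-by-page loop.
	 */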
__cheetah_flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	 sub	%o1, %o0, %o3
	srlx	%o3, 18, %o4
	brnz,pn	%o4, 3f
	 sethi	%hi(PAGE_SIZE), %o4
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	 sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	 nop
3:	mov	0x80, %o4
	stxa	%g0, [%o4] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g0, [%o4] ASI_IMMU_DEMAP
	membar	#Sync
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
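	/* sun4v calling convention, as used below: the fast-trap
	 * function number goes in %o5 and its arguments in %o0-%o3;
	 * "ta HV_FAST_TRAP" enters the hypervisor and a zero status
	 * comes back in %o0 on success.  HV_MMU_UNMAP_ADDR_TRAP is the
	 * cheaper dedicated unmap trap, taking vaddr/context/flags
	 * directly in %o0-%o2.
	 */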
__hypervisor_tlb_tl0_error:
	save	%sp, -192, %sp
	mov	%i0, %o0
	call	hypervisor_tlbop_error
	 mov	%i1, %o1
	ret
	 restore

__hypervisor_flush_tlb_mm:	/* 19 insns */
	mov	%o0, %o2	/* ARG2: mmu context */
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 1f
	 mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o5
	jmpl	%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	mov	%o0, %g2
	mov	%o1, %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g2, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 1f
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	 nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx	%o1, 3, %g1
	mov	%o2, %g2
	mov	%o0, %g3
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g3, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 1f
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g1, 1b
	 nop
	retl
	 nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	 sub	%o1, %o0, %g2
	srlx	%g2, 18, %g3
	brnz,pn	%g3, 4f
	 mov	%o0, %g1
	sethi	%hi(PAGE_SIZE), %g3
	sub	%g2, %g3, %g2
1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 3f
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g2, 1b
	 sub	%g2, %g3, %g2
2:	retl
	 nop
3:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
4:	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	0, %o2		/* ARG2: mmu context == nucleus */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 3b
	 mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif
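
	/* Copy %o2 instructions, one word at a time, from %o1 over the
	 * code at %o0, flushing the I-cache for each store.  The
	 * counts passed by the patch routines at the bottom of this
	 * file must match the "NN insns" annotations on the
	 * replacement bodies above.
	 */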
tlb_patch_one:
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	 add	%o0, 4, %o0
	retl
	 nop

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
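
	/* Each handler finishes with "retry", resuming the interrupted
	 * instruction stream directly from trap level 1.
	 */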
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 24 insns */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 20 insns */
	/* %g5=context, %g1=vaddr */
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU
	andcc	%g1, 0x1, %g0
	be,pn	%icc, 2f
	 andn	%g1, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%g4] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
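
	/* Ranges of 2^18 bytes (256KB) or more fall through to the
	 * same walk-all-64-entries slow path as the Spitfire range
	 * flush above; smaller ranges demap page by page in the
	 * nucleus context.
	 */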
	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g2
	brnz,pn	%g2, 2f
	 sethi	%hi(PAGE_SIZE), %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	 sub	%g3, %g2, %g3
	retry
2:	mov	63 * 8, %g1
1:	ldxa	[%g1] ASI_ITLB_DATA_ACCESS, %g2
	andcc	%g2, 0x40, %g0		/* _PAGE_L_4U */
	bne,pn	%xcc, 2f
	 mov	TLB_TAG_ACCESS, %g2
	stxa	%g0, [%g2] ASI_IMMU
	stxa	%g0, [%g1] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:	ldxa	[%g1] ASI_DTLB_DATA_ACCESS, %g2
	andcc	%g2, 0x40, %g0
	bne,pn	%xcc, 2f
	 mov	TLB_TAG_ACCESS, %g2
	stxa	%g0, [%g2] ASI_DMMU
	stxa	%g0, [%g1] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:	sub	%g1, 8, %g1
	brgez,pt %g1, 1b
	 nop
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:
661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	smp_synchronize_tick_client
	 nop
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl	xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rdpr	%tstate, %g7
	stx	%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr	%tpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TPC]
	rdpr	%tnpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TNPC]
	stx	%o7, [%g1 + GR_SNAP_O7]
	stx	%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
	rdpr	%cwp, %g3
	sub	%g3, 1, %g7
	wrpr	%g7, %cwp
	mov	%i7, %g7
	wrpr	%g3, %cwp
	stx	%g7, [%g1 + GR_SNAP_RPC]
	sethi	%hi(trap_block), %g7
	or	%g7, %lo(trap_block), %g7
	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add	%g7, %g2, %g7
	ldx	[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx	%g3, [%g1 + GR_SNAP_THREAD]
	retry

	.globl	xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rd	%pic, %g7
	stx	%g7, [%g1 + (4 * 8)]
	rd	%pcr, %g7
	stx	%g7, [%g1 + (0 * 8)]
	retry

	.globl	xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1

	ldxa	[%g0] ASI_PIC, %g7
	stx	%g7, [%g1 + (4 * 8)]
	mov	0x08, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (5 * 8)]
	mov	0x10, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (6 * 8)]
	mov	0x18, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (7 * 8)]

	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o5, %g7

	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	3, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (3 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	2, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (2 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	1, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (1 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	0, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (0 * 8)]

	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g7, %o5

	retry

__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g2
	brnz,pn	%g2, 2f
	 sethi	%hi(PAGE_SIZE), %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	 sub	%g3, %g2, %g3
	retry
2:	mov	0x80, %g2
	stxa	%g0, [%g2] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g0, [%g2] ASI_IMMU_DEMAP
	membar	#Sync
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	 andn	%g2, 0x3, %g2
	cmp	%g2, %g1
	bne,pt	%xcc, 2f
	 nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	 sub	%g3, (1 << 5), %g3
	brz,pn	%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi	%hi(PAGE_SIZE), %g3
1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	 add	%g7, (1 << 5), %g7
2:	retry
	nop
	nop

	/* %g5:	error
	 * %g6:	tlb op
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap
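
	/* The xcall variants below run on the interrupted CPU's live
	 * register state, so every %o register a hypervisor call
	 * clobbers is parked in a global and restored before the
	 * retry.
	 */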
	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0		/* ARG0: CPU lists unimplemented */
	clr	%o1		/* ARG1: CPU lists unimplemented */
	mov	%g5, %o2	/* ARG2: mmu context */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn	%o0, 1f
	 mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop

	.globl	__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 20 insns */
	/* %g5=ctx, %g1=vaddr */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%g1, %o0	/* ARG0: virtual address */
	mov	%g5, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn %o0, 1f
	 mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop

	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g7
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	mov	%o0, %g2
	mov	%o1, %g4
	brnz,pn	%g7, 2f
	 mov	%o2, %g7
1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn	%o0, 1f
	 mov	%o0, %g5
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b
	 sub	%g3, %o2, %g3
5:	mov	%g2, %o0
	mov	%g4, %o1
	mov	%g7, %o2
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop
2:	mov	%o3, %g1
	mov	%o5, %g3
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	0, %o2		/* ARG2: mmu context == nucleus */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	%g1, %o3
	brz,pt	%o0, 5b
	 mov	%g3, %o5
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	ba,pt	%xcc, 1b
	 clr	%g5

	/* These just get rescheduled to PIL vectors. */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_call_function_single
xcall_call_function_single:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl	xcall_kgdb_capture
xcall_kgdb_capture:
	wr	%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */
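
	/* Patch the generic (Spitfire) routines above with the Cheetah
	 * or hypervisor bodies.  tlb_patch_one copies exactly the
	 * instruction count passed in %o2, which is why each
	 * replacement is padded with nops out to its advertised
	 * "NN insns" length.
	 */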
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__cheetah_flush_tlb_page), %o1
	or	%o1, %lo(__cheetah_flush_tlb_page), %o1
	call	tlb_patch_one
	 mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	27, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__cheetah_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	 mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	44, %o2
#endif /* CONFIG_SMP */

	ret
	 restore

	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__hypervisor_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call	tlb_patch_one
	 mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	27, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	 mov	2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	24, %o2

	sethi	%hi(xcall_flush_tlb_page), %o0
	or	%o0, %lo(xcall_flush_tlb_page), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call	tlb_patch_one
	 mov	20, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	44, %o2
#endif /* CONFIG_SMP */

	ret
	 restore