
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Boot code and exception vectors for Book3E processors
 *
 * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 */
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>

/* XXX This will ultimately add space for a special exception save
 *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 *     when taking special interrupts. For now we don't support that,
 *     special interrupts from within a non-standard level will probably
 *     blow you up
 */
#define SPECIAL_EXC_SRR0	0
#define SPECIAL_EXC_SRR1	1
#define SPECIAL_EXC_SPRG_GEN	2
#define SPECIAL_EXC_SPRG_TLB	3
#define SPECIAL_EXC_MAS0	4
#define SPECIAL_EXC_MAS1	5
#define SPECIAL_EXC_MAS2	6
#define SPECIAL_EXC_MAS3	7
#define SPECIAL_EXC_MAS6	8
#define SPECIAL_EXC_MAS7	9
#define SPECIAL_EXC_MAS5	10	/* E.HV only */
#define SPECIAL_EXC_MAS8	11	/* E.HV only */
#define SPECIAL_EXC_IRQHAPPENED	12
#define SPECIAL_EXC_DEAR	13
#define SPECIAL_EXC_ESR		14
#define SPECIAL_EXC_SOFTE	15
#define SPECIAL_EXC_CSRR0	16
#define SPECIAL_EXC_CSRR1	17
/* must be even to keep 16-byte stack alignment */
#define SPECIAL_EXC_END		18

#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
#define SPECIAL_EXC_FRAME_OFFS	(INT_FRAME_SIZE - 288)

#define SPECIAL_EXC_STORE(reg, name) \
	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

#define SPECIAL_EXC_LOAD(reg, name) \
	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
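/*
 * For illustration (not in the original source): SPECIAL_EXC_STORE(r10,SRR0)
 * expands to
 *
 *	std	r10, (0 * 8 + INT_FRAME_SIZE - 288)(r1)
 *
 * i.e. the 18 save slots form an 8-byte-per-entry array addressed relative
 * to the enlarged frame; 18 * 8 = 144 bytes is exactly what
 * SPECIAL_EXC_FRAME_SIZE adds on top of INT_FRAME_SIZE.
 */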
special_reg_save:
	lbz	r9,PACAIRQHAPPENED(r13)
	RECONCILE_IRQ_STATE(r3,r4)

	/*
	 * We only need (or have stack space) to save this stuff if
	 * we interrupted the kernel.
	 */
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	bnelr

	/*
	 * Advance to the next TLB exception frame for handler
	 * types that don't do it automatically.
	 */
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	add	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * Save registers needed to allow nesting of certain exceptions
	 * (such as TLB misses) inside special exception levels
	 */
	mfspr	r10,SPRN_SRR0
	SPECIAL_EXC_STORE(r10,SRR0)
	mfspr	r10,SPRN_SRR1
	SPECIAL_EXC_STORE(r10,SRR1)
	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_GEN)
	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_TLB)
	mfspr	r10,SPRN_MAS0
	SPECIAL_EXC_STORE(r10,MAS0)
	mfspr	r10,SPRN_MAS1
	SPECIAL_EXC_STORE(r10,MAS1)
	mfspr	r10,SPRN_MAS2
	SPECIAL_EXC_STORE(r10,MAS2)
	mfspr	r10,SPRN_MAS3
	SPECIAL_EXC_STORE(r10,MAS3)
	mfspr	r10,SPRN_MAS6
	SPECIAL_EXC_STORE(r10,MAS6)
	mfspr	r10,SPRN_MAS7
	SPECIAL_EXC_STORE(r10,MAS7)
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_MAS5
	SPECIAL_EXC_STORE(r10,MAS5)
	mfspr	r10,SPRN_MAS8
	SPECIAL_EXC_STORE(r10,MAS8)

	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
	li	r10,0
	mtspr	SPRN_MAS5,r10
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	SPECIAL_EXC_STORE(r9,IRQHAPPENED)

	mfspr	r10,SPRN_DEAR
	SPECIAL_EXC_STORE(r10,DEAR)
	mfspr	r10,SPRN_ESR
	SPECIAL_EXC_STORE(r10,ESR)

	lbz	r10,PACAIRQSOFTMASK(r13)
	SPECIAL_EXC_STORE(r10,SOFTE)
	ld	r10,_NIP(r1)
	SPECIAL_EXC_STORE(r10,CSRR0)
	ld	r10,_MSR(r1)
	SPECIAL_EXC_STORE(r10,CSRR1)

	blr

ret_from_level_except:
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	beq	1f
	b	ret_from_except
1:
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	sub	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * It's possible that the special level exception interrupted a
	 * TLB miss handler, and inserted the same entry that the
	 * interrupted handler was about to insert. On CPUs without TLB
	 * write conditional, this can result in a duplicate TLB entry.
	 * Wipe all non-bolted entries to be safe.
	 *
	 * Note that this doesn't protect against any TLB misses
	 * we may take accessing the stack from here to the end of
	 * the special level exception. It's not clear how we can
	 * reasonably protect against that, but only CPUs with
	 * neither TLB write conditional nor bolted kernel memory
	 * are affected. Do any such CPUs even exist?
	 */
	PPC_TLBILX_ALL(0,R0)

	REST_NVGPRS(r1)

	SPECIAL_EXC_LOAD(r10,SRR0)
	mtspr	SPRN_SRR0,r10
	SPECIAL_EXC_LOAD(r10,SRR1)
	mtspr	SPRN_SRR1,r10
	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,MAS0)
	mtspr	SPRN_MAS0,r10
	SPECIAL_EXC_LOAD(r10,MAS1)
	mtspr	SPRN_MAS1,r10
	SPECIAL_EXC_LOAD(r10,MAS2)
	mtspr	SPRN_MAS2,r10
	SPECIAL_EXC_LOAD(r10,MAS3)
	mtspr	SPRN_MAS3,r10
	SPECIAL_EXC_LOAD(r10,MAS6)
	mtspr	SPRN_MAS6,r10
	SPECIAL_EXC_LOAD(r10,MAS7)
	mtspr	SPRN_MAS7,r10
BEGIN_FTR_SECTION
	SPECIAL_EXC_LOAD(r10,MAS5)
	mtspr	SPRN_MAS5,r10
	SPECIAL_EXC_LOAD(r10,MAS8)
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	lbz	r6,PACAIRQSOFTMASK(r13)
	ld	r5,SOFTE(r1)

	/* Interrupts had better not already be enabled... */
	tweqi	r6,IRQS_ENABLED

	andi.	r6,r5,IRQS_DISABLED
	bne	1f
	TRACE_ENABLE_INTS
	stb	r5,PACAIRQSOFTMASK(r13)
1:
	/*
	 * Restore PACAIRQHAPPENED rather than setting it based on
	 * the return MSR[EE], since we could have interrupted
	 * __check_irq_replay() or other inconsistent transitory
	 * states that must remain that way.
	 */
	SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
	stb	r10,PACAIRQHAPPENED(r13)

	SPECIAL_EXC_LOAD(r10,DEAR)
	mtspr	SPRN_DEAR,r10
	SPECIAL_EXC_LOAD(r10,ESR)
	mtspr	SPRN_ESR,r10

	stdcx.	r0,0,r1		/* to clear the reservation */

	REST_4GPRS(2, r1)
	REST_4GPRS(6, r1)

	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtctr	r10
	mtxer	r11

	blr

.macro ret_from_level srr0 srr1 paca_ex scratch
	bl	ret_from_level_except

	ld	r10,_LINK(r1)
	ld	r11,_CCR(r1)
	ld	r0,GPR13(r1)
	mtlr	r10
	mtcr	r11

	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	\scratch,r0

	std	r10,\paca_ex+EX_R10(r13);
	std	r11,\paca_ex+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	\srr0,r10
	mtspr	\srr1,r11
	ld	r10,\paca_ex+EX_R10(r13)
	ld	r11,\paca_ex+EX_R11(r13)
	mfspr	r13,\scratch
.endm
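/*
 * Note on the tail of the macro above: once r1 is restored to the
 * interrupted context, the stack can no longer be used, so r10/r11 are
 * parked in the PACA exception save area and r13 in the level's scratch
 * SPRG until the very last instructions. The two thin wrappers below
 * differ only in which SRR pair, PACA save area and scratch SPRG they
 * hand to ret_from_level, and in the return-from-interrupt opcode.
 */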
ret_from_crit_except:
	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
	rfci

ret_from_mc_except:
	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
	rfmci

/* Exception prolog code for all exceptions */
#define EXCEPTION_PROLOG(n, intnum, type, addition)			    \
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	mfcr	r10;			/* save CR */			    \
	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
	DO_KVM	intnum,SPRN_##type##_SRR1;	/* KVM hook */		    \
	stw	r10,PACA_EX##type+EX_CR(r13);	/* save old CR in the PACA */ \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13);	/* save old r1 in the PACA */ \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	type##_BTB_FLUSH		\
	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
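/*
 * On exit from EXCEPTION_PROLOG: r13 holds the PACA, r10 the interrupted
 * SRR0 and r11 the interrupted SRR1, while the old CR, r1, r10 and r11
 * sit in the per-level PACA exception save area and r1 points at a fresh
 * kernel stack frame for this exception level.
 */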
/* Exception type-specific macros */
#define	GEN_SET_KSTACK						\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define SPRN_GEN_SRR0	SPRN_SRR0
#define SPRN_GEN_SRR1	SPRN_SRR1

#define	GDBELL_SET_KSTACK	GEN_SET_KSTACK
#define SPRN_GDBELL_SRR0	SPRN_GSRR0
#define SPRN_GDBELL_SRR1	SPRN_GSRR1

#define CRIT_SET_KSTACK						\
	ld	r1,PACA_CRIT_STACK(r13);			\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_CRIT_SRR0	SPRN_CSRR0
#define SPRN_CRIT_SRR1	SPRN_CSRR1

#define DBG_SET_KSTACK						\
	ld	r1,PACA_DBG_STACK(r13);				\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_DBG_SRR0	SPRN_DSRR0
#define SPRN_DBG_SRR1	SPRN_DSRR1

#define MC_SET_KSTACK						\
	ld	r1,PACA_MC_STACK(r13);				\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_MC_SRR0	SPRN_MCSRR0
#define SPRN_MC_SRR1	SPRN_MCSRR1

#ifdef CONFIG_PPC_FSL_BOOK3E
#define GEN_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		beq 1f;			\
		BTB_FLUSH(r10)		\
		1:			\
	END_BTB_FLUSH_SECTION

#define CRIT_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		BTB_FLUSH(r10)		\
	END_BTB_FLUSH_SECTION

#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
#define MC_BTB_FLUSH CRIT_BTB_FLUSH
#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
#else
#define GEN_BTB_FLUSH
#define CRIT_BTB_FLUSH
#define DBG_BTB_FLUSH
#define MC_BTB_FLUSH
#define GDBELL_BTB_FLUSH
#endif
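/*
 * Note: the GEN flavour reuses the cr0 result of the MSR_PR test done in
 * the prolog just above it (the intervening ld does not touch cr0), so it
 * only flushes the branch target buffer when the exception came from user
 * space; the CRIT/DBG/MC flavours flush unconditionally.
 */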
#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))

#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))

#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))

#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))

#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))

/* Variants of the "addition" argument for the prolog
 */
#define PROLOG_ADDITION_NONE_GEN(n)
#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
	lbz	r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */	    \
	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */	    \
	bne	masked_interrupt_book3e_##n

#define PROLOG_ADDITION_2REGS_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG(n)					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC(n)					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)

/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
exc_##n##_common:							    \
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	beq	2f;			/* if from kernel mode */	    \
	ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */  \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,scratch;		/* get back r13 */		    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */	    \
	ld	r12,exception_marker@toc(r2);				    \
	li	r0,0;							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n)+1;		/* indicate partial regs in trap */ \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* store stack frame back link */   \
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */
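/*
 * The (n)+1 stored into _TRAP sets the low bit of the trap number to flag
 * that only the volatile registers were saved. ret_from_except below tests
 * exactly that bit (andi. r0,r11,1) to decide whether it can skip restoring
 * the non-volatile GPRs.
 */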
#define EXCEPTION_COMMON(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
#define EXCEPTION_COMMON_CRIT(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
#define EXCEPTION_COMMON_MC(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
#define EXCEPTION_COMMON_DBG(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)

/*
 * This is meant for exceptions that don't immediately hard-enable. We
 * set a bit in paca->irq_happened to ensure that a subsequent call to
 * arch_local_irq_restore() will properly hard-enable and avoid the
 * fast-path, and then reconcile irq state.
 */
#define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)

/*
 * This is called by exceptions that don't use INTS_DISABLE (that did not
 * touch irq indicators in the PACA). This will restore MSR:EE to its
 * previous value
 *
 * XXX In the long run, we may want to open-code it in order to separate the
 * load from the wrtee, thus limiting the latency caused by the dependency
 * but at this point, I'll favor code clarity until we have a near to final
 * implementation
 */
#define INTS_RESTORE_HARD						    \
	ld	r11,_MSR(r1);						    \
	wrtee	r11;

/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */

/* WARNING: If you change the layout of this stub, make sure you check
 *	the debug exception handler which handles single stepping
 *	into exceptions from userspace, and the MM code in
 *	arch/powerpc/mm/tlb_nohash.c which patches the branch here
 *	and would need to be updated if that branch is moved
 */
#define	EXCEPTION_STUB(loc, label)					\
	. = interrupt_base_book3e + loc;				\
	nop;	/* To make debug interrupts happy */			\
	b	exc_##label##_book3e;

#define ACK_NONE(r)
#define ACK_DEC(r)							\
	lis	r,TSR_DIS@h;						\
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							\
	lis	r,TSR_FIS@h;						\
	mtspr	SPRN_TSR,r
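/*
 * TSR is a write-one-to-clear status register, so writing just the
 * DIS/FIS bit back acknowledges the pending decrementer or fixed-interval
 * timer event without disturbing the other status bits.
 */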
/* Used by asynchronous interrupts that may happen in the idle loop.
 *
 * This checks if the thread was in the idle loop, and if yes, returns
 * to the caller rather than to the interrupted PC. This is to avoid a
 * race if interrupts happen before the wait instruction.
 */
#define CHECK_NAPPING()							\
	ld	r11, PACA_THREAD_INFO(r13);				\
	ld	r10,TI_LOCAL_FLAGS(r11);				\
	andi.	r9,r10,_TLF_NAPPING;					\
	beq+	1f;							\
	ld	r8,_LINK(r1);						\
	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
	std	r8,_NIP(r1);						\
	std	r7,TI_LOCAL_FLAGS(r11);					\
1:

#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
	START_EXCEPTION(label);						\
	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
	EXCEPTION_COMMON(trapnum)					\
	INTS_DISABLE;							\
	ack(r8);							\
	CHECK_NAPPING();						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret_from_except_lite;

/* This value is used to mark exception frames on the stack. */
	.section	".toc","aw"
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

/*
 * And here we have the exception vectors !
 */
	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
	EXCEPTION_STUB(0x200, altivec_unavailable)
	EXCEPTION_STUB(0x220, altivec_assist)
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)
	EXCEPTION_STUB(0x340, lrat_error)

	.globl __end_interrupts
__end_interrupts:
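/*
 * The stubs above are laid out 0x20 bytes apart from
 * interrupt_base_book3e, each just a nop plus a branch to the real
 * handler; the hex comments record the classic Book E vector offsets
 * they correspond to. The IVOR registers are presumably pointed at
 * these stub offsets by setup code elsewhere.
 */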
/* Critical Input Interrupt */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x100)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/* Machine Check Interrupt */
	START_EXCEPTION(machine_check);
	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_MC(0x000)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_mc_except

/* Data Storage Interrupt */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x300)
	INTS_DISABLE
	b	storage_fault_common

/* Instruction Storage Interrupt */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
				PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	EXCEPTION_COMMON(0x400)
	INTS_DISABLE
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
			   external_input, do_IRQ, ACK_NONE)

/* Alignment */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x600)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	EXCEPTION_COMMON(0x700)
	INTS_DISABLE
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	bl	save_nvgprs
	bl	program_check_exception
	b	ret_from_except

/* Floating Point Unavailable Interrupt */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800)
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_fpu
	b	fast_exception_return
1:	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	b	ret_from_except

/* Altivec Unavailable Interrupt */
	START_EXCEPTION(altivec_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x200)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except

/* AltiVec Assist */
	START_EXCEPTION(altivec_assist);
	NORMAL_EXCEPTION_PROLOG(0x220,
				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x220)
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bl	altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#else
	bl	unknown_exception
#endif
	b	ret_from_except

/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
			   decrementer, timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
			   fixed_interval, unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x9f0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
	bl	WatchdogException
#else
	bl	unknown_exception
#endif
	b	ret_from_crit_except

/* System Call Interrupt */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common
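/*
 * Unlike the other handlers, the system call path skips the common
 * prolog entirely: it only moves the user r13 aside and loads SRR0/SRR1
 * and the PACA into the registers system_call_common expects, leaving
 * all stack frame construction to that shared code.
 */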
/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20)
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_except

/* Debug exception as a critical interrupt */
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			      PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */
	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	ld	r15,PACATOC(r13)
	ld	r14,interrupt_base_book3e@got(r15)
	ld	r15,__end_interrupts@got(r15)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE		/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite properly save an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON_CRIT(0xd00)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	bl	save_nvgprs
	bl	DebugException
	b	ret_from_except

kernel_dbg_exc:
	b	.	/* NYI */

/* Debug exception as a debug interrupt */
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			     PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */
	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	ld	r15,PACATOC(r13)
	ld	r14,interrupt_base_book3e@got(r15)
	ld	r15,__end_interrupts@got(r15)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE		/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite properly save an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON_DBG(0xd08)
	INTS_DISABLE
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	bl	save_nvgprs
	bl	DebugException
	b	ret_from_except

	START_EXCEPTION(perfmon);
	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x260)
	INTS_DISABLE
	CHECK_NAPPING()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	performance_monitor_exception
	b	ret_from_except_lite

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
			   doorbell, doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2a0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/*
 * Guest doorbell interrupt
 * This general exception uses the GSRRx save/restore registers
 */
	START_EXCEPTION(guest_doorbell);
	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x2c0)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* Guest Doorbell critical Interrupt */
	START_EXCEPTION(guest_doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2e0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/* Hypervisor call */
	START_EXCEPTION(hypercall);
	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x310)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* Embedded Hypervisor privileged */
	START_EXCEPTION(ehpriv);
	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x320)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* LRAT Error interrupt */
	START_EXCEPTION(lrat_error);
	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x340)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/*
 * An interrupt came in while soft-disabled; we mark paca->irq_happened
 * accordingly and, if the interrupt is level sensitive, we hard disable.
 * A hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
 * keep these in sync.
 */
.macro masked_interrupt_book3e paca_irq full_mask
	lbz	r10,PACAIRQHAPPENED(r13)
	.if \full_mask == 1
	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
	.else
	ori	r10,r10,\paca_irq
	.endif
	stb	r10,PACAIRQHAPPENED(r13)

	.if \full_mask == 1
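	/*
	 * Clear MSR_EE in the saved SRR1 without needing a scratch CR
	 * field: the rldicl rotates the value left by 48 so the EE bit
	 * lands in the top position, where the mask clears it, and the
	 * rotldi by 16 completes a full 64-bit rotation that puts every
	 * other bit back where it started.
	 */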
	rldicl	r10,r11,48,1		/* clear MSR_EE */
	rotldi	r11,r10,16
	mtspr	SPRN_SRR1,r11
	.endif

	lwz	r11,PACA_EXGEN+EX_CR(r13)
	mtcr	r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
	b	.
.endm

masked_interrupt_book3e_0x500:
	masked_interrupt_book3e PACA_IRQ_EE 1

masked_interrupt_book3e_0x900:
	ACK_DEC(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x980:
	ACK_FIT(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
	masked_interrupt_book3e PACA_IRQ_DBELL 0

/*
 * This is called from 0x300 and 0x400 handlers after the prologs with
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
storage_fault_common:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	mr	r5,r15
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	do_page_fault
	cmpdi	r3,0
	bne-	1f
	b	ret_from_except_lite
1:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
alignment_more:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	alignment_exception
	b	ret_from_except

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	wrteei	0

	ld	r9, PACA_THREAD_INFO(r13)
	ld	r3,_MSR(r1)
	ld	r10,PACACURRENT(r13)
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore

1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite

2:
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
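	/*
	 * ldarx/stdcx. retry loop: atomically clear the flag bit in
	 * thread_info->flags, retrying if anything else modified the
	 * word between the load-reserve and the store-conditional.
	 */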
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
	bl	preempt_schedule_irq

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable interrupts
	 * when we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
	wrteei	0
#endif /* CONFIG_PREEMPT */

restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,fast_exception_return

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);

/* This is the return from load_up_fpu fast path which could do with
 * less GPR restores in fact, but for now we have a single return path
 */
fast_exception_return:
	wrteei	0
1:	mr	r0,r13
	ld	r10,_MSR(r1)
	REST_4GPRS(2, r1)
	andi.	r6,r10,MSR_PR
	REST_2GPRS(6, r1)
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
	ld	r0,GPR13(r1)

1:	stdcx.	r0,0,r1		/* to clear the reservation */

	ld	r8,_CCR(r1)
	ld	r9,_LINK(r1)
	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtcr	r8
	mtlr	r9
	mtctr	r10
	mtxer	r11
	REST_2GPRS(8, r1)
	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r0

	std	r10,PACA_EXGEN+EX_R10(r13);
	std	r11,PACA_EXGEN+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	fast_exception_return

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);

/*
 * Trampolines used when spotting a bad kernel stack pointer in
 * the exception entry code.
 *
 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 * index around, etc... to handle crit & mcheck
 */
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
BAD_STACK_TRAMPOLINE(0x220)
BAD_STACK_TRAMPOLINE(0x260)
BAD_STACK_TRAMPOLINE(0x280)
BAD_STACK_TRAMPOLINE(0x2a0)
BAD_STACK_TRAMPOLINE(0x2c0)
BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
BAD_STACK_TRAMPOLINE(0x310)
BAD_STACK_TRAMPOLINE(0x320)
BAD_STACK_TRAMPOLINE(0x340)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
BAD_STACK_TRAMPOLINE(0x700)
BAD_STACK_TRAMPOLINE(0x800)
BAD_STACK_TRAMPOLINE(0x900)
BAD_STACK_TRAMPOLINE(0x980)
BAD_STACK_TRAMPOLINE(0x9f0)
BAD_STACK_TRAMPOLINE(0xa00)
BAD_STACK_TRAMPOLINE(0xb00)
BAD_STACK_TRAMPOLINE(0xc00)
BAD_STACK_TRAMPOLINE(0xd00)
BAD_STACK_TRAMPOLINE(0xd08)
BAD_STACK_TRAMPOLINE(0xe00)
BAD_STACK_TRAMPOLINE(0xf00)
BAD_STACK_TRAMPOLINE(0xf20)

	.globl	bad_stack_book3e
bad_stack_book3e:
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
	std	r10,GPR1(r1)
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	std	r10,_DAR(r1)
	std	r11,_DSISR(r1)
	std	r0,GPR0(r1);		/* save r0 in stackframe */
	std	r2,GPR2(r1);		/* save r2 in stackframe */
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */
	std	r9,GPR9(r1);		/* save r9 in stackframe */
	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */
	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */
	std	r3,GPR10(r1);		/* save r10 to stackframe */
	std	r4,GPR11(r1);		/* save r11 to stackframe */
	std	r12,GPR12(r1);		/* save r12 in stackframe */
	std	r5,GPR13(r1);		/* save it to stackframe */
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_10GPRS(14,r1)
	SAVE_8GPRS(24,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b

/*
 * Setup the initial TLB for a core. This current implementation
 * assumes that whatever we are running off of will not conflict with
 * the new mapping at PAGE_OFFSET.
 */
_GLOBAL(initial_tlb_book3e)

	/* Look for the first TLB with IPROT set */
	mfspr	r4,SPRN_TLB0CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(0)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB1CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(1)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB2CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(2)@h
	bne	found_iprot

	lis	r3,MAS0_TLBSEL(3)@h
	mfspr	r4,SPRN_TLB3CFG
	/* fall through */

found_iprot:
	andi.	r5,r4,TLBnCFG_HES
	bne	have_hes

	mflr	r8		/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	mfmsr	r7
	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31			/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1			/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY		/* Extract # entries */
	li	r6,0				/* Set Entry counter to 0 */
1:	mr	r7,r3				/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv				/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r4				/* Are we done? */
	bne	1b				/* If not, repeat */

	/* Invalidate all TLBs */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* Find an unused entry that is non-zero */
	addi	r7,r7,0x1
	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
	mtspr	SPRN_MAS0,r4
	tlbre

	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS	/* Setup TMP mapping in the other Address space */
	mtspr	SPRN_MAS1,r6
	tlbwe

	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
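	/*
	 * The rfi below doubles as an address-space switch: SRR1 carries
	 * the current MSR with IS flipped and SRR0 the address of label
	 * 2: below, so execution resumes at the same code but translated
	 * through the temporary mapping in the other address space.
	 */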
        bl 1f /* Find our address */
1:      mflr r6
        addi r6,r6,(2f - 1b)
        mtspr SPRN_SRR0,r6
        rfi
2:
        /* 4. Clear out PIDs & Search info
         *
         * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
         * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
         * r5 = MAS3
         */
        li r6,0
        mtspr SPRN_MAS6,r6
        mtspr SPRN_PID,r6
        /* 5. Invalidate mapping we started in
         *
         * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
         * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
         * r5 = MAS3
         */
        mtspr SPRN_MAS0,r3
        tlbre
        mfspr r6,SPRN_MAS1
        rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */
        mtspr SPRN_MAS1,r6
        tlbwe
        sync
        isync
        /* 6. Setup KERNELBASE mapping in TLB[0]
         *
         * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
         * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
         * r5 = MAS3
         */
        rlwinm r3,r3,0,16,3 /* clear ESEL */
        mtspr SPRN_MAS0,r3
        lis r6,(MAS1_VALID|MAS1_IPROT)@h
        ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
        mtspr SPRN_MAS1,r6
        LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
        mtspr SPRN_MAS2,r6
        rlwinm r5,r5,0,0,25
        ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
        mtspr SPRN_MAS3,r5
        li r5,-1
        rlwinm r5,r5,0,0,25
        tlbwe
        /* 7. Jump to KERNELBASE mapping
         *
         * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
         */
        /* Now we branch to the new virtual address mapped by this entry */
        bl 1f /* Find our address */
1:      mflr r6
        addi r6,r6,(2f - 1b)
        tovirt(r6,r6)
        lis r7,MSR_KERNEL@h
        ori r7,r7,MSR_KERNEL@l
        mtspr SPRN_SRR0,r6
        mtspr SPRN_SRR1,r7
        rfi /* start execution out of TLB1[0] entry */
2:
        /* 8. Clear out the temp mapping
         *
         * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
         */
        mtspr SPRN_MAS0,r4
        tlbre
        mfspr r5,SPRN_MAS1
        rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */
        mtspr SPRN_MAS1,r5
        tlbwe
        sync
        isync
        /* We translate LR and return */
        tovirt(r8,r8)
        mtlr r8
        blr

have_hes:
        /* Set up MAS0, 1, 2, 3 and 7 for a tlbwe of a 1GB entry that
         * maps the kernel linear mapping. We also set MAS8 once and for
         * all here, though eventually that will have to be made
         * dependent on whether we are running under a hypervisor.
         */
        /* BEWARE, MAGIC
         * This code is called as an ordinary function on the boot CPU.
         * But to avoid duplication, this code is also used in SCOM
         * bringup of secondary CPUs. We read the code between the
         * a2_tlbinit_code_start and a2_tlbinit_code_end labels one
         * instruction at a time and RAM it into the new core via SCOM.
         * That doesn't process branches, so there must be none between
         * those two labels. It also means that if this code ever takes
         * any parameters, the SCOM code must be updated to provide
         * them.
         */
        .globl a2_tlbinit_code_start
a2_tlbinit_code_start:
        ori r11,r3,MAS0_WQ_ALLWAYS
        oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
        mtspr SPRN_MAS0,r11
        lis r3,(MAS1_VALID | MAS1_IPROT)@h
        ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
        mtspr SPRN_MAS1,r3
        LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
        mtspr SPRN_MAS2,r3
        li r3,MAS3_SR | MAS3_SW | MAS3_SX
        mtspr SPRN_MAS7_MAS3,r3
        li r3,0
        mtspr SPRN_MAS8,r3
        /* Write the TLB entry */
        tlbwe

        .globl a2_tlbinit_after_linear_map
a2_tlbinit_after_linear_map:
        /* Now we branch to the new virtual address mapped by this entry */
        LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
        mtctr r3
        bctr
1:      /* We are now running at PAGE_OFFSET, clean the TLB of everything
         * else (including IPROTed things left by firmware)
         * r4 = TLBnCFG
         * r3 = current address (more or less)
         */
        li r5,0
        mtspr SPRN_MAS6,r5
        tlbsx 0,r3
        rlwinm r9,r4,0,TLBnCFG_N_ENTRY
        rlwinm r10,r4,8,0xff
        addi r10,r10,-1 /* Get inner loop mask */
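        /* r9 = total number of entries (TLBnCFG[NENTRY]); the rotate
         * by 8 pulls what we take to be the associativity byte out of
         * the top of TLBnCFG, so r10 becomes a ways-1 mask for the way
         * index.
         */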
        li r3,1
        mfspr r5,SPRN_MAS1
        rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
        mfspr r6,SPRN_MAS2
        rldicr r6,r6,0,51 /* Extract EPN */
        mfspr r7,SPRN_MAS0
        rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */
        rlwinm r8,r7,16,0xfff /* Extract ESEL */
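        /* Flush loop, roughly: starting at index 1 skips the one
         * combination that maps back onto our own entry. Each iteration
         * writes our invalidated MAS1/MAS2 image into way (r3 + r8) &
         * r10, and every time the way index wraps the EPN advances by
         * 1GB so the writes land in the next congruence class. After
         * r9 - 1 iterations every entry except ours has been
         * overwritten, IPROT or not (tlbwe, unlike tlbilx, can replace
         * IPROT entries).
         */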
2:      add r4,r3,r8
        and r4,r4,r10
        rlwimi r7,r4,16,MAS0_ESEL_MASK
        mtspr SPRN_MAS0,r7
        mtspr SPRN_MAS1,r5
        mtspr SPRN_MAS2,r6
        tlbwe
        addi r3,r3,1
        and. r4,r3,r10
        bne 3f
        addis r6,r6,(1<<30)@h
3:
        cmpw r3,r9
        blt 2b
        .globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:
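        /* At this point only our own (IPROT) linear-mapping entry
         * should remain; the tlbilx below flushes whatever
         * non-protected entries are left.
         */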
        PPC_TLBILX(0,0,R0)
        sync
        isync

        .globl a2_tlbinit_code_end
a2_tlbinit_code_end:
        /* We translate LR and return */
        mflr r3
        tovirt(r3,r3)
        mtlr r3
        blr
/*
 * Main entry (boot CPU, thread 0)
 *
 * We enter here from head_64.S, possibly after the prom_init trampoline,
 * with r3 and r4 already saved to r31 and r30 respectively, and in 64-bit
 * mode. Anything else is as the bootloader left it.
 *
 * Initial requirements of this port:
 *
 * - Kernel loaded at 0 physical
 * - A good lump of memory mapped 0:0 by UTLB entry 0
 * - MSR:IS & MSR:DS set to 0
 *
 * Note that some of the above requirements will be relaxed in the future
 * as the kernel becomes smarter at dealing with different initial
 * conditions, but for now you have to be careful.
 */
_GLOBAL(start_initialization_book3e)
        mflr r28
        /* First, we need to set up some initial TLBs to map the kernel
         * text, data and bss at PAGE_OFFSET. We don't have a real mode
         * and always use AS 0, so we just set it up to match our link
         * address and never use 0-based addresses.
         */
        bl initial_tlb_book3e
        /* Init global core bits */
        bl init_core_book3e
        /* Init per-thread bits */
        bl init_thread_book3e
        /* Return to common init code */
        tovirt(r28,r28)
        mtlr r28
        blr
/*
 * Secondary core/processor entry
 *
 * This is entered for thread 0 of a secondary core; all other threads
 * are expected to be stopped. It's similar to start_initialization_book3e
 * except that it's generally entered from the holding loop in head_64.S
 * after CPUs have been gathered by Open Firmware.
 *
 * We assume we are in 32-bit mode, running with whatever TLB entry was
 * set up for us by the firmware or POR engine.
 */
_GLOBAL(book3e_secondary_core_init_tlb_set)
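        /* r4 = 1 flags that a TLB entry is already set up for us, so
         * book3e_secondary_core_init below (presumably reached via
         * generic_secondary_smp_init) skips initial_tlb_book3e.
         */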
        li r4,1
        b generic_secondary_smp_init

_GLOBAL(book3e_secondary_core_init)
        mflr r28
        /* Do we need to set up an initial TLB entry? */
        cmplwi r4,0
        bne 2f
        /* Setup TLB for this core */
        bl initial_tlb_book3e
        /* We can return from the above running at a different
         * address, so recalculate r2 (TOC)
         */
        bl relative_toc
        /* Init global core bits */
2:      bl init_core_book3e
        /* Init per-thread bits */
3:      bl init_thread_book3e
        /* Return to common init code at proper virtual address.
         *
         * Due to various previous assumptions, we know we entered this
         * function at either the final PAGE_OFFSET mapping or using a
         * 1:1 mapping at 0, so we don't bother doing a complicated check
         * here, we just ensure the return address has the right top bits.
         *
         * Note that if we ever want to be smarter about where we can be
         * started from, we have to be careful that by the time we reach
         * the code below we may already be running at a different location
         * than the one we were called from since initial_tlb_book3e can
         * have moved us already.
         */
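        /* A PAGE_OFFSET address has its top bits set and so compares
         * negative as a signed value; a 1:1 (physical) return address
         * compares positive and gets the PAGE_OFFSET top bits OR'd in.
         */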
        cmpdi cr0,r28,0
        blt 1f
        lis r3,PAGE_OFFSET@highest
        sldi r3,r3,32
        or r28,r28,r3
1:      mtlr r28
        blr

_GLOBAL(book3e_secondary_thread_init)
        mflr r28
        b 3b

        .globl init_core_book3e
init_core_book3e:
        /* Establish the interrupt vector base */
        tovirt(r2,r2)
        LOAD_REG_ADDR(r3, interrupt_base_book3e)
        mtspr SPRN_IVPR,r3
        sync
        blr
init_thread_book3e:
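        /* EPCR[ICM] and EPCR[GICM] select 64-bit computation mode for
         * interrupts and guest interrupts respectively (as we read the
         * Book3E architecture).
         */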
        lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
        mtspr SPRN_EPCR,r3
        /* Make sure interrupts are off */
        wrteei 0
        /* disable all timers and clear out status */
        li r3,0
        mtspr SPRN_TCR,r3
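        /* TSR bits are write-1-to-clear: writing back the value we just
         * read clears all pending timer status.
         */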
        mfspr r3,SPRN_TSR
        mtspr SPRN_TSR,r3
        blr

_GLOBAL(__setup_base_ivors)
        SET_IVOR(0, 0x020) /* Critical Input */
        SET_IVOR(1, 0x000) /* Machine Check */
        SET_IVOR(2, 0x060) /* Data Storage */
        SET_IVOR(3, 0x080) /* Instruction Storage */
        SET_IVOR(4, 0x0a0) /* External Input */
        SET_IVOR(5, 0x0c0) /* Alignment */
        SET_IVOR(6, 0x0e0) /* Program */
        SET_IVOR(7, 0x100) /* FP Unavailable */
        SET_IVOR(8, 0x120) /* System Call */
        SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
        SET_IVOR(10, 0x160) /* Decrementer */
        SET_IVOR(11, 0x180) /* Fixed Interval Timer */
        SET_IVOR(12, 0x1a0) /* Watchdog Timer */
        SET_IVOR(13, 0x1c0) /* Data TLB Error */
        SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
        SET_IVOR(15, 0x040) /* Debug */
        sync
        blr

_GLOBAL(setup_altivec_ivors)
        SET_IVOR(32, 0x200) /* AltiVec Unavailable */
        SET_IVOR(33, 0x220) /* AltiVec Assist */
        blr

_GLOBAL(setup_perfmon_ivor)
        SET_IVOR(35, 0x260) /* Performance Monitor */
        blr

_GLOBAL(setup_doorbell_ivors)
        SET_IVOR(36, 0x280) /* Processor Doorbell */
        SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
        blr

_GLOBAL(setup_ehv_ivors)
        SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
        SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
        SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
        SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
        blr

_GLOBAL(setup_lrat_ivor)
        SET_IVOR(42, 0x340) /* LRAT Error */
        blr