- /* SPDX-License-Identifier: GPL-2.0-or-later */
- /*
- * Boot code and exception vectors for Book3E processors
- *
- * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
- */
- #include <linux/threads.h>
- #include <asm/reg.h>
- #include <asm/page.h>
- #include <asm/ppc_asm.h>
- #include <asm/asm-offsets.h>
- #include <asm/cputable.h>
- #include <asm/setup.h>
- #include <asm/thread_info.h>
- #include <asm/reg_a2.h>
- #include <asm/exception-64e.h>
- #include <asm/bug.h>
- #include <asm/irqflags.h>
- #include <asm/ptrace.h>
- #include <asm/ppc-opcode.h>
- #include <asm/mmu.h>
- #include <asm/hw_irq.h>
- #include <asm/kvm_asm.h>
- #include <asm/kvm_booke_hv_asm.h>
- #include <asm/feature-fixups.h>
- #include <asm/context_tracking.h>
- /* XXX This will ultimately add space for a special exception save
- * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
- * when taking special interrupts. For now we don't support that;
- * special interrupts from within a non-standard level will probably
- * blow you up.
- */
- #define SPECIAL_EXC_SRR0 0
- #define SPECIAL_EXC_SRR1 1
- #define SPECIAL_EXC_SPRG_GEN 2
- #define SPECIAL_EXC_SPRG_TLB 3
- #define SPECIAL_EXC_MAS0 4
- #define SPECIAL_EXC_MAS1 5
- #define SPECIAL_EXC_MAS2 6
- #define SPECIAL_EXC_MAS3 7
- #define SPECIAL_EXC_MAS6 8
- #define SPECIAL_EXC_MAS7 9
- #define SPECIAL_EXC_MAS5 10 /* E.HV only */
- #define SPECIAL_EXC_MAS8 11 /* E.HV only */
- #define SPECIAL_EXC_IRQHAPPENED 12
- #define SPECIAL_EXC_DEAR 13
- #define SPECIAL_EXC_ESR 14
- #define SPECIAL_EXC_SOFTE 15
- #define SPECIAL_EXC_CSRR0 16
- #define SPECIAL_EXC_CSRR1 17
- /* must be even to keep 16-byte stack alignment */
- #define SPECIAL_EXC_END 18
- #define SPECIAL_EXC_FRAME_SIZE (INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
- #define SPECIAL_EXC_FRAME_OFFS (INT_FRAME_SIZE - 288)
- #define SPECIAL_EXC_STORE(reg, name) \
- std reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
- #define SPECIAL_EXC_LOAD(reg, name) \
- ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
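- /*
- * For reference, SPECIAL_EXC_STORE(r10,SRR0) expands to:
- *
- *   std r10, (0 * 8 + INT_FRAME_SIZE - 288)(r1)
- *
- * i.e. the save area sits inside the oversized SPECIAL_EXC_FRAME_SIZE
- * frame allocated by the CRIT/DBG/MC_SET_KSTACK macros further down.
- */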
- special_reg_save:
- lbz r9,PACAIRQHAPPENED(r13)
- RECONCILE_IRQ_STATE(r3,r4)
- /*
- * We only need (or have stack space) to save this stuff if
- * we interrupted the kernel.
- */
- ld r3,_MSR(r1)
- andi. r3,r3,MSR_PR
- bnelr
- /*
- * Advance to the next TLB exception frame for handler
- * types that don't do it automatically.
- */
- LOAD_REG_ADDR(r11,extlb_level_exc)
- lwz r12,0(r11)
- mfspr r10,SPRN_SPRG_TLB_EXFRAME
- add r10,r10,r12
- mtspr SPRN_SPRG_TLB_EXFRAME,r10
- /*
- * Save registers needed to allow nesting of certain exceptions
- * (such as TLB misses) inside special exception levels
- */
- mfspr r10,SPRN_SRR0
- SPECIAL_EXC_STORE(r10,SRR0)
- mfspr r10,SPRN_SRR1
- SPECIAL_EXC_STORE(r10,SRR1)
- mfspr r10,SPRN_SPRG_GEN_SCRATCH
- SPECIAL_EXC_STORE(r10,SPRG_GEN)
- mfspr r10,SPRN_SPRG_TLB_SCRATCH
- SPECIAL_EXC_STORE(r10,SPRG_TLB)
- mfspr r10,SPRN_MAS0
- SPECIAL_EXC_STORE(r10,MAS0)
- mfspr r10,SPRN_MAS1
- SPECIAL_EXC_STORE(r10,MAS1)
- mfspr r10,SPRN_MAS2
- SPECIAL_EXC_STORE(r10,MAS2)
- mfspr r10,SPRN_MAS3
- SPECIAL_EXC_STORE(r10,MAS3)
- mfspr r10,SPRN_MAS6
- SPECIAL_EXC_STORE(r10,MAS6)
- mfspr r10,SPRN_MAS7
- SPECIAL_EXC_STORE(r10,MAS7)
- BEGIN_FTR_SECTION
- mfspr r10,SPRN_MAS5
- SPECIAL_EXC_STORE(r10,MAS5)
- mfspr r10,SPRN_MAS8
- SPECIAL_EXC_STORE(r10,MAS8)
- /* MAS5/8 could have inappropriate values if we interrupted KVM code */
- li r10,0
- mtspr SPRN_MAS5,r10
- mtspr SPRN_MAS8,r10
- END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
- SPECIAL_EXC_STORE(r9,IRQHAPPENED)
- mfspr r10,SPRN_DEAR
- SPECIAL_EXC_STORE(r10,DEAR)
- mfspr r10,SPRN_ESR
- SPECIAL_EXC_STORE(r10,ESR)
- lbz r10,PACAIRQSOFTMASK(r13)
- SPECIAL_EXC_STORE(r10,SOFTE)
- ld r10,_NIP(r1)
- SPECIAL_EXC_STORE(r10,CSRR0)
- ld r10,_MSR(r1)
- SPECIAL_EXC_STORE(r10,CSRR1)
- blr
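- /*
- * Undo what special_reg_save did: pop the TLB exception frame, flush
- * non-bolted TLB entries (see the comment below), and restore the SPRs
- * that were stashed in the special save area before the level-specific
- * epilogue runs.
- */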
- ret_from_level_except:
- ld r3,_MSR(r1)
- andi. r3,r3,MSR_PR
- beq 1f
- b ret_from_except
- 1:
- LOAD_REG_ADDR(r11,extlb_level_exc)
- lwz r12,0(r11)
- mfspr r10,SPRN_SPRG_TLB_EXFRAME
- sub r10,r10,r12
- mtspr SPRN_SPRG_TLB_EXFRAME,r10
- /*
- * It's possible that the special level exception interrupted a
- * TLB miss handler, and inserted the same entry that the
- * interrupted handler was about to insert. On CPUs without TLB
- * write conditional, this can result in a duplicate TLB entry.
- * Wipe all non-bolted entries to be safe.
- *
- * Note that this doesn't protect against any TLB misses
- * we may take accessing the stack from here to the end of
- * the special level exception. It's not clear how we can
- * reasonably protect against that, but only CPUs with
- * neither TLB write conditional nor bolted kernel memory
- * are affected. Do any such CPUs even exist?
- */
- PPC_TLBILX_ALL(0,R0)
- REST_NVGPRS(r1)
- SPECIAL_EXC_LOAD(r10,SRR0)
- mtspr SPRN_SRR0,r10
- SPECIAL_EXC_LOAD(r10,SRR1)
- mtspr SPRN_SRR1,r10
- SPECIAL_EXC_LOAD(r10,SPRG_GEN)
- mtspr SPRN_SPRG_GEN_SCRATCH,r10
- SPECIAL_EXC_LOAD(r10,SPRG_TLB)
- mtspr SPRN_SPRG_TLB_SCRATCH,r10
- SPECIAL_EXC_LOAD(r10,MAS0)
- mtspr SPRN_MAS0,r10
- SPECIAL_EXC_LOAD(r10,MAS1)
- mtspr SPRN_MAS1,r10
- SPECIAL_EXC_LOAD(r10,MAS2)
- mtspr SPRN_MAS2,r10
- SPECIAL_EXC_LOAD(r10,MAS3)
- mtspr SPRN_MAS3,r10
- SPECIAL_EXC_LOAD(r10,MAS6)
- mtspr SPRN_MAS6,r10
- SPECIAL_EXC_LOAD(r10,MAS7)
- mtspr SPRN_MAS7,r10
- BEGIN_FTR_SECTION
- SPECIAL_EXC_LOAD(r10,MAS5)
- mtspr SPRN_MAS5,r10
- SPECIAL_EXC_LOAD(r10,MAS8)
- mtspr SPRN_MAS8,r10
- END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
- lbz r6,PACAIRQSOFTMASK(r13)
- ld r5,SOFTE(r1)
- /* Interrupts had better not already be enabled... */
- tweqi r6,IRQS_ENABLED
- andi. r6,r5,IRQS_DISABLED
- bne 1f
- TRACE_ENABLE_INTS
- stb r5,PACAIRQSOFTMASK(r13)
- 1:
- /*
- * Restore PACAIRQHAPPENED rather than setting it based on
- * the return MSR[EE], since we could have interrupted
- * __check_irq_replay() or other inconsistent transitory
- * states that must remain that way.
- */
- SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
- stb r10,PACAIRQHAPPENED(r13)
- SPECIAL_EXC_LOAD(r10,DEAR)
- mtspr SPRN_DEAR,r10
- SPECIAL_EXC_LOAD(r10,ESR)
- mtspr SPRN_ESR,r10
- stdcx. r0,0,r1 /* to clear the reservation */
- REST_4GPRS(2, r1)
- REST_4GPRS(6, r1)
- ld r10,_CTR(r1)
- ld r11,_XER(r1)
- mtctr r10
- mtxer r11
- blr
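- /*
- * Final exit sequence for a special level exception. r10/r11 are
- * bounced through the PACA save area because once r1 is restored the
- * stack can no longer be used, and the interrupted r13 value is parked
- * in the level's scratch SPRG and only moved back into r13 by the
- * final mfspr, just before the rfci/rfmci.
- */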
- .macro ret_from_level srr0 srr1 paca_ex scratch
- bl ret_from_level_except
- ld r10,_LINK(r1)
- ld r11,_CCR(r1)
- ld r0,GPR13(r1)
- mtlr r10
- mtcr r11
- ld r10,GPR10(r1)
- ld r11,GPR11(r1)
- ld r12,GPR12(r1)
- mtspr \scratch,r0
- std r10,\paca_ex+EX_R10(r13);
- std r11,\paca_ex+EX_R11(r13);
- ld r10,_NIP(r1)
- ld r11,_MSR(r1)
- ld r0,GPR0(r1)
- ld r1,GPR1(r1)
- mtspr \srr0,r10
- mtspr \srr1,r11
- ld r10,\paca_ex+EX_R10(r13)
- ld r11,\paca_ex+EX_R11(r13)
- mfspr r13,\scratch
- .endm
- ret_from_crit_except:
- ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
- rfci
- ret_from_mc_except:
- ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
- rfmci
- /* Exception prolog code for all exceptions */
- #define EXCEPTION_PROLOG(n, intnum, type, addition) \
- mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \
- mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \
- std r10,PACA_EX##type+EX_R10(r13); \
- std r11,PACA_EX##type+EX_R11(r13); \
- mfcr r10; /* save CR */ \
- mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
- DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \
- stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
- addition; /* additional code for that exc. */ \
- std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \
- type##_SET_KSTACK; /* get special stack if necessary */\
- andi. r10,r11,MSR_PR; /* save stack pointer */ \
- beq 1f; /* branch around if supervisor */ \
- ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
- 1: type##_BTB_FLUSH \
- cmpdi cr1,r1,0; /* check if SP makes sense */ \
- bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
- mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
- /* Exception type-specific macros */
- #define GEN_SET_KSTACK \
- subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */
- #define SPRN_GEN_SRR0 SPRN_SRR0
- #define SPRN_GEN_SRR1 SPRN_SRR1
- #define GDBELL_SET_KSTACK GEN_SET_KSTACK
- #define SPRN_GDBELL_SRR0 SPRN_GSRR0
- #define SPRN_GDBELL_SRR1 SPRN_GSRR1
- #define CRIT_SET_KSTACK \
- ld r1,PACA_CRIT_STACK(r13); \
- subi r1,r1,SPECIAL_EXC_FRAME_SIZE
- #define SPRN_CRIT_SRR0 SPRN_CSRR0
- #define SPRN_CRIT_SRR1 SPRN_CSRR1
- #define DBG_SET_KSTACK \
- ld r1,PACA_DBG_STACK(r13); \
- subi r1,r1,SPECIAL_EXC_FRAME_SIZE
- #define SPRN_DBG_SRR0 SPRN_DSRR0
- #define SPRN_DBG_SRR1 SPRN_DSRR1
- #define MC_SET_KSTACK \
- ld r1,PACA_MC_STACK(r13); \
- subi r1,r1,SPECIAL_EXC_FRAME_SIZE
- #define SPRN_MC_SRR0 SPRN_MCSRR0
- #define SPRN_MC_SRR1 SPRN_MCSRR1
- #ifdef CONFIG_PPC_FSL_BOOK3E
- #define GEN_BTB_FLUSH \
- START_BTB_FLUSH_SECTION \
- beq 1f; \
- BTB_FLUSH(r10) \
- 1: \
- END_BTB_FLUSH_SECTION
- #define CRIT_BTB_FLUSH \
- START_BTB_FLUSH_SECTION \
- BTB_FLUSH(r10) \
- END_BTB_FLUSH_SECTION
- #define DBG_BTB_FLUSH CRIT_BTB_FLUSH
- #define MC_BTB_FLUSH CRIT_BTB_FLUSH
- #define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
- #else
- #define GEN_BTB_FLUSH
- #define CRIT_BTB_FLUSH
- #define DBG_BTB_FLUSH
- #define MC_BTB_FLUSH
- #define GDBELL_BTB_FLUSH
- #endif
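- /*
- * Note: GEN (and GDBELL) reuse cr0 from the MSR_PR test in the prolog,
- * so the BTB is only flushed on entry from userspace; the CRIT/DBG/MC
- * variants flush unconditionally.
- */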
- #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
- EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
- #define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \
- EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
- #define DBG_EXCEPTION_PROLOG(n, intnum, addition) \
- EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
- #define MC_EXCEPTION_PROLOG(n, intnum, addition) \
- EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
- #define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \
- EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
- /* Variants of the "addition" argument for the prolog
- */
- #define PROLOG_ADDITION_NONE_GEN(n)
- #define PROLOG_ADDITION_NONE_GDBELL(n)
- #define PROLOG_ADDITION_NONE_CRIT(n)
- #define PROLOG_ADDITION_NONE_DBG(n)
- #define PROLOG_ADDITION_NONE_MC(n)
- #define PROLOG_ADDITION_MASKABLE_GEN(n) \
- lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \
- andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
- bne masked_interrupt_book3e_##n
- #define PROLOG_ADDITION_2REGS_GEN(n) \
- std r14,PACA_EXGEN+EX_R14(r13); \
- std r15,PACA_EXGEN+EX_R15(r13)
- #define PROLOG_ADDITION_1REG_GEN(n) \
- std r14,PACA_EXGEN+EX_R14(r13);
- #define PROLOG_ADDITION_2REGS_CRIT(n) \
- std r14,PACA_EXCRIT+EX_R14(r13); \
- std r15,PACA_EXCRIT+EX_R15(r13)
- #define PROLOG_ADDITION_2REGS_DBG(n) \
- std r14,PACA_EXDBG+EX_R14(r13); \
- std r15,PACA_EXDBG+EX_R15(r13)
- #define PROLOG_ADDITION_2REGS_MC(n) \
- std r14,PACA_EXMC+EX_R14(r13); \
- std r15,PACA_EXMC+EX_R15(r13)
- /* Core exception code for all exceptions except TLB misses. */
- #define EXCEPTION_COMMON_LVL(n, scratch, excf) \
- exc_##n##_common: \
- std r0,GPR0(r1); /* save r0 in stackframe */ \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
- std r9,GPR9(r1); /* save r9 in stackframe */ \
- std r10,_NIP(r1); /* save SRR0 to stackframe */ \
- std r11,_MSR(r1); /* save SRR1 to stackframe */ \
- beq 2f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \
- 2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
- ld r4,excf+EX_R11(r13); /* get back r11 */ \
- mfspr r5,scratch; /* get back r13 */ \
- std r12,GPR12(r1); /* save r12 in stackframe */ \
- ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
- mflr r6; /* save LR in stackframe */ \
- mfctr r7; /* save CTR in stackframe */ \
- mfspr r8,SPRN_XER; /* save XER in stackframe */ \
- ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \
- lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \
- lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \
- ld r12,exception_marker@toc(r2); \
- li r0,0; \
- std r3,GPR10(r1); /* save r10 to stackframe */ \
- std r4,GPR11(r1); /* save r11 to stackframe */ \
- std r5,GPR13(r1); /* save it to stackframe */ \
- std r6,_LINK(r1); \
- std r7,_CTR(r1); \
- std r8,_XER(r1); \
- li r3,(n)+1; /* indicate partial regs in trap */ \
- std r9,0(r1); /* store stack frame back link */ \
- std r10,_CCR(r1); /* store orig CR in stackframe */ \
- std r9,GPR1(r1); /* store stack frame back link */ \
- std r11,SOFTE(r1); /* and save it to stackframe */ \
- std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
- std r3,_TRAP(r1); /* set trap number */ \
- std r0,RESULT(r1); /* clear regs->result */
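- /*
- * The trap number is stored as (n)+1: the low bit flags that the
- * non-volatile GPRs are not yet saved in the frame. save_nvgprs clears
- * that bit, and ret_from_except tests it to decide whether REST_NVGPRS
- * is needed.
- */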
- #define EXCEPTION_COMMON(n) \
- EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
- #define EXCEPTION_COMMON_CRIT(n) \
- EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
- #define EXCEPTION_COMMON_MC(n) \
- EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
- #define EXCEPTION_COMMON_DBG(n) \
- EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
- /*
- * This is meant for exceptions that don't immediately hard-enable. We
- * set a bit in paca->irq_happened to ensure that a subsequent call to
- * arch_local_irq_restore() will properly hard-enable and avoid the
- * fast-path, and then reconcile irq state.
- */
- #define INTS_DISABLE RECONCILE_IRQ_STATE(r3,r4)
- /*
- * This is called by exceptions that don't use INTS_DISABLE (that did not
- * touch irq indicators in the PACA). This will restore MSR:EE to its
- * previous value.
- *
- * XXX In the long run, we may want to open-code it in order to separate the
- * load from the wrtee, thus limiting the latency caused by the dependency,
- * but at this point I'll favor code clarity until we have a near-final
- * implementation.
- */
- #define INTS_RESTORE_HARD \
- ld r11,_MSR(r1); \
- wrtee r11;
- /* XXX FIXME: Restore r14/r15 when necessary */
- #define BAD_STACK_TRAMPOLINE(n) \
- exc_##n##_bad_stack: \
- li r1,(n); /* get exception number */ \
- sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \
- b bad_stack_book3e; /* bad stack error */
- /* WARNING: If you change the layout of this stub, make sure you check
- * the debug exception handler which handles single stepping
- * into exceptions from userspace, and the MM code in
- * arch/powerpc/mm/tlb_nohash.c which patches the branch here
- * and would need to be updated if that branch is moved
- */
- #define EXCEPTION_STUB(loc, label) \
- . = interrupt_base_book3e + loc; \
- nop; /* To make debug interrupts happy */ \
- b exc_##label##_book3e;
- #define ACK_NONE(r)
- #define ACK_DEC(r) \
- lis r,TSR_DIS@h; \
- mtspr SPRN_TSR,r
- #define ACK_FIT(r) \
- lis r,TSR_FIS@h; \
- mtspr SPRN_TSR,r
- /* Used by asynchronous interrupts that may happen in the idle loop.
- *
- * This checks if the thread was in the idle loop, and if so, returns
- * to the caller rather than to the interrupted PC. This is to avoid
- * a race if interrupts happen before the wait instruction.
- */
- #define CHECK_NAPPING() \
- ld r11, PACA_THREAD_INFO(r13); \
- ld r10,TI_LOCAL_FLAGS(r11); \
- andi. r9,r10,_TLF_NAPPING; \
- beq+ 1f; \
- ld r8,_LINK(r1); \
- rlwinm r7,r10,0,~_TLF_NAPPING; \
- std r8,_NIP(r1); \
- std r7,TI_LOCAL_FLAGS(r11); \
- 1:
- #define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \
- START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
- EXCEPTION_COMMON(trapnum) \
- INTS_DISABLE; \
- ack(r8); \
- CHECK_NAPPING(); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b ret_from_except_lite;
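- /*
- * The ack hook runs after the irq state is reconciled but before the
- * handler is called; ACK_DEC/ACK_FIT write the (write-one-to-clear)
- * TSR status bit so the timer interrupt doesn't stay asserted.
- */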
- /* This value is used to mark exception frames on the stack. */
- .section ".toc","aw"
- exception_marker:
- .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
- /*
- * And here we have the exception vectors!
- */
- .text
- .balign 0x1000
- .globl interrupt_base_book3e
- interrupt_base_book3e: /* fake trap */
- EXCEPTION_STUB(0x000, machine_check)
- EXCEPTION_STUB(0x020, critical_input) /* 0x0100 */
- EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */
- EXCEPTION_STUB(0x060, data_storage) /* 0x0300 */
- EXCEPTION_STUB(0x080, instruction_storage) /* 0x0400 */
- EXCEPTION_STUB(0x0a0, external_input) /* 0x0500 */
- EXCEPTION_STUB(0x0c0, alignment) /* 0x0600 */
- EXCEPTION_STUB(0x0e0, program) /* 0x0700 */
- EXCEPTION_STUB(0x100, fp_unavailable) /* 0x0800 */
- EXCEPTION_STUB(0x120, system_call) /* 0x0c00 */
- EXCEPTION_STUB(0x140, ap_unavailable) /* 0x0f20 */
- EXCEPTION_STUB(0x160, decrementer) /* 0x0900 */
- EXCEPTION_STUB(0x180, fixed_interval) /* 0x0980 */
- EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */
- EXCEPTION_STUB(0x1c0, data_tlb_miss)
- EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
- EXCEPTION_STUB(0x200, altivec_unavailable)
- EXCEPTION_STUB(0x220, altivec_assist)
- EXCEPTION_STUB(0x260, perfmon)
- EXCEPTION_STUB(0x280, doorbell)
- EXCEPTION_STUB(0x2a0, doorbell_crit)
- EXCEPTION_STUB(0x2c0, guest_doorbell)
- EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
- EXCEPTION_STUB(0x300, hypercall)
- EXCEPTION_STUB(0x320, ehpriv)
- EXCEPTION_STUB(0x340, lrat_error)
- .globl __end_interrupts
- __end_interrupts:
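- /*
- * The stubs above are spaced 0x20 bytes apart; their offsets from
- * interrupt_base_book3e are exactly what __setup_base_ivors and the
- * other setup_*_ivors routines at the end of this file program into
- * the IVORs.
- */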
- /* Critical Input Interrupt */
- START_EXCEPTION(critical_input);
- CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON_CRIT(0x100)
- bl save_nvgprs
- bl special_reg_save
- CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
- b ret_from_crit_except
- /* Machine Check Interrupt */
- START_EXCEPTION(machine_check);
- MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON_MC(0x000)
- bl save_nvgprs
- bl special_reg_save
- CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
- b ret_from_mc_except
- /* Data Storage Interrupt */
- START_EXCEPTION(data_storage)
- NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
- PROLOG_ADDITION_2REGS)
- mfspr r14,SPRN_DEAR
- mfspr r15,SPRN_ESR
- EXCEPTION_COMMON(0x300)
- INTS_DISABLE
- b storage_fault_common
- /* Instruction Storage Interrupt */
- START_EXCEPTION(instruction_storage);
- NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
- PROLOG_ADDITION_2REGS)
- li r15,0
- mr r14,r10
- EXCEPTION_COMMON(0x400)
- INTS_DISABLE
- b storage_fault_common
- /* External Input Interrupt */
- MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
- external_input, do_IRQ, ACK_NONE)
- /* Alignment */
- START_EXCEPTION(alignment);
- NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
- PROLOG_ADDITION_2REGS)
- mfspr r14,SPRN_DEAR
- mfspr r15,SPRN_ESR
- EXCEPTION_COMMON(0x600)
- b alignment_more /* no room, go out of line */
- /* Program Interrupt */
- START_EXCEPTION(program);
- NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
- PROLOG_ADDITION_1REG)
- mfspr r14,SPRN_ESR
- EXCEPTION_COMMON(0x700)
- INTS_DISABLE
- std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- bl save_nvgprs
- bl program_check_exception
- b ret_from_except
- /* Floating Point Unavailable Interrupt */
- START_EXCEPTION(fp_unavailable);
- NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
- PROLOG_ADDITION_NONE)
- /* we can probably do a shorter exception entry for that one... */
- EXCEPTION_COMMON(0x800)
- ld r12,_MSR(r1)
- andi. r0,r12,MSR_PR;
- beq- 1f
- bl load_up_fpu
- b fast_exception_return
- 1: INTS_DISABLE
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl kernel_fp_unavailable_exception
- b ret_from_except
- /* Altivec Unavailable Interrupt */
- START_EXCEPTION(altivec_unavailable);
- NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
- PROLOG_ADDITION_NONE)
- /* we can probably do a shorter exception entry for that one... */
- EXCEPTION_COMMON(0x200)
- #ifdef CONFIG_ALTIVEC
- BEGIN_FTR_SECTION
- ld r12,_MSR(r1)
- andi. r0,r12,MSR_PR;
- beq- 1f
- bl load_up_altivec
- b fast_exception_return
- 1:
- END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- #endif
- INTS_DISABLE
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl altivec_unavailable_exception
- b ret_from_except
- /* AltiVec Assist */
- START_EXCEPTION(altivec_assist);
- NORMAL_EXCEPTION_PROLOG(0x220,
- BOOKE_INTERRUPT_ALTIVEC_ASSIST,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0x220)
- INTS_DISABLE
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- #ifdef CONFIG_ALTIVEC
- BEGIN_FTR_SECTION
- bl altivec_assist_exception
- END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- #else
- bl unknown_exception
- #endif
- b ret_from_except
- /* Decrementer Interrupt */
- MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
- decrementer, timer_interrupt, ACK_DEC)
- /* Fixed Interval Timer Interrupt */
- MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
- fixed_interval, unknown_exception, ACK_FIT)
- /* Watchdog Timer Interrupt */
- START_EXCEPTION(watchdog);
- CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON_CRIT(0x9f0)
- bl save_nvgprs
- bl special_reg_save
- CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
- #ifdef CONFIG_BOOKE_WDT
- bl WatchdogException
- #else
- bl unknown_exception
- #endif
- b ret_from_crit_except
- /* System Call Interrupt */
- START_EXCEPTION(system_call)
- mr r9,r13 /* keep a copy of userland r13 */
- mfspr r11,SPRN_SRR0 /* get return address */
- mfspr r12,SPRN_SRR1 /* get previous MSR */
- mfspr r13,SPRN_SPRG_PACA /* get our PACA */
- b system_call_common
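- /*
- * system_call_common is the generic 64-bit system call entry path,
- * shared with the Book3S server code; only the thin register shuffle
- * above is Book3E-specific.
- */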
- /* Auxiliary Processor Unavailable Interrupt */
- START_EXCEPTION(ap_unavailable);
- NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0xf20)
- INTS_DISABLE
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
- b ret_from_except
- /* Debug exception as a critical interrupt */
- START_EXCEPTION(debug_crit);
- CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
- PROLOG_ADDITION_2REGS)
- /*
- * If there is a single step or branch-taken exception in an
- * exception entry sequence, it was probably meant to apply to
- * the code where the exception occurred (since exception entry
- * doesn't turn off DE automatically). We simulate the effect
- * of turning off DE on entry to an exception handler by turning
- * off DE in the CSRR1 value and clearing the debug status.
- */
- mfspr r14,SPRN_DBSR /* check single-step/branch taken */
- andis. r15,r14,(DBSR_IC|DBSR_BT)@h
- beq+ 1f
- #ifdef CONFIG_RELOCATABLE
- ld r15,PACATOC(r13)
- ld r14,interrupt_base_book3e@got(r15)
- ld r15,__end_interrupts@got(r15)
- cmpld cr0,r10,r14
- cmpld cr1,r10,r15
- #else
- LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
- cmpld cr0, r10, r14
- LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
- cmpld cr1, r10, r14
- #endif
- blt+ cr0,1f
- bge+ cr1,1f
- /* here it looks like we got an inappropriate debug exception. */
- lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
- rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */
- mtspr SPRN_DBSR,r14
- mtspr SPRN_CSRR1,r11
- lwz r10,PACA_EXCRIT+EX_CR(r13) /* restore registers */
- ld r1,PACA_EXCRIT+EX_R1(r13)
- ld r14,PACA_EXCRIT+EX_R14(r13)
- ld r15,PACA_EXCRIT+EX_R15(r13)
- mtcr r10
- ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */
- ld r11,PACA_EXCRIT+EX_R11(r13)
- mfspr r13,SPRN_SPRG_CRIT_SCRATCH
- rfci
- /* Normal debug exception */
- /* XXX We only handle coming from userspace for now since we can't
- * yet properly save an interrupted kernel state
- */
- 1: andi. r14,r11,MSR_PR; /* check for userspace again */
- beq kernel_dbg_exc; /* if from kernel mode */
- /* Now we mash things up to make it look like we are coming in on a
- * normal exception
- */
- mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_CRIT(0xd00)
- std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
- ld r14,PACA_EXCRIT+EX_R14(r13)
- ld r15,PACA_EXCRIT+EX_R15(r13)
- bl save_nvgprs
- bl DebugException
- b ret_from_except
- kernel_dbg_exc:
- b . /* NYI */
- /* Debug exception as a debug interrupt */
- START_EXCEPTION(debug_debug);
- DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
- PROLOG_ADDITION_2REGS)
- /*
- * If there is a single step or branch-taken exception in an
- * exception entry sequence, it was probably meant to apply to
- * the code where the exception occurred (since exception entry
- * doesn't turn off DE automatically). We simulate the effect
- * of turning off DE on entry to an exception handler by turning
- * off DE in the DSRR1 value and clearing the debug status.
- */
- mfspr r14,SPRN_DBSR /* check single-step/branch taken */
- andis. r15,r14,(DBSR_IC|DBSR_BT)@h
- beq+ 1f
- #ifdef CONFIG_RELOCATABLE
- ld r15,PACATOC(r13)
- ld r14,interrupt_base_book3e@got(r15)
- ld r15,__end_interrupts@got(r15)
- cmpld cr0,r10,r14
- cmpld cr1,r10,r15
- #else
- LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
- cmpld cr0, r10, r14
- LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts)
- cmpld cr1, r10, r14
- #endif
- blt+ cr0,1f
- bge+ cr1,1f
- /* here it looks like we got an inappropriate debug exception. */
- lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
- rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
- mtspr SPRN_DBSR,r14
- mtspr SPRN_DSRR1,r11
- lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */
- ld r1,PACA_EXDBG+EX_R1(r13)
- ld r14,PACA_EXDBG+EX_R14(r13)
- ld r15,PACA_EXDBG+EX_R15(r13)
- mtcr r10
- ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */
- ld r11,PACA_EXDBG+EX_R11(r13)
- mfspr r13,SPRN_SPRG_DBG_SCRATCH
- rfdi
- /* Normal debug exception */
- /* XXX We only handle coming from userspace for now since we can't
- * yet properly save an interrupted kernel state
- */
- 1: andi. r14,r11,MSR_PR; /* check for userspace again */
- beq kernel_dbg_exc; /* if from kernel mode */
- /* Now we mash things up to make it look like we are coming in on a
- * normal exception
- */
- mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_DBG(0xd08)
- INTS_DISABLE
- std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
- ld r14,PACA_EXDBG+EX_R14(r13)
- ld r15,PACA_EXDBG+EX_R15(r13)
- bl save_nvgprs
- bl DebugException
- b ret_from_except
- START_EXCEPTION(perfmon);
- NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0x260)
- INTS_DISABLE
- CHECK_NAPPING()
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl performance_monitor_exception
- b ret_from_except_lite
- /* Doorbell interrupt */
- MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
- doorbell, doorbell_exception, ACK_NONE)
- /* Doorbell critical Interrupt */
- START_EXCEPTION(doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON_CRIT(0x2a0)
- bl save_nvgprs
- bl special_reg_save
- CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
- b ret_from_crit_except
- /*
- * Guest doorbell interrupt
- * This general exception uses the GSRRx save/restore registers
- */
- START_EXCEPTION(guest_doorbell);
- GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0x2c0)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
- bl unknown_exception
- b ret_from_except
- /* Guest Doorbell critical Interrupt */
- START_EXCEPTION(guest_doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON_CRIT(0x2e0)
- bl save_nvgprs
- bl special_reg_save
- CHECK_NAPPING();
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
- b ret_from_crit_except
- /* Hypervisor call */
- START_EXCEPTION(hypercall);
- NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0x310)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
- bl unknown_exception
- b ret_from_except
- /* Embedded Hypervisor privileged */
- START_EXCEPTION(ehpriv);
- NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0x320)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
- bl unknown_exception
- b ret_from_except
- /* LRAT Error interrupt */
- START_EXCEPTION(lrat_error);
- NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
- PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0x340)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
- bl unknown_exception
- b ret_from_except
- /*
- * An interrupt came in while soft-disabled; we mark paca->irq_happened
- * accordingly and, if the interrupt is level sensitive, we hard disable.
- * Hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
- * keep these in sync.
- */
- .macro masked_interrupt_book3e paca_irq full_mask
- lbz r10,PACAIRQHAPPENED(r13)
- .if \full_mask == 1
- ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
- .else
- ori r10,r10,\paca_irq
- .endif
- stb r10,PACAIRQHAPPENED(r13)
- .if \full_mask == 1
- rldicl r10,r11,48,1 /* clear MSR_EE */
- rotldi r11,r10,16
- mtspr SPRN_SRR1,r11
- .endif
- lwz r11,PACA_EXGEN+EX_CR(r13)
- mtcr r11
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- mfspr r13,SPRN_SPRG_GEN_SCRATCH
- rfi
- b .
- .endm
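- /*
- * The rldicl/rotldi pair above clears MSR_EE without needing an
- * immediate mask: rotating left by 48 brings the EE bit to bit 0,
- * where the rldicl mask drops it, and the further rotate by 16
- * (48 + 16 = 64) puts everything back in place.
- */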
- masked_interrupt_book3e_0x500:
- masked_interrupt_book3e PACA_IRQ_EE 1
- masked_interrupt_book3e_0x900:
- ACK_DEC(r10);
- masked_interrupt_book3e PACA_IRQ_DEC 0
- masked_interrupt_book3e_0x980:
- ACK_FIT(r10);
- masked_interrupt_book3e PACA_IRQ_DEC 0
- masked_interrupt_book3e_0x280:
- masked_interrupt_book3e_0x2c0:
- masked_interrupt_book3e PACA_IRQ_DBELL 0
- /*
- * This is called from 0x300 and 0x400 handlers after the prologs with
- * r14 and r15 containing the fault address and error code, with the
- * original values stashed away in the PACA
- */
- storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
- mr r5,r15
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
- bl do_page_fault
- cmpdi r3,0
- bne- 1f
- b ret_from_except_lite
- 1: bl save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl bad_page_fault
- b ret_from_except
- /*
- * Alignment exception doesn't fit entirely in the 0x100 bytes so it
- * continues here.
- */
- alignment_more:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
- bl save_nvgprs
- INTS_RESTORE_HARD
- bl alignment_exception
- b ret_from_except
- .align 7
- _GLOBAL(ret_from_except)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- bne ret_from_except_lite
- REST_NVGPRS(r1)
- _GLOBAL(ret_from_except_lite)
- /*
- * Disable interrupts so that current_thread_info()->flags
- * can't change between when we test it and when we return
- * from the interrupt.
- */
- wrteei 0
- ld r9, PACA_THREAD_INFO(r13)
- ld r3,_MSR(r1)
- ld r10,PACACURRENT(r13)
- ld r4,TI_FLAGS(r9)
- andi. r3,r3,MSR_PR
- beq resume_kernel
- lwz r3,(THREAD+THREAD_DBCR0)(r10)
- /* Check current_thread_info()->flags */
- andi. r0,r4,_TIF_USER_WORK_MASK
- bne 1f
- /*
- * Check to see if the dbcr0 register is set up to debug.
- * Use the internal debug mode bit to do this.
- */
- andis. r0,r3,DBCR0_IDM@h
- beq restore
- mfmsr r0
- rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
- mtmsr r0
- mtspr SPRN_DBCR0,r3
- li r10, -1
- mtspr SPRN_DBSR,r10
- b restore
- 1: andi. r0,r4,_TIF_NEED_RESCHED
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
- b ret_from_except_lite
- 2:
- bl save_nvgprs
- /*
- * Use a non volatile GPR to save and restore our thread_info flags
- * across the call to restore_interrupts.
- */
- mr r30,r4
- bl restore_interrupts
- mr r4,r30
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_notify_resume
- b ret_from_except
- resume_kernel:
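- /*
- * An instruction emulation may have needed to perform a store to the
- * kernel stack it was itself running on (e.g. an emulated stdu updating
- * r1); it flags that with _TIF_EMULATE_STACK_STORE. Complete the store
- * here by copying the exception frame down to the new r1 and writing
- * the back-chain word the stdu would have written.
- */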
- /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
- beq+ 1f
- addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
- ld r3,GPR1(r1)
- subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
- mr r4,r1 /* src: current exception frame */
- mr r1,r3 /* Reroute the trampoline frame to r1 */
- /* Copy from the original to the trampoline. */
- li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
- li r6,0 /* start offset: 0 */
- mtctr r5
- 2: ldx r0,r6,r4
- stdx r0,r6,r3
- addi r6,r6,8
- bdnz 2b
- /* Do real store operation to complete stdu */
- ld r5,GPR1(r1)
- std r8,0(r5)
- /* Clear _TIF_EMULATE_STACK_STORE flag */
- lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r9,TI_FLAGS
- 0: ldarx r4,0,r5
- andc r4,r4,r11
- stdcx. r4,0,r5
- bne- 0b
- 1:
- #ifdef CONFIG_PREEMPT
- /* Check if we need to preempt */
- andi. r0,r4,_TIF_NEED_RESCHED
- beq+ restore
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr0,r8,0
- bne restore
- ld r0,SOFTE(r1)
- andi. r0,r0,IRQS_DISABLED
- bne restore
- /*
- * Here we are preempting the current task. We want to make
- * sure we are soft-disabled first and reconcile irq state.
- */
- RECONCILE_IRQ_STATE(r3,r4)
- bl preempt_schedule_irq
- /*
- * arch_local_irq_restore() from preempt_schedule_irq above may
- * hard-enable interrupts, but we really should have them disabled
- * when we return from the interrupt so that we don't get interrupted
- * after loading SRR0/1.
- */
- wrteei 0
- #endif /* CONFIG_PREEMPT */
- restore:
- /*
- * This is the main kernel exit path. First we check if we
- * are about to re-enable interrupts
- */
- ld r5,SOFTE(r1)
- lbz r6,PACAIRQSOFTMASK(r13)
- andi. r5,r5,IRQS_DISABLED
- bne .Lrestore_irq_off
- /* We are enabling; were we already enabled? If so, just return */
- andi. r6,r6,IRQS_DISABLED
- beq cr0,fast_exception_return
- /*
- * We are about to soft-enable interrupts (we are hard disabled
- * at this point). We check if there's anything that needs to
- * be replayed first.
- */
- lbz r0,PACAIRQHAPPENED(r13)
- cmpwi cr0,r0,0
- bne- .Lrestore_check_irq_replay
- /*
- * Get here when nothing happened while soft-disabled, just
- * soft-enable and move on. We will hard-enable as a side
- * effect of rfi.
- */
- .Lrestore_no_replay:
- TRACE_ENABLE_INTS
- li r0,IRQS_ENABLED
- stb r0,PACAIRQSOFTMASK(r13);
- /* This is the return from the load_up_fpu fast path, which could in fact
- * do with fewer GPR restores, but for now we have a single return path
- */
- fast_exception_return:
- wrteei 0
- 1: mr r0,r13
- ld r10,_MSR(r1)
- REST_4GPRS(2, r1)
- andi. r6,r10,MSR_PR
- REST_2GPRS(6, r1)
- beq 1f
- ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
- ld r0,GPR13(r1)
- 1: stdcx. r0,0,r1 /* to clear the reservation */
- ld r8,_CCR(r1)
- ld r9,_LINK(r1)
- ld r10,_CTR(r1)
- ld r11,_XER(r1)
- mtcr r8
- mtlr r9
- mtctr r10
- mtxer r11
- REST_2GPRS(8, r1)
- ld r10,GPR10(r1)
- ld r11,GPR11(r1)
- ld r12,GPR12(r1)
- mtspr SPRN_SPRG_GEN_SCRATCH,r0
- std r10,PACA_EXGEN+EX_R10(r13);
- std r11,PACA_EXGEN+EX_R11(r13);
- ld r10,_NIP(r1)
- ld r11,_MSR(r1)
- ld r0,GPR0(r1)
- ld r1,GPR1(r1)
- mtspr SPRN_SRR0,r10
- mtspr SPRN_SRR1,r11
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- mfspr r13,SPRN_SPRG_GEN_SCRATCH
- rfi
- /*
- * We are returning to a context with interrupts soft disabled.
- *
- * However, we may also be about to hard-enable, so we need to
- * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
- * or that bit can get out of sync and bad things will happen
- */
- .Lrestore_irq_off:
- ld r3,_MSR(r1)
- lbz r7,PACAIRQHAPPENED(r13)
- andi. r0,r3,MSR_EE
- beq 1f
- rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
- stb r7,PACAIRQHAPPENED(r13)
- 1:
- #if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
- /* The interrupt should not have left interrupts soft-enabled. */
- lbz r7,PACAIRQSOFTMASK(r13)
- 1: tdeqi r7,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
- #endif
- b fast_exception_return
- /*
- * Something did happen, check if a re-emit is needed
- * (this also clears paca->irq_happened)
- */
- .Lrestore_check_irq_replay:
- /* XXX: We could implement a fast path here where we check
- * for irq_happened being just 0x01, in which case we can
- * clear it and return. That means that we would potentially
- * miss a decrementer having wrapped all the way around.
- *
- * Still, this might be useful for things like hash_page
- */
- bl __check_irq_replay
- cmpwi cr0,r3,0
- beq .Lrestore_no_replay
- /*
- * We need to re-emit an interrupt. We do so by re-using our
- * existing exception frame. We first change the trap value,
- * but we need to ensure we preserve the low nibble of it
- */
- ld r4,_TRAP(r1)
- clrldi r4,r4,60
- or r4,r4,r3
- std r4,_TRAP(r1)
- /*
- * PACA_IRQ_HARD_DIS won't always be set here, so set it now
- * to reconcile the IRQ state. Tracing is already accounted for.
- */
- lbz r4,PACAIRQHAPPENED(r13)
- ori r4,r4,PACA_IRQ_HARD_DIS
- stb r4,PACAIRQHAPPENED(r13)
- /*
- * Then find the right handler and call it. Interrupts are
- * still soft-disabled and we keep them that way.
- */
- cmpwi cr0,r3,0x500
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl do_IRQ
- b ret_from_except
- 1: cmpwi cr0,r3,0x900
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl timer_interrupt
- b ret_from_except
- #ifdef CONFIG_PPC_DOORBELL
- 1:
- cmpwi cr0,r3,0x280
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl doorbell_exception
- #endif /* CONFIG_PPC_DOORBELL */
- 1: b ret_from_except /* What else to do here? */
- _ASM_NOKPROBE_SYMBOL(ret_from_except);
- _ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
- _ASM_NOKPROBE_SYMBOL(resume_kernel);
- _ASM_NOKPROBE_SYMBOL(restore);
- _ASM_NOKPROBE_SYMBOL(fast_exception_return);
- /*
- * Trampolines used when spotting a bad kernel stack pointer in
- * the exception entry code.
- *
- * TODO: move some bits like SRR0 read to trampoline, pass PACA
- * index around, etc... to handle crit & mcheck
- */
- BAD_STACK_TRAMPOLINE(0x000)
- BAD_STACK_TRAMPOLINE(0x100)
- BAD_STACK_TRAMPOLINE(0x200)
- BAD_STACK_TRAMPOLINE(0x220)
- BAD_STACK_TRAMPOLINE(0x260)
- BAD_STACK_TRAMPOLINE(0x280)
- BAD_STACK_TRAMPOLINE(0x2a0)
- BAD_STACK_TRAMPOLINE(0x2c0)
- BAD_STACK_TRAMPOLINE(0x2e0)
- BAD_STACK_TRAMPOLINE(0x300)
- BAD_STACK_TRAMPOLINE(0x310)
- BAD_STACK_TRAMPOLINE(0x320)
- BAD_STACK_TRAMPOLINE(0x340)
- BAD_STACK_TRAMPOLINE(0x400)
- BAD_STACK_TRAMPOLINE(0x500)
- BAD_STACK_TRAMPOLINE(0x600)
- BAD_STACK_TRAMPOLINE(0x700)
- BAD_STACK_TRAMPOLINE(0x800)
- BAD_STACK_TRAMPOLINE(0x900)
- BAD_STACK_TRAMPOLINE(0x980)
- BAD_STACK_TRAMPOLINE(0x9f0)
- BAD_STACK_TRAMPOLINE(0xa00)
- BAD_STACK_TRAMPOLINE(0xb00)
- BAD_STACK_TRAMPOLINE(0xc00)
- BAD_STACK_TRAMPOLINE(0xd00)
- BAD_STACK_TRAMPOLINE(0xd08)
- BAD_STACK_TRAMPOLINE(0xe00)
- BAD_STACK_TRAMPOLINE(0xf00)
- BAD_STACK_TRAMPOLINE(0xf20)
- .globl bad_stack_book3e
- bad_stack_book3e:
- /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
- mfspr r10,SPRN_SRR0; /* read SRR0 before touching stack */
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,64+INT_FRAME_SIZE
- std r10,_NIP(r1)
- std r11,_MSR(r1)
- ld r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
- lwz r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
- std r10,GPR1(r1)
- std r11,_CCR(r1)
- mfspr r10,SPRN_DEAR
- mfspr r11,SPRN_ESR
- std r10,_DAR(r1)
- std r11,_DSISR(r1)
- std r0,GPR0(r1); /* save r0 in stackframe */
- std r2,GPR2(r1); /* save r2 in stackframe */
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */
- std r9,GPR9(r1); /* save r9 in stackframe */
- ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */
- ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */
- mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */
- std r3,GPR10(r1); /* save r10 to stackframe */
- std r4,GPR11(r1); /* save r11 to stackframe */
- std r12,GPR12(r1); /* save r12 in stackframe */
- std r5,GPR13(r1); /* save it to stackframe */
- mflr r10
- mfctr r11
- mfxer r12
- std r10,_LINK(r1)
- std r11,_CTR(r1)
- std r12,_XER(r1)
- SAVE_10GPRS(14,r1)
- SAVE_8GPRS(24,r1)
- lhz r12,PACA_TRAP_SAVE(r13)
- std r12,_TRAP(r1)
- addi r11,r1,INT_FRAME_SIZE
- std r11,0(r1)
- li r12,0
- std r12,0(r11)
- ld r2,PACATOC(r13)
- 1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl kernel_bad_stack
- b 1b
- /*
- * Set up the initial TLB for a core. The current implementation
- * assumes that whatever we are running off will not conflict with
- * the new mapping at PAGE_OFFSET.
- */
- _GLOBAL(initial_tlb_book3e)
- /* Look for the first TLB with IPROT set */
- mfspr r4,SPRN_TLB0CFG
- andi. r3,r4,TLBnCFG_IPROT
- lis r3,MAS0_TLBSEL(0)@h
- bne found_iprot
- mfspr r4,SPRN_TLB1CFG
- andi. r3,r4,TLBnCFG_IPROT
- lis r3,MAS0_TLBSEL(1)@h
- bne found_iprot
- mfspr r4,SPRN_TLB2CFG
- andi. r3,r4,TLBnCFG_IPROT
- lis r3,MAS0_TLBSEL(2)@h
- bne found_iprot
- lis r3,MAS0_TLBSEL(3)@h
- mfspr r4,SPRN_TLB3CFG
- /* fall through */
- found_iprot:
- andi. r5,r4,TLBnCFG_HES
- bne have_hes
- mflr r8 /* save LR */
- /* 1. Find the index of the entry we're executing in
- *
- * r3 = MAS0_TLBSEL (for the iprot array)
- * r4 = SPRN_TLBnCFG
- */
- bl invstr /* Find our address */
- invstr: mflr r6 /* Make it accessible */
- mfmsr r7
- rlwinm r5,r7,27,31,31 /* extract MSR[IS] */
- mfspr r7,SPRN_PID
- slwi r7,r7,16
- or r7,r7,r5
- mtspr SPRN_MAS6,r7
- tlbsx 0,r6 /* search MSR[IS], SPID=PID */
- mfspr r3,SPRN_MAS0
- rlwinm r5,r3,16,20,31 /* Extract MAS0(Entry) */
- mfspr r7,SPRN_MAS1 /* Ensure IPROT is set */
- oris r7,r7,MAS1_IPROT@h
- mtspr SPRN_MAS1,r7
- tlbwe
- /* 2. Invalidate all entries except the entry we're executing in
- *
- * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
- * r4 = SPRN_TLBnCFG
- * r5 = ESEL of entry we are running in
- */
- andi. r4,r4,TLBnCFG_N_ENTRY /* Extract # entries */
- li r6,0 /* Set Entry counter to 0 */
- 1: mr r7,r3 /* Set MAS0(TLBSEL) */
- rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
- mtspr SPRN_MAS0,r7
- tlbre
- mfspr r7,SPRN_MAS1
- rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
- cmpw r5,r6
- beq skpinv /* Don't update the current execution TLB */
- mtspr SPRN_MAS1,r7
- tlbwe
- isync
- skpinv: addi r6,r6,1 /* Increment */
- cmpw r6,r4 /* Are we done? */
- bne 1b /* If not, repeat */
- /* Invalidate all TLBs */
- PPC_TLBILX_ALL(0,R0)
- sync
- isync
- /* 3. Setup a temp mapping and jump to it
- *
- * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
- * r5 = ESEL of entry we are running in
- */
- andi. r7,r5,0x1 /* Pick a non-zero entry that is not the one in use */
- addi r7,r7,0x1
- mr r4,r3 /* Set MAS0(TLBSEL) = 1 */
- mtspr SPRN_MAS0,r4
- tlbre
- rlwimi r4,r7,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r7) */
- mtspr SPRN_MAS0,r4
- mfspr r7,SPRN_MAS1
- xori r6,r7,MAS1_TS /* Set up temp mapping in the other address space */
- mtspr SPRN_MAS1,r6
- tlbwe
- mfmsr r6
- xori r6,r6,MSR_IS
- mtspr SPRN_SRR1,r6
- bl 1f /* Find our address */
- 1: mflr r6
- addi r6,r6,(2f - 1b)
- mtspr SPRN_SRR0,r6
- rfi
- 2:
- /* 4. Clear out PIDs & Search info
- *
- * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
- * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
- * r5 = MAS3
- */
- li r6,0
- mtspr SPRN_MAS6,r6
- mtspr SPRN_PID,r6
- /* 5. Invalidate mapping we started in
- *
- * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
- * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
- * r5 = MAS3
- */
- mtspr SPRN_MAS0,r3
- tlbre
- mfspr r6,SPRN_MAS1
- rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */
- mtspr SPRN_MAS1,r6
- tlbwe
- sync
- isync
- /* 6. Setup KERNELBASE mapping in TLB[0]
- *
- * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
- * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
- * r5 = MAS3
- */
- rlwinm r3,r3,0,16,3 /* clear ESEL */
- mtspr SPRN_MAS0,r3
- lis r6,(MAS1_VALID|MAS1_IPROT)@h
- ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
- mtspr SPRN_MAS1,r6
- LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
- mtspr SPRN_MAS2,r6
- rlwinm r5,r5,0,0,25
- ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
- mtspr SPRN_MAS3,r5
- li r5,-1
- rlwinm r5,r5,0,0,25
- tlbwe
- /* 7. Jump to KERNELBASE mapping
- *
- * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
- */
- /* Now we branch to the new virtual address mapped by this entry */
- bl 1f /* Find our address */
- 1: mflr r6
- addi r6,r6,(2f - 1b)
- tovirt(r6,r6)
- lis r7,MSR_KERNEL@h
- ori r7,r7,MSR_KERNEL@l
- mtspr SPRN_SRR0,r6
- mtspr SPRN_SRR1,r7
- rfi /* start execution out of TLB1[0] entry */
- 2:
- /* 8. Clear out the temp mapping
- *
- * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
- */
- mtspr SPRN_MAS0,r4
- tlbre
- mfspr r5,SPRN_MAS1
- rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */
- mtspr SPRN_MAS1,r5
- tlbwe
- sync
- isync
- /* We translate LR and return */
- tovirt(r8,r8)
- mtlr r8
- blr
- have_hes:
- /* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
- * kernel linear mapping. We also set MAS8 once and for all here, though
- * that will have to be made dependent on whether we are running under
- * a hypervisor, I suppose.
- */
- /* BEWARE, MAGIC
- * This code is called as an ordinary function on the boot CPU. But to
- * avoid duplication, this code is also used in SCOM bringup of
- * secondary CPUs. We read the code between the initial_tlb_code_start
- * and initial_tlb_code_end labels one instruction at a time and RAM it
- * into the new core via SCOM. That doesn't process branches, so there
- * must be none between those two labels. It also means if this code
- * ever takes any parameters, the SCOM code must also be updated to
- * provide them.
- */
- .globl a2_tlbinit_code_start
- a2_tlbinit_code_start:
- ori r11,r3,MAS0_WQ_ALLWAYS
- oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
- mtspr SPRN_MAS0,r11
- lis r3,(MAS1_VALID | MAS1_IPROT)@h
- ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
- mtspr SPRN_MAS1,r3
- LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
- mtspr SPRN_MAS2,r3
- li r3,MAS3_SR | MAS3_SW | MAS3_SX
- mtspr SPRN_MAS7_MAS3,r3
- li r3,0
- mtspr SPRN_MAS8,r3
- /* Write the TLB entry */
- tlbwe
- .globl a2_tlbinit_after_linear_map
- a2_tlbinit_after_linear_map:
- /* Now we branch to the new virtual address mapped by this entry */
- LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
- mtctr r3
- bctr
- 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything
- * else (including IPROTed things left by firmware)
- * r4 = TLBnCFG
- * r3 = current address (more or less)
- */
- li r5,0
- mtspr SPRN_MAS6,r5
- tlbsx 0,r3
- rlwinm r9,r4,0,TLBnCFG_N_ENTRY
- rlwinm r10,r4,8,0xff
- addi r10,r10,-1 /* Get inner loop mask */
- li r3,1
- mfspr r5,SPRN_MAS1
- rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
- mfspr r6,SPRN_MAS2
- rldicr r6,r6,0,51 /* Extract EPN */
- mfspr r7,SPRN_MAS0
- rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */
- rlwinm r8,r7,16,0xfff /* Extract ESEL */
- 2: add r4,r3,r8
- and r4,r4,r10
- rlwimi r7,r4,16,MAS0_ESEL_MASK
- mtspr SPRN_MAS0,r7
- mtspr SPRN_MAS1,r5
- mtspr SPRN_MAS2,r6
- tlbwe
- addi r3,r3,1
- and. r4,r3,r10
- bne 3f
- addis r6,r6,(1<<30)@h
- 3:
- cmpw r3,r9
- blt 2b
- .globl a2_tlbinit_after_iprot_flush
- a2_tlbinit_after_iprot_flush:
- PPC_TLBILX(0,0,R0)
- sync
- isync
- .globl a2_tlbinit_code_end
- a2_tlbinit_code_end:
- /* We translate LR and return */
- mflr r3
- tovirt(r3,r3)
- mtlr r3
- blr
- /*
- * Main entry (boot CPU, thread 0)
- *
- * We enter here from head_64.S, possibly after the prom_init trampoline
- * with r3 and r4 already saved to r31 and r30 respectively, and in 64-bit
- * mode. Anything else is as it was left by the bootloader.
- *
- * Initial requirements of this port:
- *
- * - Kernel loaded at 0 physical
- * - A good lump of memory mapped 0:0 by UTLB entry 0
- * - MSR:IS & MSR:DS set to 0
- *
- * Note that some of the above requirements will be relaxed in the future
- * as the kernel becomes smarter at dealing with different initial conditions
- * but for now you have to be careful
- */
- _GLOBAL(start_initialization_book3e)
- mflr r28
- /* First, we need to setup some initial TLBs to map the kernel
- * text, data and bss at PAGE_OFFSET. We don't have a real mode
- * and always use AS 0, so we just set it up to match our link
- * address and never use 0 based addresses.
- */
- bl initial_tlb_book3e
- /* Init global core bits */
- bl init_core_book3e
- /* Init per-thread bits */
- bl init_thread_book3e
- /* Return to common init code */
- tovirt(r28,r28)
- mtlr r28
- blr
- /*
- * Secondary core/processor entry
- *
- * This is entered for thread 0 of a secondary core, all other threads
- * are expected to be stopped. It's similar to start_initialization_book3e
- * except that it's generally entered from the holding loop in head_64.S
- * after CPUs have been gathered by Open Firmware.
- *
- * We assume we are in 32-bit mode, running with whatever TLB entry was
- * set for us by the firmware or POR engine.
- */
- _GLOBAL(book3e_secondary_core_init_tlb_set)
- li r4,1
- b generic_secondary_smp_init
- _GLOBAL(book3e_secondary_core_init)
- mflr r28
- /* Do we need to setup initial TLB entry ? */
- cmplwi r4,0
- bne 2f
- /* Setup TLB for this core */
- bl initial_tlb_book3e
- /* We can return from the above running at a different
- * address, so recalculate r2 (TOC)
- */
- bl relative_toc
- /* Init global core bits */
- 2: bl init_core_book3e
- /* Init per-thread bits */
- 3: bl init_thread_book3e
- /* Return to common init code at proper virtual address.
- *
- * Due to various previous assumptions, we know we entered this
- * function at either the final PAGE_OFFSET mapping or using a
- * 1:1 mapping at 0, so we don't bother doing a complicated check
- * here, we just ensure the return address has the right top bits.
- *
- * Note that if we ever want to be smarter about where we can be
- * started from, we have to be careful that by the time we reach
- * the code below we may already be running at a different location
- * than the one we were called from since initial_tlb_book3e can
- * have moved us already.
- */
- cmpdi cr0,r28,0
- blt 1f
- lis r3,PAGE_OFFSET@highest
- sldi r3,r3,32
- or r28,r28,r3
- 1: mtlr r28
- blr
- _GLOBAL(book3e_secondary_thread_init)
- mflr r28
- b 3b
- .globl init_core_book3e
- init_core_book3e:
- /* Establish the interrupt vector base */
- tovirt(r2,r2)
- LOAD_REG_ADDR(r3, interrupt_base_book3e)
- mtspr SPRN_IVPR,r3
- sync
- blr
- init_thread_book3e:
- lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
- mtspr SPRN_EPCR,r3
- /* Make sure interrupts are off */
- wrteei 0
- /* disable all timers and clear out status */
- li r3,0
- mtspr SPRN_TCR,r3
- mfspr r3,SPRN_TSR
- mtspr SPRN_TSR,r3
- blr
- _GLOBAL(__setup_base_ivors)
- SET_IVOR(0, 0x020) /* Critical Input */
- SET_IVOR(1, 0x000) /* Machine Check */
- SET_IVOR(2, 0x060) /* Data Storage */
- SET_IVOR(3, 0x080) /* Instruction Storage */
- SET_IVOR(4, 0x0a0) /* External Input */
- SET_IVOR(5, 0x0c0) /* Alignment */
- SET_IVOR(6, 0x0e0) /* Program */
- SET_IVOR(7, 0x100) /* FP Unavailable */
- SET_IVOR(8, 0x120) /* System Call */
- SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
- SET_IVOR(10, 0x160) /* Decrementer */
- SET_IVOR(11, 0x180) /* Fixed Interval Timer */
- SET_IVOR(12, 0x1a0) /* Watchdog Timer */
- SET_IVOR(13, 0x1c0) /* Data TLB Error */
- SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
- SET_IVOR(15, 0x040) /* Debug */
- sync
- blr
- _GLOBAL(setup_altivec_ivors)
- SET_IVOR(32, 0x200) /* AltiVec Unavailable */
- SET_IVOR(33, 0x220) /* AltiVec Assist */
- blr
- _GLOBAL(setup_perfmon_ivor)
- SET_IVOR(35, 0x260) /* Performance Monitor */
- blr
- _GLOBAL(setup_doorbell_ivors)
- SET_IVOR(36, 0x280) /* Processor Doorbell */
- SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
- blr
- _GLOBAL(setup_ehv_ivors)
- SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
- SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
- SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
- SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
- blr
- _GLOBAL(setup_lrat_ivor)
- SET_IVOR(42, 0x340) /* LRAT Error */
- blr
|