xuantie-linux-5.4.36.patch

diff -Nur linux-5.4.36/arch/riscv/include/asm/asid.h kernel/arch/riscv/include/asm/asid.h
--- linux-5.4.36/arch/riscv/include/asm/asid.h	1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/include/asm/asid.h	2020-09-03 06:01:13.901989796 +0000
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ASM_ASID_H
+#define __ASM_ASM_ASID_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+struct asid_info
+{
+	atomic64_t	generation;
+	unsigned long	*map;
+	atomic64_t __percpu	*active;
+	u64 __percpu	*reserved;
+	u32	bits;
+	/* Lock protecting the structure */
+	raw_spinlock_t	lock;
+	/* Which CPU requires context flush on next call */
+	cpumask_t	flush_pending;
+	/* Number of ASIDs allocated per context (shift value) */
+	unsigned int	ctxt_shift;
+	/* Callback to locally flush the context. */
+	void	(*flush_cpu_ctxt_cb)(void);
+};
+
+#define NUM_ASIDS(info)		(1UL << ((info)->bits))
+#define NUM_CTXT_ASIDS(info)	(NUM_ASIDS(info) >> (info)->ctxt_shift)
+
+#define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)
+
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+		      unsigned int cpu, struct mm_struct *mm);
+
+/*
+ * Check whether the ASID is still valid for the context. If not, generate
+ * a new ASID.
+ *
+ * @pasid: Pointer to the current ASID batch
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+static inline void asid_check_context(struct asid_info *info,
+				      atomic64_t *pasid, unsigned int cpu,
+				      struct mm_struct *mm)
+{
+	u64 asid, old_active_asid;
+
+	asid = atomic64_read(pasid);
+
+	/*
+	 * The memory ordering here is subtle.
+	 * If our active_asid is non-zero and the ASID matches the current
+	 * generation, then we update the active_asid entry with a relaxed
+	 * cmpxchg. Racing with a concurrent rollover means that either:
+	 *
+	 * - We get a zero back from the cmpxchg and end up waiting on the
+	 *   lock. Taking the lock synchronises with the rollover and so
+	 *   we are forced to see the updated generation.
+	 *
+	 * - We get a valid ASID back from the cmpxchg, which means the
+	 *   relaxed xchg in flush_context will treat us as reserved
+	 *   because atomic RmWs are totally ordered for a given location.
+	 */
+	old_active_asid = atomic64_read(&active_asid(info, cpu));
+	if (old_active_asid &&
+	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
+	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
+				     old_active_asid, asid))
+		return;
+
+	asid_new_context(info, pasid, cpu, mm);
+}
+
+int asid_allocator_init(struct asid_info *info,
+			u32 bits, unsigned int asid_per_ctxt,
+			void (*flush_cpu_ctxt_cb)(void));
+
+#endif
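
[Editor's note: the file above is a generic generation/rollover ASID allocator (modelled on the arm64 one); the patch wires it into riscv further down via check_and_switch_context() in mmu_context.h. The sketch below is a hedged illustration of how the API fits together; my_asid_info, my_flush_tlb_all, my_asid_init and my_check_context are hypothetical names, not part of the patch.]

/* Illustrative sketch only -- not part of the patch. Assumes kernel
 * context with <asm/asid.h>, <asm/csr.h> and <asm/tlbflush.h> available. */
static struct asid_info my_asid_info;

/* Rollover callback: invalidate this CPU's stale translations. */
static void my_flush_tlb_all(void)
{
	local_flush_tlb_all();
}

static int __init my_asid_init(void)
{
	/* 16 SATP ASID bits, one ASID per context, local flush callback. */
	return asid_allocator_init(&my_asid_info, SATP_ASID_BITS, 1,
				   my_flush_tlb_all);
}

/* On each context switch, revalidate the mm's ASID against the
 * current generation; a new ASID is allocated only after rollover. */
static void my_check_context(struct mm_struct *mm, unsigned int cpu)
{
	asid_check_context(&my_asid_info, &mm->context.asid, cpu, mm);
}
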
diff -Nur linux-5.4.36/arch/riscv/include/asm/cacheflush.h kernel/arch/riscv/include/asm/cacheflush.h
--- linux-5.4.36/arch/riscv/include/asm/cacheflush.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/cacheflush.h	2020-09-03 06:01:13.902989796 +0000
@@ -99,6 +99,9 @@
 #endif /* CONFIG_SMP */
+void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_wb_range(unsigned long start, unsigned long end);
+
 /*
  * Bits in sys_riscv_flush_icache()'s flags argument.
  */
diff -Nur linux-5.4.36/arch/riscv/include/asm/csr.h kernel/arch/riscv/include/asm/csr.h
--- linux-5.4.36/arch/riscv/include/asm/csr.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/csr.h	2020-09-03 06:01:13.902989796 +0000
@@ -21,6 +21,12 @@
 #define SR_FS_CLEAN	_AC(0x00004000, UL)
 #define SR_FS_DIRTY	_AC(0x00006000, UL)
+#define SR_VS		_AC(0x01800000, UL) /* Vector Status */
+#define SR_VS_OFF	_AC(0x00000000, UL)
+#define SR_VS_INITIAL	_AC(0x00800000, UL)
+#define SR_VS_CLEAN	_AC(0x01000000, UL)
+#define SR_VS_DIRTY	_AC(0x01800000, UL)
+
 #define SR_XS		_AC(0x00018000, UL) /* Extension Status */
 #define SR_XS_OFF	_AC(0x00000000, UL)
 #define SR_XS_INITIAL	_AC(0x00008000, UL)
@@ -42,6 +48,9 @@
 #define SATP_PPN	_AC(0x00000FFFFFFFFFFF, UL)
 #define SATP_MODE_39	_AC(0x8000000000000000, UL)
 #define SATP_MODE	SATP_MODE_39
+#define SATP_ASID_BITS	16
+#define SATP_ASID_SHIFT	44
+#define SATP_ASID_MASK	_AC(0xFFFF, UL)
 #endif
 /* SCAUSE */
@@ -56,6 +65,7 @@
 #define IRQ_U_EXT	8
 #define IRQ_S_EXT	9
 #define IRQ_M_EXT	11
+#define IRQ_S_PMU	17
 #define EXC_INST_MISALIGNED	0
 #define EXC_INST_ACCESS		1
@@ -67,10 +77,18 @@
 #define EXC_LOAD_PAGE_FAULT	13
 #define EXC_STORE_PAGE_FAULT	15
+#define CSR_VSTART	0x8
+#define CSR_VXSAT	0x9
+#define CSR_VXRM	0xa
+#define CSR_VL		0xc20
+#define CSR_VTYPE	0xc21
+#define CSR_VLENB	0xc22
+
 /* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
 #define SIE_SSIE	(_AC(0x1, UL) << IRQ_S_SOFT)
 #define SIE_STIE	(_AC(0x1, UL) << IRQ_S_TIMER)
 #define SIE_SEIE	(_AC(0x1, UL) << IRQ_S_EXT)
+#define SIE_SMIE	(_AC(0x1, UL) << IRQ_S_PMU)
 #define CSR_CYCLE	0xc00
 #define CSR_TIME	0xc01
diff -Nur linux-5.4.36/arch/riscv/include/asm/elf.h kernel/arch/riscv/include/asm/elf.h
--- linux-5.4.36/arch/riscv/include/asm/elf.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/elf.h	2020-09-03 06:01:13.902989796 +0000
@@ -49,6 +49,9 @@
 #define ELF_HWCAP	(elf_hwcap)
 extern unsigned long elf_hwcap;
+#define ELF_CORE_COPY_REGS(dest, regs)	\
+	*(struct user_regs_struct *)&(dest) = (regs)->user_regs;
+
 /*
  * This yields a string that ld.so will use to load implementation
  * specific libraries for optimization. This is more specific in
diff -Nur linux-5.4.36/arch/riscv/include/asm/fence.h kernel/arch/riscv/include/asm/fence.h
--- linux-5.4.36/arch/riscv/include/asm/fence.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/fence.h	2020-09-03 06:01:13.902989796 +0000
@@ -9,4 +9,8 @@
 #define RISCV_RELEASE_BARRIER
 #endif
+extern int c910_mmu_v1_flag;
+#define sync_mmu_v1() \
+	if (c910_mmu_v1_flag) asm volatile (".long 0x01b0000b");
+
 #endif /* _ASM_RISCV_FENCE_H */
diff -Nur linux-5.4.36/arch/riscv/include/asm/io.h kernel/arch/riscv/include/asm/io.h
--- linux-5.4.36/arch/riscv/include/asm/io.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/io.h	2020-09-03 06:01:13.903989796 +0000
@@ -12,19 +12,17 @@
 #define _ASM_RISCV_IO_H
 #include <linux/types.h>
+#include <asm/fence.h>
 #include <asm/mmiowb.h>
 #include <asm/pgtable.h>
-extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
+extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions. This should be fixed by the
- * upcoming platform spec.
- */
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_wc(addr, size) ioremap((addr), (size))
-#define ioremap_wt(addr, size) ioremap((addr), (size))
+#define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
+#define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_cache ioremap_cache
 extern void iounmap(volatile void __iomem *addr);
@@ -32,26 +30,34 @@
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
 {
+	sync_mmu_v1();
 	asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+	sync_mmu_v1();
 }
 #define __raw_writew __raw_writew
 static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 {
+	sync_mmu_v1();
 	asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+	sync_mmu_v1();
 }
 #define __raw_writel __raw_writel
 static inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
+	sync_mmu_v1();
 	asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+	sync_mmu_v1();
 }
 #ifdef CONFIG_64BIT
 #define __raw_writeq __raw_writeq
 static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
 {
+	sync_mmu_v1();
 	asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+	sync_mmu_v1();
 }
 #endif
@@ -60,7 +66,9 @@
 {
 	u8 val;
+	sync_mmu_v1();
 	asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+	sync_mmu_v1();
 	return val;
 }
@@ -69,7 +77,9 @@
 {
 	u16 val;
+	sync_mmu_v1();
 	asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+	sync_mmu_v1();
 	return val;
 }
@@ -78,7 +88,9 @@
 {
 	u32 val;
+	sync_mmu_v1();
 	asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+	sync_mmu_v1();
 	return val;
 }
@@ -88,7 +100,9 @@
 {
 	u64 val;
+	sync_mmu_v1();
 	asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+	sync_mmu_v1();
 	return val;
 }
 #endif
diff -Nur linux-5.4.36/arch/riscv/include/asm/kexec.h kernel/arch/riscv/include/asm/kexec.h
--- linux-5.4.36/arch/riscv/include/asm/kexec.h	1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/include/asm/kexec.h	2020-09-03 06:01:13.903989796 +0000
@@ -0,0 +1,99 @@
+/*
+ * kexec for riscv
+ *
+ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RISCV_KEXEC_H
+#define _RISCV_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+#define KEXEC_ARCH KEXEC_ARCH_RISCV
+
+#ifndef __ASSEMBLY__
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	if (oldregs) {
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	} else {
+		u64 tmp1, tmp2;
+
+		__asm__ __volatile__ (
+			"sd ra, 8(%2)\n"
+			"sd gp, 24(%2)\n"
+			"sd t0, 40(%2)\n"
+			"sd t1, 48(%2)\n"
+			"sd t2, 56(%2)\n"
+			"sd s0, 64(%2)\n"
+			"sd s1, 72(%2)\n"
+			"sd a0, 80(%2)\n"
+			"sd a1, 88(%2)\n"
+			"sd a2, 96(%2)\n"
+			"sd a3, 104(%2)\n"
+			"sd a4, 112(%2)\n"
+			"sd a5, 120(%2)\n"
+			"sd a6, 128(%2)\n"
+			"sd a7, 136(%2)\n"
+			"sd s2, 144(%2)\n"
+			"sd s3, 152(%2)\n"
+			"sd s4, 160(%2)\n"
+			"sd s5, 168(%2)\n"
+			"sd s6, 176(%2)\n"
+			"sd s7, 184(%2)\n"
+			"sd s8, 192(%2)\n"
+			"sd s9, 200(%2)\n"
+			"sd s10, 208(%2)\n"
+			"sd s11, 216(%2)\n"
+			"sd t3, 224(%2)\n"
+			"sd t4, 232(%2)\n"
+			"sd t5, 240(%2)\n"
+			"sd t6, 248(%2)\n"
+			"auipc %0, 0\n"
+			"sd %0, 0(%2)\n"
+			"csrr %0, sstatus\n"
+			"sd %0, 256(%2)\n"
+			"csrr %0, stval\n"
+			"sd %0, 264(%2)\n"
+			"csrr %0, scause\n"
+			"sd %0, 272(%2)\n"
+			"sd tp, 32(%2)\n"
+			"sd sp, 16(%2)\n"
+			: "=&r" (tmp1), "=&r" (tmp2)
+			: "r" (newregs)
+			: "memory"
+		);
+	}
+}
+
+static inline bool crash_is_nosave(unsigned long pfn) { return false; }
+static inline void crash_prepare_suspend(void) {}
+static inline void crash_post_resume(void) {}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff -Nur linux-5.4.36/arch/riscv/include/asm/mmu_context.h kernel/arch/riscv/include/asm/mmu_context.h
--- linux-5.4.36/arch/riscv/include/asm/mmu_context.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/mmu_context.h	2020-09-03 06:01:13.903989796 +0000
@@ -12,19 +12,20 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/asid.h>
+
+#define ASID_MASK	((1 << SATP_ASID_BITS) - 1)
+#define cpu_asid(mm)	(atomic64_read(&mm->context.asid) & ASID_MASK)
+
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.asid, 0); 0; })
 static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *task)
 {
 }
-/* Initialize context-related info for a new mm_struct */
-static inline int init_new_context(struct task_struct *task,
-	struct mm_struct *mm)
-{
-	return 0;
-}
-
 static inline void destroy_context(struct mm_struct *mm)
 {
 }
@@ -32,6 +33,8 @@
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task);
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+
 static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
 {
diff -Nur linux-5.4.36/arch/riscv/include/asm/mmu.h kernel/arch/riscv/include/asm/mmu.h
--- linux-5.4.36/arch/riscv/include/asm/mmu.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/mmu.h	2020-09-03 06:01:13.903989796 +0000
@@ -11,6 +11,7 @@
 typedef struct {
	void *vdso;
+	atomic64_t asid;
 #ifdef CONFIG_SMP
	/* A local icache flush is needed before user execution can resume. */
	cpumask_t icache_stale_mask;
diff -Nur linux-5.4.36/arch/riscv/include/asm/perf_event.h kernel/arch/riscv/include/asm/perf_event.h
--- linux-5.4.36/arch/riscv/include/asm/perf_event.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/perf_event.h	2020-09-03 06:01:13.903989796 +0000
@@ -18,8 +18,8 @@
  * The RISCV_MAX_COUNTERS parameter should be specified.
  */
-#ifdef CONFIG_RISCV_BASE_PMU
-#define RISCV_MAX_COUNTERS	2
+#if defined(CONFIG_RISCV_BASE_PMU) || defined(CONFIG_THEAD_XT_V1_PMU)
+#define RISCV_MAX_COUNTERS	32
 #endif
 #ifndef RISCV_MAX_COUNTERS
diff -Nur linux-5.4.36/arch/riscv/include/asm/pgtable-64.h kernel/arch/riscv/include/asm/pgtable-64.h
--- linux-5.4.36/arch/riscv/include/asm/pgtable-64.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/pgtable-64.h	2020-09-03 06:01:13.903989796 +0000
@@ -55,7 +55,7 @@
 static inline unsigned long pud_page_vaddr(pud_t pud)
 {
-	return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+	return (unsigned long)pfn_to_virt((pud_val(pud) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
 }
 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
diff -Nur linux-5.4.36/arch/riscv/include/asm/pgtable-bits.h kernel/arch/riscv/include/asm/pgtable-bits.h
--- linux-5.4.36/arch/riscv/include/asm/pgtable-bits.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/pgtable-bits.h	2020-09-03 06:01:13.903989796 +0000
@@ -24,6 +24,13 @@
 #define _PAGE_DIRTY	(1 << 7)	/* Set by hardware on any write */
 #define _PAGE_SOFT	(1 << 8)	/* Reserved for software */
+/* C-SKY extensions */
+#define _PAGE_SEC	(1UL << 59)	/* Security */
+#define _PAGE_SHARE	(1UL << 60)	/* Shareable */
+#define _PAGE_BUF	(1UL << 61)	/* Bufferable */
+#define _PAGE_CACHE	(1UL << 62)	/* Cacheable */
+#define _PAGE_SO	(1UL << 63)	/* Strong Order */
+
 #define _PAGE_SPECIAL	_PAGE_SOFT
 #define _PAGE_TABLE	_PAGE_PRESENT
@@ -38,6 +45,9 @@
 /* Set of bits to preserve across pte_modify() */
 #define _PAGE_CHG_MASK	(~(unsigned long)(_PAGE_PRESENT | _PAGE_READ |	\
					  _PAGE_WRITE | _PAGE_EXEC |	\
-					  _PAGE_USER | _PAGE_GLOBAL))
+					  _PAGE_USER | _PAGE_GLOBAL |	\
+					  _PAGE_SEC | _PAGE_SHARE |	\
+					  _PAGE_BUF | _PAGE_CACHE |	\
+					  _PAGE_SO))
 #endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff -Nur linux-5.4.36/arch/riscv/include/asm/pgtable.h kernel/arch/riscv/include/asm/pgtable.h
--- linux-5.4.36/arch/riscv/include/asm/pgtable.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/pgtable.h	2020-09-03 06:01:13.903989796 +0000
@@ -35,9 +35,11 @@
 #define FIRST_USER_ADDRESS	0
 /* Page protection bits */
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER | \
+			 _PAGE_SHARE | _PAGE_CACHE | _PAGE_BUF)
-#define PAGE_NONE	__pgprot(_PAGE_PROT_NONE)
+#define PAGE_NONE	__pgprot(_PAGE_PROT_NONE | _PAGE_CACHE | \
+				 _PAGE_BUF | _PAGE_SHARE)
 #define PAGE_READ	__pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_WRITE	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
 #define PAGE_EXEC	__pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -54,11 +56,17 @@
 #define _PAGE_KERNEL	(_PAGE_READ \
			 | _PAGE_WRITE \
			 | _PAGE_PRESENT \
+			 | _PAGE_GLOBAL \
			 | _PAGE_ACCESSED \
-			 | _PAGE_DIRTY)
+			 | _PAGE_DIRTY \
+			 | _PAGE_CACHE \
+			 | _PAGE_SHARE \
+			 | _PAGE_BUF)
 #define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
+#define PAGE_KERNEL_SO		__pgprot((_PAGE_KERNEL | _PAGE_SO) & \
+					 ~(_PAGE_CACHE | _PAGE_BUF))
 #define PAGE_TABLE		__pgprot(_PAGE_TABLE)
@@ -167,18 +175,18 @@
 static inline struct page *pmd_page(pmd_t pmd)
 {
-	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+	return pfn_to_page((pmd_val(pmd) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
 }
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
-	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+	return (unsigned long)pfn_to_virt((pmd_val(pmd) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
 }
 /* Yields the page frame number (PFN) of a page table entry */
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+	return ((pte_val(pte) & _PAGE_CHG_MASK) >> _PAGE_PFN_SHIFT);
 }
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
@@ -405,6 +413,32 @@
	return ptep_test_and_clear_young(vma, address, ptep);
 }
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
+	prot &= ~(_PAGE_CACHE | _PAGE_BUF);
+	prot |= _PAGE_SO;
+
+	return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
+	prot &= ~(_PAGE_CACHE | _PAGE_BUF);
+
+	return __pgprot(prot);
+}
+
 /*
  * Encode and decode a swap entry
  *
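
[Editor's note: taken together with the io.h hunk earlier, this is how the patch routes memory-attribute control through the T-Head PTE bits: ioremap() goes through pgprot_noncached() (_PAGE_CACHE/_PAGE_BUF cleared, _PAGE_SO set) and ioremap_wc() through pgprot_writecombine() (_PAGE_CACHE/_PAGE_BUF cleared, _PAGE_SO left clear). A hedged driver-side sketch follows; the addresses, sizes and the name my_map_device are made up for illustration and are not part of the patch.]

/* Illustrative sketch only -- not part of the patch. Assumes kernel
 * module context with <linux/io.h> and <linux/errno.h> available. */
static int __init my_map_device(void)
{
	/* Strongly ordered, uncached MMIO: _PAGE_SO set, _PAGE_CACHE/_PAGE_BUF clear. */
	void __iomem *regs = ioremap(0x10000000, 0x1000);
	/* Write-combining: _PAGE_CACHE/_PAGE_BUF clear, _PAGE_SO also clear. */
	void __iomem *wcbuf = ioremap_wc(0x10100000, 0x100000);

	if (!regs || !wcbuf)
		return -ENOMEM;
	return 0;
}
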
diff -Nur linux-5.4.36/arch/riscv/include/asm/processor.h kernel/arch/riscv/include/asm/processor.h
--- linux-5.4.36/arch/riscv/include/asm/processor.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/processor.h	2020-09-03 06:01:13.903989796 +0000
@@ -32,6 +32,7 @@
	unsigned long sp;	/* Kernel mode stack */
	unsigned long s[12];	/* s[0]: frame pointer */
	struct __riscv_d_ext_state fstate;
+	struct __riscv_v_state vstate;
 };
 #define INIT_THREAD {	\
diff -Nur linux-5.4.36/arch/riscv/include/asm/ptrace.h kernel/arch/riscv/include/asm/ptrace.h
--- linux-5.4.36/arch/riscv/include/asm/ptrace.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/ptrace.h	2020-09-03 06:01:13.903989796 +0000
@@ -12,38 +12,43 @@
 #ifndef __ASSEMBLY__
 struct pt_regs {
-	unsigned long sepc;
-	unsigned long ra;
-	unsigned long sp;
-	unsigned long gp;
-	unsigned long tp;
-	unsigned long t0;
-	unsigned long t1;
-	unsigned long t2;
-	unsigned long s0;
-	unsigned long s1;
-	unsigned long a0;
-	unsigned long a1;
-	unsigned long a2;
-	unsigned long a3;
-	unsigned long a4;
-	unsigned long a5;
-	unsigned long a6;
-	unsigned long a7;
-	unsigned long s2;
-	unsigned long s3;
-	unsigned long s4;
-	unsigned long s5;
-	unsigned long s6;
-	unsigned long s7;
-	unsigned long s8;
-	unsigned long s9;
-	unsigned long s10;
-	unsigned long s11;
-	unsigned long t3;
-	unsigned long t4;
-	unsigned long t5;
-	unsigned long t6;
+	union {
+		struct user_regs_struct user_regs;
+		struct {
+			unsigned long sepc;
+			unsigned long ra;
+			unsigned long sp;
+			unsigned long gp;
+			unsigned long tp;
+			unsigned long t0;
+			unsigned long t1;
+			unsigned long t2;
+			unsigned long s0;
+			unsigned long s1;
+			unsigned long a0;
+			unsigned long a1;
+			unsigned long a2;
+			unsigned long a3;
+			unsigned long a4;
+			unsigned long a5;
+			unsigned long a6;
+			unsigned long a7;
+			unsigned long s2;
+			unsigned long s3;
+			unsigned long s4;
+			unsigned long s5;
+			unsigned long s6;
+			unsigned long s7;
+			unsigned long s8;
+			unsigned long s9;
+			unsigned long s10;
+			unsigned long s11;
+			unsigned long t3;
+			unsigned long t4;
+			unsigned long t5;
+			unsigned long t6;
+		};
+	};
	/* Supervisor CSRs */
	unsigned long sstatus;
	unsigned long sbadaddr;
diff -Nur linux-5.4.36/arch/riscv/include/asm/sbi.h kernel/arch/riscv/include/asm/sbi.h
--- linux-5.4.36/arch/riscv/include/asm/sbi.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/sbi.h	2020-09-03 06:01:13.903989796 +0000
@@ -17,6 +17,7 @@
 #define SBI_REMOTE_SFENCE_VMA 6
 #define SBI_REMOTE_SFENCE_VMA_ASID 7
 #define SBI_SHUTDOWN 8
+#define SBI_PMU 0x09000001
 #define SBI_CALL(which, arg0, arg1, arg2, arg3) ({	\
	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);	\
@@ -94,4 +95,9 @@
	SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
+static inline void sbi_set_pmu(int start)
+{
+	SBI_CALL_1(SBI_PMU, start);
+}
+
 #endif
diff -Nur linux-5.4.36/arch/riscv/include/asm/switch_to.h kernel/arch/riscv/include/asm/switch_to.h
--- linux-5.4.36/arch/riscv/include/asm/switch_to.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/switch_to.h	2020-09-03 06:01:13.904989796 +0000
@@ -63,6 +63,52 @@
 #define __switch_to_aux(__prev, __next) do { } while (0)
 #endif
+#ifdef CONFIG_VECTOR
+extern void __vstate_save(struct task_struct *save_to);
+extern void __vstate_restore(struct task_struct *restore_from);
+
+static inline void __vstate_clean(struct pt_regs *regs)
+{
+	regs->sstatus = (regs->sstatus & ~(SR_VS)) | SR_VS_CLEAN;
+}
+
+static inline void vstate_save(struct task_struct *task,
+			       struct pt_regs *regs)
+{
+	if ((regs->sstatus & SR_VS) == SR_VS_DIRTY) {
+		__vstate_save(task);
+		__vstate_clean(regs);
+	}
+}
+
+static inline void vstate_restore(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	if ((regs->sstatus & SR_VS) != SR_VS_OFF) {
+		__vstate_restore(task);
+		__vstate_clean(regs);
+	}
+}
+
+static inline void __switch_to_vector(struct task_struct *prev,
+				      struct task_struct *next)
+{
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(prev);
+	if (unlikely(regs->sstatus & SR_SD))
+		vstate_save(prev, regs);
+	vstate_restore(next, task_pt_regs(next));
+}
+
+extern bool has_vector;
+#else
+#define has_vector false
+#define vstate_save(task, regs) do { } while (0)
+#define vstate_restore(task, regs) do { } while (0)
+#define __switch_to_vector(__prev, __next) do { } while (0)
+#endif
+
 extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);
@@ -72,6 +118,8 @@
	struct task_struct *__next = (next);	\
	if (has_fpu)	\
		__switch_to_aux(__prev, __next);	\
+	if (has_vector)	\
+		__switch_to_vector(__prev, __next);	\
	((last) = __switch_to(__prev, __next));	\
 } while (0)
diff -Nur linux-5.4.36/arch/riscv/include/asm/tlbflush.h kernel/arch/riscv/include/asm/tlbflush.h
--- linux-5.4.36/arch/riscv/include/asm/tlbflush.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/asm/tlbflush.h	2020-09-03 06:01:13.904989796 +0000
@@ -12,12 +12,18 @@
 static inline void local_flush_tlb_all(void)
 {
+	sync_mmu_v1();
+	sync_mmu_v1();
+	sync_mmu_v1();
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
 }
 /* Flush one page from local TLB */
 static inline void local_flush_tlb_page(unsigned long addr)
 {
+	sync_mmu_v1();
+	sync_mmu_v1();
+	sync_mmu_v1();
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
 }
@@ -44,7 +50,17 @@
 static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
 {
-	flush_tlb_all();
+	start &= PAGE_MASK;
+	end += PAGE_SIZE - 1;
+	end &= PAGE_MASK;
+
+	while (start < end) {
+		sync_mmu_v1();
+		sync_mmu_v1();
+		sync_mmu_v1();
+		__asm__ __volatile__ ("sfence.vma %0" : : "r" (start) : "memory");
+		start += PAGE_SIZE;
+	}
 }
 #endif /* _ASM_RISCV_TLBFLUSH_H */
diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/elf.h kernel/arch/riscv/include/uapi/asm/elf.h
--- linux-5.4.36/arch/riscv/include/uapi/asm/elf.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/uapi/asm/elf.h	2020-09-03 06:01:13.904989796 +0000
@@ -24,6 +24,8 @@
 typedef union __riscv_fp_state elf_fpregset_t;
 #define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t))
+#define ELF_NVREG (sizeof(struct __riscv_v_state) / sizeof(elf_greg_t))
+
 #if __riscv_xlen == 64
 #define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
 #define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/hwcap.h kernel/arch/riscv/include/uapi/asm/hwcap.h
--- linux-5.4.36/arch/riscv/include/uapi/asm/hwcap.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/uapi/asm/hwcap.h	2020-09-03 06:01:13.904989796 +0000
@@ -21,5 +21,6 @@
 #define COMPAT_HWCAP_ISA_F	(1 << ('F' - 'A'))
 #define COMPAT_HWCAP_ISA_D	(1 << ('D' - 'A'))
 #define COMPAT_HWCAP_ISA_C	(1 << ('C' - 'A'))
+#define COMPAT_HWCAP_ISA_V	(1 << ('V' - 'A'))
 #endif
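
[Editor's note: COMPAT_HWCAP_ISA_V above is exported to userspace through the ELF auxiliary vector, so a program can probe for the vector extension before using it. A minimal userspace sketch, assuming a libc with getauxval(); this program is illustrative and not part of the patch.]

/* Illustrative sketch only -- not part of the patch. Compile as an
 * ordinary C program on the target system. */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* Same bit position as COMPAT_HWCAP_ISA_V: 1 << ('V' - 'A'). */
	printf("vector extension: %s\n",
	       (hwcap & (1UL << ('V' - 'A'))) ? "present" : "absent");
	return 0;
}
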
diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/ptrace.h kernel/arch/riscv/include/uapi/asm/ptrace.h
--- linux-5.4.36/arch/riscv/include/uapi/asm/ptrace.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/uapi/asm/ptrace.h	2020-09-03 06:01:13.905989796 +0000
@@ -77,6 +77,15 @@
	struct __riscv_q_ext_state q;
 };
+struct __riscv_v_state {
+	__uint128_t v[32];
+	unsigned long vstart;
+	unsigned long vxsat;
+	unsigned long vxrm;
+	unsigned long vl;
+	unsigned long vtype;
+};
+
 #endif /* __ASSEMBLY__ */
 #endif /* _UAPI_ASM_RISCV_PTRACE_H */
diff -Nur linux-5.4.36/arch/riscv/include/uapi/asm/sigcontext.h kernel/arch/riscv/include/uapi/asm/sigcontext.h
--- linux-5.4.36/arch/riscv/include/uapi/asm/sigcontext.h	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/include/uapi/asm/sigcontext.h	2020-09-03 06:01:13.905989796 +0000
@@ -17,6 +17,7 @@
 struct sigcontext {
	struct user_regs_struct sc_regs;
	union __riscv_fp_state sc_fpregs;
+	struct __riscv_v_state sc_vregs;
 };
 #endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
diff -Nur linux-5.4.36/arch/riscv/Kconfig kernel/arch/riscv/Kconfig
--- linux-5.4.36/arch/riscv/Kconfig	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/Kconfig	2020-09-14 01:45:17.782702016 +0000
@@ -30,9 +30,11 @@
	select GENERIC_STRNLEN_USER
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_ATOMIC64 if !64BIT
+	select GENERIC_ALLOCATOR
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_ASM_MODVERSIONS
	select HAVE_MEMBLOCK_NODE_MAP
+	select DMA_DIRECT_REMAP
	select HAVE_DMA_CONTIGUOUS
	select HAVE_FUTEX_CMPXCHG if FUTEX
	select HAVE_PERF_EVENTS
@@ -50,10 +52,16 @@
	select PCI_DOMAINS_GENERIC if PCI
	select PCI_MSI if PCI
	select RISCV_TIMER
+	select DW_APB_TIMER_OF
	select GENERIC_IRQ_MULTI_HANDLER
	select GENERIC_ARCH_TOPOLOGY if SMP
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_MMIOWB
+	select ARCH_HAS_DMA_PREP_COHERENT
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_HAS_DMA_WRITE_COMBINE
+	select ARCH_HAS_DMA_MMAP_PGPROT
	select HAVE_EBPF_JIT if 64BIT
	select EDAC_SUPPORT
	select ARCH_HAS_GIGANTIC_PAGE
@@ -62,6 +70,7 @@
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
	select HAVE_ARCH_MMAP_RND_BITS
	select HAVE_COPY_THREAD_TLS
+	select RTC_DRV_XGENE
 config ARCH_MMAP_RND_BITS_MIN
	default 18 if 64BIT
@@ -268,12 +277,41 @@
	  If you don't know what to do here, say Y.
+config VECTOR
+	bool "VECTOR support"
+	default n
+	help
+	  Say N here if you want to disable all vector-related handling
+	  in the kernel.
+
+	  If you don't know what to do here, say Y.
+
 endmenu
 menu "Kernel features"
 source "kernel/Kconfig.hz"
+config KEXEC
+	select KEXEC_CORE
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel. It is like a reboot
+	  but it is independent of the system firmware. And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+config CRASH_DUMP
+	bool "Build kdump crash kernel"
+	help
+	  Generate a crash dump after being started by kexec. This should
+	  normally only be set in special crash dump kernels which are
+	  loaded in the main kernel with kexec-tools into a specially
+	  reserved region and then later executed after a crash by
+	  kdump/kexec.
+
+	  For more details see Documentation/kdump/kdump.txt
+
 endmenu
 menu "Boot options"
diff -Nur linux-5.4.36/arch/riscv/kernel/asm-offsets.c kernel/arch/riscv/kernel/asm-offsets.c
--- linux-5.4.36/arch/riscv/kernel/asm-offsets.c	2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/asm-offsets.c	2020-09-03 06:01:13.905989796 +0000
@@ -70,6 +70,45 @@
	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+	OFFSET(TASK_THREAD_V0, task_struct, thread.vstate.v[0]);
+	OFFSET(TASK_THREAD_V1, task_struct, thread.vstate.v[1]);
+	OFFSET(TASK_THREAD_V2, task_struct, thread.vstate.v[2]);
+	OFFSET(TASK_THREAD_V3, task_struct, thread.vstate.v[3]);
+	OFFSET(TASK_THREAD_V4, task_struct, thread.vstate.v[4]);
+	OFFSET(TASK_THREAD_V5, task_struct, thread.vstate.v[5]);
+	OFFSET(TASK_THREAD_V6, task_struct, thread.vstate.v[6]);
+	OFFSET(TASK_THREAD_V7, task_struct, thread.vstate.v[7]);
+	OFFSET(TASK_THREAD_V8, task_struct, thread.vstate.v[8]);
+	OFFSET(TASK_THREAD_V9, task_struct, thread.vstate.v[9]);
+	OFFSET(TASK_THREAD_V10, task_struct, thread.vstate.v[10]);
+	OFFSET(TASK_THREAD_V11, task_struct, thread.vstate.v[11]);
+	OFFSET(TASK_THREAD_V12, task_struct, thread.vstate.v[12]);
+	OFFSET(TASK_THREAD_V13, task_struct, thread.vstate.v[13]);
+	OFFSET(TASK_THREAD_V14, task_struct, thread.vstate.v[14]);
+	OFFSET(TASK_THREAD_V15, task_struct, thread.vstate.v[15]);
+	OFFSET(TASK_THREAD_V16, task_struct, thread.vstate.v[16]);
+	OFFSET(TASK_THREAD_V17, task_struct, thread.vstate.v[17]);
+	OFFSET(TASK_THREAD_V18, task_struct, thread.vstate.v[18]);
+	OFFSET(TASK_THREAD_V19, task_struct, thread.vstate.v[19]);
+	OFFSET(TASK_THREAD_V20, task_struct, thread.vstate.v[20]);
+	OFFSET(TASK_THREAD_V21, task_struct, thread.vstate.v[21]);
+	OFFSET(TASK_THREAD_V22, task_struct, thread.vstate.v[22]);
+	OFFSET(TASK_THREAD_V23, task_struct, thread.vstate.v[23]);
+	OFFSET(TASK_THREAD_V24, task_struct, thread.vstate.v[24]);
+	OFFSET(TASK_THREAD_V25, task_struct, thread.vstate.v[25]);
+	OFFSET(TASK_THREAD_V26, task_struct, thread.vstate.v[26]);
+	OFFSET(TASK_THREAD_V27, task_struct, thread.vstate.v[27]);
+	OFFSET(TASK_THREAD_V28, task_struct, thread.vstate.v[28]);
+	OFFSET(TASK_THREAD_V29, task_struct, thread.vstate.v[29]);
+	OFFSET(TASK_THREAD_V30, task_struct, thread.vstate.v[30]);
+	OFFSET(TASK_THREAD_V31, task_struct, thread.vstate.v[31]);
+	OFFSET(TASK_THREAD_VSTART, task_struct, thread.vstate.vstart);
+	OFFSET(TASK_THREAD_VXSAT, task_struct, thread.vstate.vxsat);
+	OFFSET(TASK_THREAD_VXRM, task_struct, thread.vstate.vxrm);
+	OFFSET(TASK_THREAD_VL, task_struct, thread.vstate.vl);
+	OFFSET(TASK_THREAD_VTYPE, task_struct, thread.vstate.vtype);
+	DEFINE(RISCV_VECTOR_VLENB, sizeof(__uint128_t));
+
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	OFFSET(PT_SEPC, pt_regs, sepc);
	OFFSET(PT_RA, pt_regs, ra);
@@ -171,6 +210,7 @@
	       - offsetof(struct task_struct, thread.ra)
	);
+	/* Floating point */
	DEFINE(TASK_THREAD_F0_F0,
	       offsetof(struct task_struct, thread.fstate.f[0])
	       - offsetof(struct task_struct, thread.fstate.f[0])
@@ -304,6 +344,156 @@
	       - offsetof(struct task_struct, thread.fstate.f[0])
	);
+	/* Vector */
+	DEFINE(TASK_THREAD_V0_V0,
+	       offsetof(struct task_struct, thread.vstate.v[0])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V1_V0,
+	       offsetof(struct task_struct, thread.vstate.v[1])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V2_V0,
+	       offsetof(struct task_struct, thread.vstate.v[2])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V3_V0,
+	       offsetof(struct task_struct, thread.vstate.v[3])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V4_V0,
+	       offsetof(struct task_struct, thread.vstate.v[4])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V5_V0,
+	       offsetof(struct task_struct, thread.vstate.v[5])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V6_V0,
+	       offsetof(struct task_struct, thread.vstate.v[6])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V7_V0,
+	       offsetof(struct task_struct, thread.vstate.v[7])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V8_V0,
+	       offsetof(struct task_struct, thread.vstate.v[8])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V9_V0,
+	       offsetof(struct task_struct, thread.vstate.v[9])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V10_V0,
+	       offsetof(struct task_struct, thread.vstate.v[10])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V11_V0,
+	       offsetof(struct task_struct, thread.vstate.v[11])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V12_V0,
+	       offsetof(struct task_struct, thread.vstate.v[12])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V13_V0,
+	       offsetof(struct task_struct, thread.vstate.v[13])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V14_V0,
+	       offsetof(struct task_struct, thread.vstate.v[14])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V15_V0,
+	       offsetof(struct task_struct, thread.vstate.v[15])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V16_V0,
+	       offsetof(struct task_struct, thread.vstate.v[16])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V17_V0,
+	       offsetof(struct task_struct, thread.vstate.v[17])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V18_V0,
+	       offsetof(struct task_struct, thread.vstate.v[18])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V19_V0,
+	       offsetof(struct task_struct, thread.vstate.v[19])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V20_V0,
+	       offsetof(struct task_struct, thread.vstate.v[20])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V21_V0,
+	       offsetof(struct task_struct, thread.vstate.v[21])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V22_V0,
+	       offsetof(struct task_struct, thread.vstate.v[22])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V23_V0,
+	       offsetof(struct task_struct, thread.vstate.v[23])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V24_V0,
+	       offsetof(struct task_struct, thread.vstate.v[24])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V25_V0,
+	       offsetof(struct task_struct, thread.vstate.v[25])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V26_V0,
+	       offsetof(struct task_struct, thread.vstate.v[26])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V27_V0,
+	       offsetof(struct task_struct, thread.vstate.v[27])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V28_V0,
+	       offsetof(struct task_struct, thread.vstate.v[28])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V29_V0,
+	       offsetof(struct task_struct, thread.vstate.v[29])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V30_V0,
+	       offsetof(struct task_struct, thread.vstate.v[30])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_V31_V0,
+	       offsetof(struct task_struct, thread.vstate.v[31])
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_VSTART_V0,
+	       offsetof(struct task_struct, thread.vstate.vstart)
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_VXSAT_V0,
+	       offsetof(struct task_struct, thread.vstate.vxsat)
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_VXRM_V0,
+	       offsetof(struct task_struct, thread.vstate.vxrm)
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_VL_V0,
+	       offsetof(struct task_struct, thread.vstate.vl)
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+	DEFINE(TASK_THREAD_VTYPE_V0,
+	       offsetof(struct task_struct, thread.vstate.vtype)
+	       - offsetof(struct task_struct, thread.vstate.v[0])
+	);
+
	/*
	 * We allocate a pt_regs on the stack when entering the kernel. This
	 * ensures the alignment is sane.
  1070. diff -Nur linux-5.4.36/arch/riscv/kernel/cpu.c kernel/arch/riscv/kernel/cpu.c
  1071. --- linux-5.4.36/arch/riscv/kernel/cpu.c 2020-04-29 14:33:25.000000000 +0000
  1072. +++ kernel/arch/riscv/kernel/cpu.c 2020-09-03 06:01:13.905989796 +0000
  1073. @@ -48,7 +48,7 @@
  1074. static void print_isa(struct seq_file *f, const char *orig_isa)
  1075. {
  1076. - static const char *ext = "mafdcsu";
  1077. + static const char *ext = "mafdcvsu";
  1078. const char *isa = orig_isa;
  1079. const char *e;
  1080. diff -Nur linux-5.4.36/arch/riscv/kernel/cpufeature.c kernel/arch/riscv/kernel/cpufeature.c
  1081. --- linux-5.4.36/arch/riscv/kernel/cpufeature.c 2020-04-29 14:33:25.000000000 +0000
  1082. +++ kernel/arch/riscv/kernel/cpufeature.c 2020-09-03 06:01:13.905989796 +0000
  1083. @@ -17,6 +17,10 @@
  1084. bool has_fpu __read_mostly;
  1085. #endif
  1086. +#ifdef CONFIG_VECTOR
  1087. +bool has_vector __read_mostly;
  1088. +#endif
  1089. +
  1090. void riscv_fill_hwcap(void)
  1091. {
  1092. struct device_node *node;
  1093. @@ -30,6 +34,7 @@
  1094. isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
  1095. isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
  1096. isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
  1097. + isa2hwcap['v'] = isa2hwcap['V'] = COMPAT_HWCAP_ISA_V;
  1098. elf_hwcap = 0;
  1099. @@ -44,7 +49,8 @@
  1100. continue;
  1101. }
  1102. - for (i = 0; i < strlen(isa); ++i)
  1103. + /* skip "rv64" */
  1104. + for (i = 4; i < strlen(isa); ++i)
  1105. this_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
  1106. /*
  1107. @@ -71,4 +77,9 @@
  1108. if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
  1109. has_fpu = true;
  1110. #endif
  1111. +
  1112. +#ifdef CONFIG_VECTOR
  1113. + if (elf_hwcap & COMPAT_HWCAP_ISA_V)
  1114. + has_vector = true;
  1115. +#endif
  1116. }
  1117. diff -Nur linux-5.4.36/arch/riscv/kernel/cpu-reset.h kernel/arch/riscv/kernel/cpu-reset.h
  1118. --- linux-5.4.36/arch/riscv/kernel/cpu-reset.h 1970-01-01 00:00:00.000000000 +0000
  1119. +++ kernel/arch/riscv/kernel/cpu-reset.h 2020-09-03 06:01:13.905989796 +0000
  1120. @@ -0,0 +1,58 @@
  1121. +/*
  1122. + * CPU reset routines
  1123. + *
  1124. + * Copyright (C) 2020-2025 Alibaba Group Holding Limited
  1125. + *
  1126. + * This program is free software; you can redistribute it and/or modify
  1127. + * it under the terms of the GNU General Public License version 2 as
  1128. + * published by the Free Software Foundation.
  1129. + */
  1130. +
  1131. +#ifndef _RISCV_CPU_RESET_H
  1132. +#define _RISCV_CPU_RESET_H
  1133. +
  1134. +extern struct resource *standard_resources;
  1135. +void __cpu_soft_restart(unsigned long entry, unsigned long arg0, unsigned long arg1,
  1136. + unsigned long arg2);
  1137. +
  1138. +__attribute__ ((optimize("-O0"))) static void __noreturn cpu_soft_restart(unsigned long entry,
  1139. + unsigned long arg0,
  1140. + unsigned long arg1,
  1141. + unsigned long arg2)
  1142. +{
  1143. + typeof(__cpu_soft_restart) *restart;
  1144. + pgd_t *idmap_pgd;
  1145. + pmd_t *idmap_pmd;
  1146. + long pa_start, pa_end;
  1147. + long i, j, m, n, delta;
  1148. + long idmap_pmd_size;
  1149. +
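+ /*
+ * Build an identity (VA == PA) mapping of system RAM in the live page
+ * table, so execution can continue once we jump to the physical
+ * address of __cpu_soft_restart below.
+ */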
+ pa_start = standard_resources->start;
+ pa_end = standard_resources->end;
+
+ idmap_pmd_size = (pa_end - pa_start + 1) / PMD_SIZE * sizeof(pmd_t);
+
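+ /* On RV64, satp bits [43:0] hold the PPN of the root page table. */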
+ idmap_pgd = (pgd_t *)__va((csr_read(CSR_SATP) & ((1UL<<44)-1))<< PAGE_SHIFT);
+ idmap_pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, get_order(idmap_pmd_size));
+
+ m = (pa_start >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ n = (pa_end >> PGDIR_SHIFT) % PTRS_PER_PGD;
+
+ for (i = 0; m <= n; m++,i++)
+ idmap_pgd[m] = pfn_pgd(PFN_DOWN(__pa(idmap_pmd)) + i,
+ __pgprot(_PAGE_TABLE));
+
+ m = pa_start >> PMD_SHIFT;
+ n = (pa_end + 1) >> PMD_SHIFT;
+ delta = n - m;
+
+ for (i = (pa_start + 1) % PMD_SIZE,j=0; i <= delta; i++,j++)
+ idmap_pmd[i] = pfn_pmd(PFN_DOWN(pa_start + j * PMD_SIZE),
+ __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC));
+
+ restart = (void *)__pa_symbol(__cpu_soft_restart);
+ restart(entry, arg0, arg1, arg2);
+ unreachable();
+}
+
+#endif
diff -Nur linux-5.4.36/arch/riscv/kernel/cpu-reset.S kernel/arch/riscv/kernel/cpu-reset.S
--- linux-5.4.36/arch/riscv/kernel/cpu-reset.S 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/kernel/cpu-reset.S 2020-09-03 06:01:13.905989796 +0000
@@ -0,0 +1,23 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(__cpu_soft_restart)
+ fence
+ fence.i
+ sfence.vma
+ mv s1, a0 //entry
+ mv a0, a1 //arg0
+ mv a1, a2 //arg1
+ mv a2, a3 //arg2
+ jr s1
+ ebreak
+ENDPROC(__cpu_soft_restart)
diff -Nur linux-5.4.36/arch/riscv/kernel/crash_dump.c kernel/arch/riscv/kernel/crash_dump.c
--- linux-5.4.36/arch/riscv/kernel/crash_dump.c 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/kernel/crash_dump.c 2020-09-03 06:01:13.905989796 +0000
@@ -0,0 +1,69 @@
+/*
+ * Routines for doing kexec-based kdump
+ *
+ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crash_dump.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/uaccess.h>
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in a user address space
+ *
+ * This function copies one page from old kernel memory into buffer pointed by
+ * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
+ * copied or negative error in case of failure.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset,
+ int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ if (userbuf) {
+ if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
+ memunmap(vaddr);
+ return -EFAULT;
+ }
+ } else {
+ memcpy(buf, vaddr + offset, csize);
+ }
+
+ memunmap(vaddr);
+
+ return csize;
+}
+
+/**
+ * elfcorehdr_read - read from ELF core header
+ * @buf: buffer where the data is placed
+ * @count: number of bytes to read
+ * @ppos: address in the memory
+ *
+ * This function reads @count bytes from elf core header which exists
+ * on crash dump kernel's memory.
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+ memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ return count;
+}
diff -Nur linux-5.4.36/arch/riscv/kernel/entry.S kernel/arch/riscv/kernel/entry.S
--- linux-5.4.36/arch/riscv/kernel/entry.S 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/entry.S 2020-09-03 06:01:13.905989796 +0000
@@ -76,7 +76,7 @@
* Disable the FPU to detect illegal usage of floating point in kernel
* space.
*/
- li t0, SR_SUM | SR_FS
+ li t0, SR_SUM | SR_FS | SR_VS
REG_L s0, TASK_TI_USER_SP(tp)
csrrc s1, CSR_SSTATUS, t0
diff -Nur linux-5.4.36/arch/riscv/kernel/head.S kernel/arch/riscv/kernel/head.S
--- linux-5.4.36/arch/riscv/kernel/head.S 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/head.S 2020-09-03 06:01:13.905989796 +0000
@@ -60,7 +60,7 @@
* Disable FPU to detect illegal usage of
* floating point in kernel space
*/
- li t0, SR_FS
+ li t0, SR_FS | SR_VS
csrc CSR_SSTATUS, t0
#ifdef CONFIG_SMP
diff -Nur linux-5.4.36/arch/riscv/kernel/irq.c kernel/arch/riscv/kernel/irq.c
--- linux-5.4.36/arch/riscv/kernel/irq.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/irq.c 2020-09-03 06:01:13.905989796 +0000
@@ -17,6 +17,7 @@
#define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
#define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
#define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
+#define INTERRUPT_CAUSE_PMU IRQ_S_PMU
int arch_show_interrupts(struct seq_file *p, int prec)
{
@@ -24,6 +25,7 @@
return 0;
}
+extern int riscv_pmu_handle_irq(void);
asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -33,6 +35,11 @@
case INTERRUPT_CAUSE_TIMER:
riscv_timer_interrupt();
break;
+#ifdef CONFIG_THEAD_XT_V1_PMU
+ case INTERRUPT_CAUSE_PMU:
+ riscv_pmu_handle_irq();
+ break;
+#endif
#ifdef CONFIG_SMP
case INTERRUPT_CAUSE_SOFTWARE:
/*
diff -Nur linux-5.4.36/arch/riscv/kernel/machine_kexec.c kernel/arch/riscv/kernel/machine_kexec.c
--- linux-5.4.36/arch/riscv/kernel/machine_kexec.c 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/kernel/machine_kexec.c 2020-09-03 06:01:13.906989796 +0000
@@ -0,0 +1,163 @@
+/*
+ * kexec for riscv
+ *
+ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/page-flags.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+#include "cpu-reset.h"
+
+/* Global variables for the riscv_relocate_new_kernel routine. */
+extern const unsigned char riscv_relocate_new_kernel[];
+extern const unsigned long riscv_relocate_new_kernel_size;
+
+/*
+ * kexec_image_info - For debugging output.
+ */
+#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
+static void _kexec_image_info(const char *func, int line,
+ const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("%s:%d:\n", func, line);
+ pr_debug(" kexec kimage info:\n");
+ pr_debug(" type: %d\n", kimage->type);
+ pr_debug(" start: 0x%lx\n", kimage->start);
+ pr_debug(" head: 0x%lx\n", kimage->head);
+ pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ kimage->segment[i].memsz,
+ kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+ /* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
+ * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
+ */
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ kexec_image_info(kimage);
+
+ return 0;
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *kimage)
+{
+ phys_addr_t reboot_code_buffer_phys;
+ void *reboot_code_buffer;
+
+ reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
+ reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+ pr_debug("%s:%d: control_code_page: 0x%lx\n", __func__, __LINE__,
+ (long)kimage->control_code_page);
+ pr_debug("%s:%d: reboot_code_buffer_phys: 0x%lx\n", __func__, __LINE__,
+ (long)reboot_code_buffer_phys);
+ pr_debug("%s:%d: reboot_code_buffer: 0x%lx\n", __func__, __LINE__,
+ (long)reboot_code_buffer);
+ pr_debug("%s:%d: relocate_new_kernel: 0x%lx\n", __func__, __LINE__,
+ (long)riscv_relocate_new_kernel);
+ pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
+ __func__, __LINE__, riscv_relocate_new_kernel_size,
+ riscv_relocate_new_kernel_size);
+
+ /*
+ * Copy riscv_relocate_new_kernel to the reboot_code_buffer for use
+ * after the kernel is shut down.
+ */
+ memcpy(reboot_code_buffer, riscv_relocate_new_kernel,
+ riscv_relocate_new_kernel_size);
+
+ pr_info("Bye!\n");
+
+ local_irq_disable();
+
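+ /*
+ * arg1/arg2 are forwarded to the new image; the hard-coded segment
+ * indices presumably select the kernel image and the DTB.
+ */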
+ cpu_soft_restart(reboot_code_buffer_phys, kimage->head,
+ kimage->segment[0].mem, kimage->segment[2].mem);
+
+ BUG(); /* Should never get here. */
+}
+
+static void machine_kexec_mask_interrupts(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+ int ret;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ /*
+ * First try to remove the active state. If this
+ * fails, try to EOI the interrupt.
+ */
+ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+ if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+ chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
+}
+
+/**
+ * machine_crash_shutdown - shutdown non-crashing cpus and save registers
+ */
+extern void crash_smp_send_stop(void);
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+
+ /* shutdown non-crashing cpus */
+ crash_smp_send_stop();
+
+ /* for crashing cpu */
+ crash_save_cpu(regs, smp_processor_id());
+ machine_kexec_mask_interrupts();
+
+ pr_info("Starting crashdump kernel...\n");
+}
diff -Nur linux-5.4.36/arch/riscv/kernel/Makefile kernel/arch/riscv/kernel/Makefile
--- linux-5.4.36/arch/riscv/kernel/Makefile 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/Makefile 2020-09-03 06:01:13.905989796 +0000
@@ -30,6 +30,7 @@
obj-y += vdso/
obj-$(CONFIG_FPU) += fpu.o
+obj-$(CONFIG_VECTOR) += vector.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o
@@ -42,4 +43,8 @@
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
+ cpu-reset.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+
clean:
diff -Nur linux-5.4.36/arch/riscv/kernel/module.c kernel/arch/riscv/kernel/module.c
--- linux-5.4.36/arch/riscv/kernel/module.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/module.c 2020-09-03 06:01:13.906989796 +0000
@@ -253,7 +253,7 @@
pr_err(
"%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
me->name, location);
- return -EINVAL;
+ return 0; /* Do not return -EINVAL when relocation type is R_RISCV_ALIGN */
}
static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
diff -Nur linux-5.4.36/arch/riscv/kernel/perf_event.c kernel/arch/riscv/kernel/perf_event.c
--- linux-5.4.36/arch/riscv/kernel/perf_event.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/perf_event.c 2020-09-03 06:01:13.906989796 +0000
@@ -477,9 +477,9 @@
if (of_id)
riscv_pmu = of_id->data;
of_node_put(node);
+ perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
}
- perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
arch_initcall(init_hw_perf_events);
diff -Nur linux-5.4.36/arch/riscv/kernel/process.c kernel/arch/riscv/kernel/process.c
--- linux-5.4.36/arch/riscv/kernel/process.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/process.c 2020-09-03 06:01:13.906989796 +0000
@@ -74,6 +74,12 @@
*/
fstate_restore(current, regs);
}
+
+ if (has_vector) {
+ regs->sstatus |= SR_VS_INITIAL;
+ vstate_restore(current, regs);
+ }
+
regs->sepc = pc;
regs->sp = sp;
set_fs(USER_DS);
diff -Nur linux-5.4.36/arch/riscv/kernel/ptrace.c kernel/arch/riscv/kernel/ptrace.c
--- linux-5.4.36/arch/riscv/kernel/ptrace.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/ptrace.c 2020-09-03 06:01:13.906989796 +0000
@@ -26,6 +26,9 @@
#ifdef CONFIG_FPU
REGSET_F,
#endif
+#ifdef CONFIG_VECTOR
+ REGSET_V,
+#endif
};
static int riscv_gpr_get(struct task_struct *target,
@@ -92,6 +95,34 @@
}
#endif
+#ifdef CONFIG_VECTOR
+static int riscv_vr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+ struct __riscv_v_state *vstate = &target->thread.vstate;
+
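+ /* Copies the vector state up to, but not including, vtype. */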
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, vstate, 0,
+ offsetof(struct __riscv_v_state, vtype));
+ return ret;
+}
+
+static int riscv_vr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct __riscv_v_state *vstate = &target->thread.vstate;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate, 0,
+ offsetof(struct __riscv_v_state, vtype));
+ return ret;
+}
+#endif
+
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
.core_note_type = NT_PRSTATUS,
@@ -111,6 +142,16 @@
.set = &riscv_fpr_set,
},
#endif
+#ifdef CONFIG_VECTOR
+ [REGSET_V] = {
+ .core_note_type = NT_RISCV_VECTOR,
+ .n = ELF_NVREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .get = &riscv_vr_get,
+ .set = &riscv_vr_set,
+ },
+#endif
};
static const struct user_regset_view riscv_user_native_view = {
diff -Nur linux-5.4.36/arch/riscv/kernel/relocate_kernel.S kernel/arch/riscv/kernel/relocate_kernel.S
--- linux-5.4.36/arch/riscv/kernel/relocate_kernel.S 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/kernel/relocate_kernel.S 2020-09-03 06:01:13.906989796 +0000
@@ -0,0 +1,32 @@
+/*
+ * kexec for riscv
+ *
+ * Copyright (C) 2020-2025 Alibaba Group Holding Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/linkage.h>
+
+#include <asm/kexec.h>
+#include <asm/page.h>
+
+ENTRY(riscv_relocate_new_kernel)
+ /* Start new image. */
+ mv s1, a1
+ mv a0, zero
+ mv a1, a2
+ mv a2, zero
+ mv a3, zero
+ jr s1
+ENDPROC(riscv_relocate_new_kernel)
+
+.Lcopy_end:
+.org KEXEC_CONTROL_PAGE_SIZE
+
+.globl riscv_relocate_new_kernel_size
+riscv_relocate_new_kernel_size:
+ .quad .Lcopy_end - riscv_relocate_new_kernel
diff -Nur linux-5.4.36/arch/riscv/kernel/reset.c kernel/arch/riscv/kernel/reset.c
--- linux-5.4.36/arch/riscv/kernel/reset.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/reset.c 2020-09-03 06:01:13.906989796 +0000
@@ -31,3 +31,7 @@
{
pm_power_off();
}
+
+void machine_shutdown(void)
+{
+}
diff -Nur linux-5.4.36/arch/riscv/kernel/setup.c kernel/arch/riscv/kernel/setup.c
--- linux-5.4.36/arch/riscv/kernel/setup.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/setup.c 2020-09-03 06:01:13.906989796 +0000
@@ -38,7 +38,7 @@
#endif
/* The lucky hart to first increment this variable will boot the other cores */
-atomic_t hart_lottery;
+__section(.data) atomic_t hart_lottery;
unsigned long boot_cpu_hartid;
void __init parse_dtb(void)
diff -Nur linux-5.4.36/arch/riscv/kernel/signal.c kernel/arch/riscv/kernel/signal.c
--- linux-5.4.36/arch/riscv/kernel/signal.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/signal.c 2020-09-03 06:01:13.906989796 +0000
@@ -78,6 +78,41 @@
#define restore_fp_state(task, regs) (0)
#endif
+#ifdef CONFIG_VECTOR
+static long restore_v_state(struct pt_regs *regs,
+ struct __riscv_v_state *sc_vregs)
+{
+ long err;
+ struct __riscv_v_state __user *state = sc_vregs;
+
+ err = __copy_from_user(&current->thread.vstate, state, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ vstate_restore(current, regs);
+
+ return err;
+}
+
+static long save_v_state(struct pt_regs *regs,
+ struct __riscv_v_state *sc_vregs)
+{
+ long err;
+ struct __riscv_v_state __user *state = sc_vregs;
+
+ vstate_save(current, regs);
+ err = __copy_to_user(state, &current->thread.vstate, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ return err;
+}
+#else
+#define save_v_state(task, regs) (0)
+#define restore_v_state(task, regs) (0)
+#endif
+
+
static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
@@ -87,6 +122,9 @@
/* Restore the floating-point state. */
if (has_fpu)
err |= restore_fp_state(regs, &sc->sc_fpregs);
+ /* Restore the vector state. */
+ if (has_vector)
+ err |= restore_v_state(regs, &sc->sc_vregs);
return err;
}
@@ -140,6 +178,9 @@
/* Save the floating-point state. */
if (has_fpu)
err |= save_fp_state(regs, &sc->sc_fpregs);
+ /* Save the vector state. */
+ if (has_vector)
+ err |= save_v_state(regs, &sc->sc_vregs);
return err;
}
diff -Nur linux-5.4.36/arch/riscv/kernel/smpboot.c kernel/arch/riscv/kernel/smpboot.c
--- linux-5.4.36/arch/riscv/kernel/smpboot.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/kernel/smpboot.c 2020-09-03 06:01:13.906989796 +0000
@@ -33,8 +33,8 @@
#include "head.h"
-void *__cpu_up_stack_pointer[NR_CPUS];
-void *__cpu_up_task_pointer[NR_CPUS];
+__section(.data) void *__cpu_up_stack_pointer[NR_CPUS];
+__section(.data) void *__cpu_up_task_pointer[NR_CPUS];
static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
@@ -102,6 +102,7 @@
int hartid = cpuid_to_hartid_map(cpu);
tidle->thread_info.cpu = cpu;
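+ /*
+ * Vendor-specific SBI call (function ID 0x09000003), presumably
+ * telling the firmware to wake the secondary hart.
+ */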
+ SBI_CALL_1(0x09000003, hartid);
/*
* On RISC-V systems, all harts boot on their own accord. Our _start
* selects the first hart to boot the kernel and causes the remainder
diff -Nur linux-5.4.36/arch/riscv/kernel/vector.S kernel/arch/riscv/kernel/vector.S
--- linux-5.4.36/arch/riscv/kernel/vector.S 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/kernel/vector.S 2020-09-03 06:01:13.907989796 +0000
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2019 T-HEAD
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/asm-offsets.h>
+
+ENTRY(__vstate_save)
+ li a2, TASK_THREAD_V0
+ add a0, a0, a2
+
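+ /* Temporarily set SR_VS/SR_FS so the vector CSRs and registers are
+ * accessible; both are cleared again by the csrc before returning. */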
+ li t1, (SR_VS | SR_FS)
+ csrs sstatus, t1
+
+ csrr t0, CSR_VSTART
+ sd t0, TASK_THREAD_VSTART_V0(a0)
+ csrr t0, CSR_VXSAT
+ sd t0, TASK_THREAD_VXSAT_V0(a0)
+ csrr t0, CSR_VXRM
+ sd t0, TASK_THREAD_VXRM_V0(a0)
+ csrr t0, CSR_VL
+ sd t0, TASK_THREAD_VL_V0(a0)
+ csrr t0, CSR_VTYPE
+ sd t0, TASK_THREAD_VTYPE_V0(a0)
+
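+ /* With e8,m8 each register group is 8 vector registers (8 * VLENB
+ * bytes), so four stores of v0/v8/v16/v24 dump all 32 registers. */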
+ vsetvli t0, x0, e8,m8
+ vsb.v v0, (a0)
+ addi a0, a0, RISCV_VECTOR_VLENB*8
+ vsb.v v8, (a0)
+ addi a0, a0, RISCV_VECTOR_VLENB*8
+ vsb.v v16, (a0)
+ addi a0, a0, RISCV_VECTOR_VLENB*8
+ vsb.v v24, (a0)
+
+ csrc sstatus, t1
+ ret
+ENDPROC(__vstate_save)
+
+ENTRY(__vstate_restore)
+ li a2, TASK_THREAD_V0
+ add a0, a0, a2
+ mv t2, a0
+
+ li t1, (SR_VS | SR_FS)
+ csrs sstatus, t1
+
+ vsetvli t0, x0, e8,m8
+ vlb.v v0, (a0)
+ addi a0, a0, RISCV_VECTOR_VLENB*8
+ vlb.v v8, (a0)
+ addi a0, a0, RISCV_VECTOR_VLENB*8
+ vlb.v v16, (a0)
+ addi a0, a0, RISCV_VECTOR_VLENB*8
+ vlb.v v24, (a0)
+
+ mv a0, t2
+ ld t0, TASK_THREAD_VSTART_V0(a0)
+ csrw CSR_VSTART, t0
+ ld t0, TASK_THREAD_VXSAT_V0(a0)
+ csrw CSR_VXSAT, t0
+ ld t0, TASK_THREAD_VXRM_V0(a0)
+ csrw CSR_VXRM, t0
+ ld t0, TASK_THREAD_VL_V0(a0)
+ ld t2, TASK_THREAD_VTYPE_V0(a0)
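+ /* vl and vtype are not directly writable CSRs; restore both at once
+ * via vsetvl. */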
+ vsetvl t0, t0, t2
+
+ csrc sstatus, t1
+ ret
+ENDPROC(__vstate_restore)
diff -Nur linux-5.4.36/arch/riscv/Makefile kernel/arch/riscv/Makefile
--- linux-5.4.36/arch/riscv/Makefile 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/Makefile 2020-09-03 06:01:13.901989796 +0000
@@ -35,12 +35,19 @@
endif
# ISA string setting
-riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
-riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
-riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
-riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
-KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
-KBUILD_AFLAGS += -march=$(riscv-march-y)
+riscv-march-cflags-$(CONFIG_ARCH_RV32I) := rv32ima
+riscv-march-cflags-$(CONFIG_ARCH_RV64I) := rv64ima
+riscv-march-cflags-$(CONFIG_RISCV_ISA_C) := $(riscv-march-cflags-y)c
+
+riscv-march-aflags-$(CONFIG_ARCH_RV32I) := rv32ima
+riscv-march-aflags-$(CONFIG_ARCH_RV64I) := rv64ima
+riscv-march-aflags-$(CONFIG_FPU) := $(riscv-march-aflags-y)fd
+riscv-march-aflags-$(CONFIG_RISCV_ISA_C) := $(riscv-march-aflags-y)c
+riscv-march-aflags-$(CONFIG_VECTOR) := $(riscv-march-aflags-y)v
+riscv-march-aflags-$(CONFIG_RISCV_ISA_THEAD) := $(riscv-march-aflags-y)xthead
+
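+# The compiler only sees the base ISA string, while the assembler gets the
+# full one (including fd/v/xthead) via -Wa,-march=..., so hand-written vector
+# assembly builds without the compiler ever emitting f/d/v instructions.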
+KBUILD_CFLAGS += -march=$(riscv-march-cflags-y) -Wa,-march=$(riscv-march-aflags-y)
+KBUILD_AFLAGS += -march=$(riscv-march-aflags-y)
KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
diff -Nur linux-5.4.36/arch/riscv/mm/asid.c kernel/arch/riscv/mm/asid.c
--- linux-5.4.36/arch/riscv/mm/asid.c 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/mm/asid.c 2020-09-03 06:01:13.908989796 +0000
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic ASID allocator.
+ *
+ * Based on arch/arm/mm/context.c
+ *
+ * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+
+#include <asm/asid.h>
+
+#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
+
+#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0))
+#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))
+
+#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
+#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
+
+static void flush_context(struct asid_info *info)
+{
+ int i;
+ u64 asid;
+
+ /* Update the list of reserved ASIDs and the ASID bitmap. */
+ bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+
+ for_each_possible_cpu(i) {
+ asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * ASID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (asid == 0)
+ asid = reserved_asid(info, i);
+ __set_bit(asid2idx(info, asid), info->map);
+ reserved_asid(info, i) = asid;
+ }
+
+ /*
+ * Queue a TLB invalidation for each CPU to perform on next
+ * context-switch
+ */
+ cpumask_setall(&info->flush_pending);
+}
+
+static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
+ u64 newasid)
+{
+ int cpu;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved ASIDs looking for a match.
+ * If we find one, then we can update our mm to use newasid
+ * (i.e. the same ASID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old ASID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved ASID in a future
+ * generation.
+ */
+ for_each_possible_cpu(cpu) {
+ if (reserved_asid(info, cpu) == asid) {
+ hit = true;
+ reserved_asid(info, cpu) = newasid;
+ }
+ }
+
+ return hit;
+}
+
+static u64 new_context(struct asid_info *info, atomic64_t *pasid,
+ struct mm_struct *mm)
+{
+ static u32 cur_idx = 1;
+ u64 asid = atomic64_read(pasid);
+ u64 generation = atomic64_read(&info->generation);
+
+ if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK(info));
+
+ /*
+ * If our current ASID was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
+ */
+ if (check_update_reserved_asid(info, asid, newasid))
+ return newasid;
+
+ /*
+ * We had a valid ASID in a previous life, so try to re-use
+ * it if possible.
+ */
+ if (!__test_and_set_bit(asid2idx(info, asid), info->map))
+ return newasid;
+ }
+
+ /*
+ * Allocate a free ASID. If we can't find one, take a note of the
+ * currently active ASIDs and mark the TLBs as requiring flushes. We
+ * always count from ASID #2 (index 1), as we use ASID #0 when setting
+ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+ * pairs.
+ */
+ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
+ if (asid != NUM_CTXT_ASIDS(info))
+ goto set_asid;
+
+ /* We're out of ASIDs, so increment the global generation count */
+ generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
+ &info->generation);
+ flush_context(info);
+
+ /* We have more ASIDs than CPUs, so this will always succeed */
+ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
+
+set_asid:
+ __set_bit(asid, info->map);
+ cur_idx = asid;
+ cpumask_clear(mm_cpumask(mm));
+ return idx2asid(info, asid) | generation;
+}
+
+/*
+ * Generate a new ASID for the context.
+ *
+ * @pasid: Pointer to the current ASID batch allocated. It will be updated
+ * with the new ASID batch.
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+ unsigned int cpu, struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid;
+
+ raw_spin_lock_irqsave(&info->lock, flags);
+ /* Check that our ASID belongs to the current generation. */
+ asid = atomic64_read(pasid);
+ if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
+ asid = new_context(info, pasid, mm);
+ atomic64_set(pasid, asid);
+ }
+
+ if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
+ info->flush_cpu_ctxt_cb();
+
+ atomic64_set(&active_asid(info, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ raw_spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/*
+ * Initialize the ASID allocator
+ *
+ * @info: Pointer to the asid allocator structure
+ * @bits: Number of ASIDs available
+ * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
+ * allocated contiguously for a given context. This value should be a power of
+ * 2.
+ */
+int asid_allocator_init(struct asid_info *info,
+ u32 bits, unsigned int asid_per_ctxt,
+ void (*flush_cpu_ctxt_cb)(void))
+{
+ info->bits = bits;
+ info->ctxt_shift = ilog2(asid_per_ctxt);
+ info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more ASID than CPUs. ASID #0 is always reserved.
+ */
+ WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
+ atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
+ info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
+ sizeof(*info->map), GFP_KERNEL);
+ if (!info->map)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&info->lock);
+
+ return 0;
+}
diff -Nur linux-5.4.36/arch/riscv/mm/cacheflush.c kernel/arch/riscv/mm/cacheflush.c
--- linux-5.4.36/arch/riscv/mm/cacheflush.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/mm/cacheflush.c 2020-09-03 06:01:13.908989796 +0000
@@ -74,3 +74,24 @@
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
flush_icache_all();
}
+
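+/*
+ * T-HEAD cache-maintenance extensions are not known to binutils, so they
+ * are emitted as raw opcodes; per the macro name, 0x01b0000b encodes the
+ * sync.is barrier.
+ */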
+#define sync_is() asm volatile (".long 0x01b0000b")
+void dma_wbinv_range(unsigned long start, unsigned long end)
+{
+ register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile (".long 0x02b5000b"); /* dcache.cipa a0 */
+
+ sync_is();
+}
+
+void dma_wb_range(unsigned long start, unsigned long end)
+{
+ register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile (".long 0x0295000b"); /* dcache.cpa a0 */
+
+ sync_is();
+}
diff -Nur linux-5.4.36/arch/riscv/mm/context.c kernel/arch/riscv/mm/context.c
--- linux-5.4.36/arch/riscv/mm/context.c 2020-04-29 14:33:25.000000000 +0000
+++ kernel/arch/riscv/mm/context.c 2020-09-03 06:01:13.908989796 +0000
@@ -8,6 +8,7 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+#include <asm/fence.h>
/*
* When necessary, performs a deferred icache flush for the given MM context,
@@ -44,6 +45,8 @@
struct task_struct *task)
{
unsigned int cpu;
+ unsigned long asid;
+ unsigned long x;
if (unlikely(prev == next))
return;
@@ -58,8 +61,50 @@
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
- csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
- local_flush_tlb_all();
+ check_and_switch_context(next, cpu);
+ asid = (next->context.asid.counter & SATP_ASID_MASK)
+ << SATP_ASID_SHIFT;
+
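+ /*
+ * Program satp with the new root page table and ASID. The repeated
+ * sync_mmu_v1() barriers before the write appear to be a C910 MMU
+ * workaround (see the c910_mmu_v1 handling in mm/tlbflush.c).
+ */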
+ x = virt_to_pfn(next->pgd) | SATP_MODE | asid;
+ sync_mmu_v1();
+ sync_mmu_v1();
+ sync_mmu_v1();
+ csr_write(sptbr, x);
flush_icache_deferred(next);
}
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+
+struct asid_info asid_info;
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+{
+ asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
+}
+
+static void asid_flush_cpu_ctxt(void)
+{
+ local_flush_tlb_all();
+}
+
+static int asids_init(void)
+{
+ BUG_ON(((1 << SATP_ASID_BITS) - 1) <= num_possible_cpus());
+
+ if (asid_allocator_init(&asid_info, SATP_ASID_BITS, 1,
+ asid_flush_cpu_ctxt))
+ panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+ NUM_ASIDS(&asid_info));
+
+ asid_info.active = &active_asids;
+ asid_info.reserved = &reserved_asids;
+
+ pr_info("ASID allocator initialised with %lu entries\n",
+ NUM_CTXT_ASIDS(&asid_info));
+
+ local_flush_tlb_all();
+ return 0;
+}
+early_initcall(asids_init);
diff -Nur linux-5.4.36/arch/riscv/mm/dma-mapping.c kernel/arch/riscv/mm/dma-mapping.c
--- linux-5.4.36/arch/riscv/mm/dma-mapping.c 1970-01-01 00:00:00.000000000 +0000
+++ kernel/arch/riscv/mm/dma-mapping.c 2020-09-03 06:01:13.908989796 +0000
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <asm/cache.h>
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ void *ptr = page_address(page);
+
+ memset(ptr, 0, size);
+ dma_wbinv_range(page_to_phys(page), page_to_phys(page) + size);
+}
+
+static inline void cache_op(phys_addr_t paddr, size_t size,
+ void (*fn)(unsigned long start, unsigned long end))
+{
+ unsigned long start = (unsigned long)paddr;
+
+ fn(start, start + size);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
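+ /*
+ * TO_DEVICE only needs dirty lines written back; FROM_DEVICE and
+ * BIDIRECTIONAL also invalidate, so stale lines are not read back
+ * after the device has written memory.
+ */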
  2198. + switch (dir) {
  2199. + case DMA_TO_DEVICE:
  2200. + cache_op(paddr, size, dma_wb_range);
  2201. + break;
  2202. + case DMA_FROM_DEVICE:
  2203. + case DMA_BIDIRECTIONAL:
  2204. + cache_op(paddr, size, dma_wbinv_range);
  2205. + break;
  2206. + default:
  2207. + BUG();
  2208. + }
  2209. +}
  2210. +
  2211. +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
  2212. + size_t size, enum dma_data_direction dir)
  2213. +{
  2214. + switch (dir) {
  2215. + case DMA_TO_DEVICE:
  2216. + return;
  2217. + case DMA_FROM_DEVICE:
  2218. + case DMA_BIDIRECTIONAL:
  2219. + cache_op(paddr, size, dma_wbinv_range);
  2220. + break;
  2221. + default:
  2222. + BUG();
  2223. + }
  2224. +}
  2225. +
  2226. +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
  2227. + unsigned long attrs)
  2228. +{
  2229. + if (attrs & DMA_ATTR_WRITE_COMBINE)
  2230. + return pgprot_writecombine(prot);
  2231. + return pgprot_noncached(prot);
  2232. +}
  2233. diff -Nur linux-5.4.36/arch/riscv/mm/fault.c kernel/arch/riscv/mm/fault.c
  2234. --- linux-5.4.36/arch/riscv/mm/fault.c 2020-04-29 14:33:25.000000000 +0000
  2235. +++ kernel/arch/riscv/mm/fault.c 2020-09-03 06:01:13.908989796 +0000
  2236. @@ -232,7 +232,7 @@
  2237. * of a task switch.
  2238. */
  2239. index = pgd_index(addr);
  2240. - pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
  2241. + pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP) & SATP_PPN) + index;
  2242. pgd_k = init_mm.pgd + index;
  2243. if (!pgd_present(*pgd_k))
  2244. diff -Nur linux-5.4.36/arch/riscv/mm/init.c kernel/arch/riscv/mm/init.c
  2245. --- linux-5.4.36/arch/riscv/mm/init.c 2020-04-29 14:33:25.000000000 +0000
  2246. +++ kernel/arch/riscv/mm/init.c 2020-09-03 06:01:13.908989796 +0000
  2247. @@ -13,6 +13,8 @@
  2248. #include <linux/of_fdt.h>
  2249. #include <linux/libfdt.h>
  2250. +#include <linux/crash_dump.h>
  2251. +
  2252. #include <asm/fixmap.h>
  2253. #include <asm/tlbflush.h>
  2254. #include <asm/sections.h>
  2255. @@ -27,6 +29,167 @@
  2256. extern char _start[];
  2257. +#ifdef CONFIG_KEXEC_CORE
  2258. +static void __init reserve_crashkernel(void)
  2259. +{
  2260. + unsigned long long crash_base, crash_size;
  2261. + int ret;
  2262. +
  2263. + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
  2264. + &crash_size, &crash_base);
  2265. + if (ret || !crash_size)
  2266. + return;
  2267. +
  2268. + if (crash_base == 0) {
  2269. + crash_base = memblock_find_in_range(0, __pfn_to_phys(max_low_pfn)-1,
  2270. + crash_size, SZ_2M);
  2271. + pr_debug("crash_base: 0x%llx\n", crash_base);
  2272. + }
  2273. +
  2274. + memblock_reserve(crash_base, crash_size);
  2275. +
  2276. + pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
  2277. + crash_base, crash_base + crash_size, crash_size >> 20);
  2278. +
  2279. + crashk_res.start = crash_base;
  2280. + crashk_res.end = crash_base + crash_size - 1;
  2281. +}
  2282. +#else
  2283. +static void __init reserve_crashkernel(void)
  2284. +{
  2285. +}
  2286. +#endif /* CONFIG_KEXEC_CORE */
  2287. +
  2288. +#ifdef CONFIG_CRASH_DUMP
  2289. +static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
  2290. + const char *uname, int depth, void *data)
  2291. +{
  2292. + const __be32 *reg;
  2293. + int len;
  2294. +
  2295. + if (depth != 1 || strcmp(uname, "chosen") != 0)
  2296. + return 0;
  2297. +
  2298. + reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
  2299. + if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
  2300. + return 1;
  2301. +
  2302. + elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
  2303. + elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);
  2304. +
  2305. + return 1;
  2306. +}
  2307. +
  2308. +/*
  2309. + * reserve_elfcorehdr() - reserves memory for elf core header
  2310. + *
  2311. + * This function reserves the memory occupied by an elf core header
  2312. + * described in the device tree. This region contains all the
  2313. + * information about primary kernel's core image and is used by a dump
  2314. + * capture kernel to access the system memory on primary kernel.
  2315. + */
  2316. +static void __init reserve_elfcorehdr(void)
  2317. +{
  2318. + of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
  2319. +
  2320. + if (!elfcorehdr_size)
  2321. + return;
  2322. +
  2323. + if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
  2324. + pr_warn("elfcorehdr is overlapped\n");
  2325. + return;
  2326. + }
  2327. +
  2328. + memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
  2329. +
  2330. + pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
  2331. + elfcorehdr_size >> 10, elfcorehdr_addr);
  2332. +}
  2333. +#else
  2334. +static void __init reserve_elfcorehdr(void)
  2335. +{
  2336. +}
  2337. +#endif /* CONFIG_CRASH_DUMP */
  2338. +
  2339. +/*
  2340. + * Standard memory resources
  2341. + */
  2342. +static struct resource mem_res[] = {
  2343. + {
  2344. + .name = "Kernel code",
  2345. + .start = 0,
  2346. + .end = 0,
  2347. + .flags = IORESOURCE_SYSTEM_RAM
  2348. + },
  2349. + {
  2350. + .name = "Kernel data",
  2351. + .start = 0,
  2352. + .end = 0,
  2353. + .flags = IORESOURCE_SYSTEM_RAM
  2354. + }
  2355. +};
  2356. +
  2357. +#define kernel_code mem_res[0]
  2358. +#define kernel_data mem_res[1]
  2359. +
  2360. +static int num_standard_resources;
  2361. +struct resource *standard_resources;
  2362. +
  2363. +extern char _start[];
  2364. +static void __init request_standard_resources(void)
  2365. +{
  2366. + struct memblock_region *region;
  2367. + struct resource *res;
  2368. + unsigned long i = 0;
  2369. + size_t res_size;
  2370. +
  2371. + kernel_code.start = __pa_symbol(_start);
  2372. + kernel_code.end = __pa_symbol(__init_end - 1);
  2373. + kernel_data.start = __pa_symbol(_sdata);
  2374. + kernel_data.end = __pa_symbol(_end - 1);
  2375. +
  2376. + num_standard_resources = memblock.memory.cnt;
  2377. + res_size = num_standard_resources * sizeof(*standard_resources);
  2378. + standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
  2379. + if (!standard_resources)
  2380. + panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
  2381. +
  2382. + for_each_memblock(memory, region) {
  2383. + res = &standard_resources[i++];
  2384. + if (memblock_is_nomap(region)) {
  2385. + res->name = "reserved";
  2386. + res->flags = IORESOURCE_MEM;
  2387. + } else {
  2388. + res->name = "System RAM";
  2389. + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
  2390. + }
  2391. + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
  2392. + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
  2393. +
  2394. + request_resource(&iomem_resource, res);
  2395. +
  2396. + if (kernel_code.start >= res->start &&
  2397. + kernel_code.end <= res->end)
  2398. + request_resource(res, &kernel_code);
  2399. + if (kernel_data.start >= res->start &&
  2400. + kernel_data.end <= res->end)
  2401. + request_resource(res, &kernel_data);
  2402. +#ifdef CONFIG_KEXEC_CORE
  2403. + /* Userspace will find "Crash kernel" region in /proc/iomem. */
  2404. + if (crashk_res.end && crashk_res.start >= res->start &&
  2405. + crashk_res.end <= res->end)
  2406. + request_resource(res, &crashk_res);
  2407. +#endif
  2408. + }
  2409. +}
  2410. +
  2411. +void __init riscv_kdump_crash(void)
  2412. +{
  2413. + reserve_crashkernel();
  2414. + reserve_elfcorehdr();
  2415. + request_standard_resources();
  2416. +}
  2417. +
  2418. static void __init zone_sizes_init(void)
  2419. {
  2420. unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
  2421. @@ -170,8 +333,8 @@
  2422. set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
  2423. } else {
  2424. pte_clear(&init_mm, addr, ptep);
  2425. - local_flush_tlb_page(addr);
  2426. }
  2427. + local_flush_tlb_page(addr);
  2428. }
  2429. static pte_t *__init get_pte_virt(phys_addr_t pa)
  2430. @@ -457,6 +620,7 @@
  2431. sparse_init();
  2432. setup_zero_page();
  2433. zone_sizes_init();
  2434. + riscv_kdump_crash();
  2435. }
  2436. #ifdef CONFIG_SPARSEMEM_VMEMMAP
  2437. diff -Nur linux-5.4.36/arch/riscv/mm/ioremap.c kernel/arch/riscv/mm/ioremap.c
  2438. --- linux-5.4.36/arch/riscv/mm/ioremap.c 2020-04-29 14:33:25.000000000 +0000
  2439. +++ kernel/arch/riscv/mm/ioremap.c 2020-09-03 06:01:13.908989796 +0000
  2440. @@ -50,26 +50,19 @@
  2441. return (void __iomem *)(vaddr + offset);
  2442. }
  2443. -/*
  2444. - * ioremap - map bus memory into CPU space
  2445. - * @offset: bus address of the memory
  2446. - * @size: size of the resource to map
  2447. - *
  2448. - * ioremap performs a platform specific sequence of operations to
  2449. - * make bus memory CPU accessible via the readb/readw/readl/writeb/
  2450. - * writew/writel functions and the other mmio helpers. The returned
  2451. - * address is not guaranteed to be usable directly as a virtual
  2452. - * address.
  2453. - *
  2454. - * Must be freed with iounmap.
  2455. - */
  2456. -void __iomem *ioremap(phys_addr_t offset, unsigned long size)
  2457. +void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
  2458. {
  2459. - return __ioremap_caller(offset, size, PAGE_KERNEL,
  2460. - __builtin_return_address(0));
  2461. + return __ioremap_caller(phys_addr, size, prot,
  2462. + __builtin_return_address(0));
  2463. }
  2464. -EXPORT_SYMBOL(ioremap);
  2465. +EXPORT_SYMBOL(__ioremap);
  2466. +void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
  2467. +{
  2468. + return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
  2469. + __builtin_return_address(0));
  2470. +}
  2471. +EXPORT_SYMBOL(ioremap_cache);
  2472. /**
  2473. * iounmap - Free a IO remapping
  2474. @@ -82,3 +75,16 @@
  2475. vunmap((void *)((unsigned long)addr & PAGE_MASK));
  2476. }
  2477. EXPORT_SYMBOL(iounmap);
  2478. +
  2479. +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  2480. + unsigned long size, pgprot_t vma_prot)
  2481. +{
  2482. + if (!pfn_valid(pfn)) {
  2483. + return pgprot_noncached(vma_prot);
  2484. + } else if (file->f_flags & O_SYNC) {
  2485. + return pgprot_writecombine(vma_prot);
  2486. + }
  2487. +
  2488. + return vma_prot;
  2489. +}
  2490. +EXPORT_SYMBOL(phys_mem_access_prot);
  2491. diff -Nur linux-5.4.36/arch/riscv/mm/Makefile kernel/arch/riscv/mm/Makefile
  2492. --- linux-5.4.36/arch/riscv/mm/Makefile 2020-04-29 14:33:25.000000000 +0000
  2493. +++ kernel/arch/riscv/mm/Makefile 2020-09-03 06:01:13.908989796 +0000
  2494. @@ -12,6 +12,9 @@
  2495. obj-y += cacheflush.o
  2496. obj-y += context.o
  2497. obj-y += sifive_l2_cache.o
  2498. +obj-y += dma-mapping.o
  2499. +obj-y += asid.o
  2500. +obj-y += context.o
  2501. ifeq ($(CONFIG_MMU),y)
  2502. obj-$(CONFIG_SMP) += tlbflush.o
  2503. diff -Nur linux-5.4.36/arch/riscv/mm/tlbflush.c kernel/arch/riscv/mm/tlbflush.c
  2504. --- linux-5.4.36/arch/riscv/mm/tlbflush.c 2020-04-29 14:33:25.000000000 +0000
  2505. +++ kernel/arch/riscv/mm/tlbflush.c 2020-09-03 06:01:13.908989796 +0000
2506. @@ -2,6 +2,121 @@
  2507. #include <linux/mm.h>
  2508. #include <linux/smp.h>
  2509. +
  2510. +#define XUANTIE
  2511. +#ifdef XUANTIE
  2512. +#include <asm/mmu_context.h>
  2513. +
  2514. +int c910_mmu_v1_flag = 0;
  2515. +
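+/*
+ * sfence.vma semantics (RISC-V privileged spec): with no operands it
+ * flushes all TLB entries, "sfence.vma rs1" flushes the entries for
+ * the virtual address in rs1 across all ASIDs, and "sfence.vma rs1,
+ * rs2" limits the flush to the ASID in rs2. sync_mmu_v1() is assumed
+ * to be the C910 MMU v1 errata workaround that must precede the fence
+ * on those parts.
+ */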
  2516. +void flush_tlb_all(void)
  2517. +{
2518. + if (c910_mmu_v1_flag) {
2519. + sync_mmu_v1();
2520. + sync_mmu_v1();
2521. + sync_mmu_v1();
2522. + }
  2523. +
  2524. + __asm__ __volatile__ ("sfence.vma" : : : "memory");
  2525. +}
  2526. +
  2527. +void flush_tlb_mm(struct mm_struct *mm)
  2528. +{
2529. + if (c910_mmu_v1_flag) {
2530. + sync_mmu_v1();
2531. + sync_mmu_v1();
2532. + sync_mmu_v1();
2533. + __asm__ __volatile__ ("sfence.vma" : : : "memory");
2534. + } else {
2535. + int newpid = cpu_asid(mm);
2536. +
2537. + __asm__ __volatile__ ("sfence.vma zero, %0"
2538. + :
2539. + : "r"(newpid)
2540. + : "memory");
2541. + }
  2542. +}
  2543. +
  2544. +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
  2545. +{
2546. + if (c910_mmu_v1_flag) {
2547. + addr &= PAGE_MASK;
2548. +
2549. + sync_mmu_v1();
2550. + sync_mmu_v1();
2551. + sync_mmu_v1();
2552. + __asm__ __volatile__ ("sfence.vma %0"
2553. + :
2554. + : "r"(addr)
2555. + : "memory");
2556. +
2557. + } else {
2558. + int newpid = cpu_asid(vma->vm_mm);
2559. +
2560. + addr &= PAGE_MASK;
2561. +
2562. + __asm__ __volatile__ ("sfence.vma %0, %1"
2563. + :
2564. + : "r"(addr), "r"(newpid)
2565. + : "memory");
2566. + }
  2567. +}
  2568. +
  2569. +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  2570. + unsigned long end)
  2571. +{
  2572. + unsigned long newpid = cpu_asid(vma->vm_mm);
  2573. +
  2574. + start &= PAGE_MASK;
  2575. + end += PAGE_SIZE - 1;
  2576. + end &= PAGE_MASK;
  2577. +
2578. + if (c910_mmu_v1_flag) {
  2579. + while (start < end) {
  2580. + sync_mmu_v1();
  2581. + sync_mmu_v1();
  2582. + sync_mmu_v1();
  2583. + __asm__ __volatile__ ("sfence.vma %0"
  2584. + :
  2585. + : "r"(start)
  2586. + : "memory");
  2587. + start += PAGE_SIZE;
  2588. + }
2589. + } else {
  2590. + while (start < end) {
  2591. + __asm__ __volatile__ ("sfence.vma %0, %1"
  2592. + :
  2593. + : "r"(start), "r"(newpid)
  2594. + : "memory");
  2595. + start += PAGE_SIZE;
  2596. + }
2597. + }
  2598. +}
  2599. +
  2600. +static int __init c910_mmu_v1(char *str)
  2601. +{
  2602. + c910_mmu_v1_flag = 1;
  2603. + return 0;
  2604. +}
  2605. +early_param("c910_mmu_v1", c910_mmu_v1);
  2606. +EXPORT_SYMBOL(c910_mmu_v1_flag);
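+
+/*
+ * Usage sketch: booting with "c910_mmu_v1" on the kernel command line
+ * (appended to the bootargs) selects the sync_mmu_v1() based flush
+ * paths above; without it the ASID-qualified sfence.vma variants are
+ * used.
+ */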
  2607. +
  2608. +#else
  2609. #include <asm/sbi.h>
  2610. void flush_tlb_all(void)
  2611. @@ -33,3 +133,4 @@
  2612. {
  2613. __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
  2614. }
  2615. +#endif
  2616. diff -Nur linux-5.4.36/drivers/i2c/busses/i2c-designware-master.c kernel/drivers/i2c/busses/i2c-designware-master.c
  2617. --- linux-5.4.36/drivers/i2c/busses/i2c-designware-master.c 2020-04-29 14:33:25.000000000 +0000
  2618. +++ kernel/drivers/i2c/busses/i2c-designware-master.c 2020-09-03 06:01:15.387989734 +0000
  2619. @@ -54,6 +54,10 @@
  2620. /* Calculate SCL timing parameters for standard mode if not set */
  2621. if (!dev->ss_hcnt || !dev->ss_lcnt) {
  2622. ic_clk = i2c_dw_clk_rate(dev);
2623. + /* FIXME begin: fall back if ic_clk can't be read from the devicetree */
2624. + if (ic_clk == 0)
2625. + ic_clk = 50000; /* unit: kHz */
2626. + /* FIXME end */
  2627. dev->ss_hcnt =
  2628. i2c_dw_scl_hcnt(ic_clk,
  2629. 4000, /* tHD;STA = tHIGH = 4.0 us */
  2630. diff -Nur linux-5.4.36/drivers/mmc/host/Kconfig kernel/drivers/mmc/host/Kconfig
  2631. --- linux-5.4.36/drivers/mmc/host/Kconfig 2020-04-29 14:33:25.000000000 +0000
  2632. +++ kernel/drivers/mmc/host/Kconfig 2020-09-03 06:01:15.973989709 +0000
  2633. @@ -736,7 +736,7 @@
  2634. config MMC_DW
  2635. tristate "Synopsys DesignWare Memory Card Interface"
  2636. - depends on ARC || ARM || ARM64 || MIPS || COMPILE_TEST
  2637. + depends on ARC || ARM || ARM64 || MIPS || RISCV || CSKY || COMPILE_TEST
  2638. help
  2639. This selects support for the Synopsys DesignWare Mobile Storage IP
  2640. block, this provides host support for SD and MMC interfaces, in both
  2641. diff -Nur linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
  2642. --- linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c 1970-01-01 00:00:00.000000000 +0000
  2643. +++ kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c 2020-09-03 06:01:16.358989693 +0000
2644. @@ -0,0 +1,628 @@
  2645. +// SPDX-License-Identifier: GPL-2.0
  2646. +
  2647. +#include <linux/module.h>
  2648. +#include <linux/of.h>
  2649. +#include <linux/of_net.h>
  2650. +#include <linux/platform_device.h>
  2651. +
  2652. +#include "stmmac_platform.h"
  2653. +
  2654. +/* clock registers */
  2655. +#define GMAC_CLK_CFG0 0x00
  2656. +#define GMAC_CLK_CFG1 0x04
  2657. +#define GMAC_CLK_CFG2 0x08
  2658. +#define GMAC_CLK_CFG3 0x0C
  2659. +#define GMAC_CLK_CFG4 0x10
  2660. +#define GMAC_CLK_CFG5 0x14
  2661. +#define GMAC_CLK_CFG6 0x18
  2662. +
  2663. +/* phy interface */
  2664. +#define DWMAC_PHYIF_MII_GMII 0
  2665. +#define DWMAC_PHYIF_RGMII 1
  2666. +#define DWMAC_PHYIF_RMII 4
  2667. +/* register bit fields, bit[3]: reserved, bit[2:0]: phy interface */
  2668. +#define DWMAC_PHYIF_MASK 0x7
  2669. +#define DWMAC_PHYIF_BIT_WIDTH 4
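+/*
+ * Worked example: each GMAC instance owns a 4-bit field, so for devid 1
+ * in RGMII mode thead_dwmac_set_phy_if() below clears bits [6:4] and
+ * writes DWMAC_PHYIF_RGMII there, i.e. reg = (reg & ~0x70) | 0x10.
+ */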
  2670. +
  2671. +/* TXCLK direction, 1:input, 0:output */
  2672. +#define TXCLK_DIR_OUTPUT 0
  2673. +#define TXCLK_DIR_INPUT 1
  2674. +
  2675. +#define GMAC_CLK_PLLOUT_250M 250000000
  2676. +#define GMAC_GMII_RGMII_RATE 125000000
  2677. +#define GMAC_MII_RATE 25000000
  2678. +/* clock divider for speed */
  2679. +#define GMAC_CLKDIV_125M (GMAC_CLK_PLLOUT_250M / GMAC_GMII_RGMII_RATE)
  2680. +#define GMAC_CLKDIV_25M (GMAC_CLK_PLLOUT_250M / GMAC_MII_RATE)
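+/*
+ * Divider arithmetic: the GMAC PLL output is first divided down to
+ * 250 MHz, then GMAC_CLKDIV_125M = 250/125 = 2 yields the 125 MHz
+ * GMII/RGMII clock and GMAC_CLKDIV_25M = 250/25 = 10 yields the 25 MHz
+ * clock for 100 Mbit links; 10 Mbit links need a further factor of 10
+ * (2.5 MHz), as handled in thead_dwmac_set_speed().
+ */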
  2681. +
  2682. +struct thead_dwmac_priv_data {
  2683. + int id;
  2684. + void __iomem *phy_if_reg;
  2685. + void __iomem *txclk_dir_reg;
  2686. + void __iomem *gmac_clk_reg;
  2687. + int interface;
  2688. + struct clk *gmac_pll_clk;
  2689. + unsigned int gmac_pll_clk_freq;
  2690. +};
  2691. +
  2692. +/* set GMAC PHY interface, 0:MII/GMII, 1:RGMII, 4:RMII */
  2693. +static void thead_dwmac_set_phy_if(struct platform_device *pdev,
  2694. + void __iomem *phy_if_reg, int interface,
  2695. + int devid)
  2696. +{
  2697. + struct device *dev = &pdev->dev;
2698. + unsigned int phyif = DWMAC_PHYIF_MII_GMII;
2699. + u32 reg;
  2700. +
  2701. + if (phy_if_reg == NULL)
  2702. + return;
  2703. +
  2704. + switch (interface)
  2705. + {
  2706. + case PHY_INTERFACE_MODE_MII:
  2707. + case PHY_INTERFACE_MODE_GMII:
  2708. + phyif = DWMAC_PHYIF_MII_GMII;
  2709. + break;
  2710. + case PHY_INTERFACE_MODE_RGMII:
  2711. + case PHY_INTERFACE_MODE_RGMII_TXID:
  2712. + case PHY_INTERFACE_MODE_RGMII_RXID:
  2713. + case PHY_INTERFACE_MODE_RGMII_ID:
  2714. + phyif = DWMAC_PHYIF_RGMII;
  2715. + break;
  2716. + case PHY_INTERFACE_MODE_RMII:
  2717. + phyif = DWMAC_PHYIF_RMII;
  2718. + break;
  2719. + default:
  2720. + dev_err(dev, "phy interface %d not supported\n", interface);
  2721. + return;
2722. + }
  2723. +
  2724. + reg = readl(phy_if_reg);
  2725. + reg &= ~(DWMAC_PHYIF_MASK << (DWMAC_PHYIF_BIT_WIDTH * devid));
  2726. + reg |= (phyif & DWMAC_PHYIF_MASK) << (DWMAC_PHYIF_BIT_WIDTH * devid);
  2727. + writel(reg, phy_if_reg);
  2728. +}
  2729. +
  2730. +/*
  2731. + * set GMAC TXCLK direction
  2732. + * MII : TXCLK is input
  2733. + * GMII/RGMII : TXCLK is output
  2734. + */
  2735. +static void thead_dwmac_set_txclk_dir(struct platform_device *pdev,
  2736. + void __iomem *txclk_dir_reg, int interface)
  2737. +{
  2738. + struct device *dev = &pdev->dev;
  2739. + unsigned int txclk_dir = TXCLK_DIR_INPUT;
  2740. +
  2741. + if (txclk_dir_reg == NULL)
  2742. + return;
  2743. +
  2744. + switch (interface)
  2745. + {
  2746. + case PHY_INTERFACE_MODE_MII:
  2747. + case PHY_INTERFACE_MODE_RMII:
  2748. + txclk_dir = TXCLK_DIR_INPUT;
  2749. + break;
  2750. + case PHY_INTERFACE_MODE_GMII:
  2751. + case PHY_INTERFACE_MODE_RGMII:
  2752. + case PHY_INTERFACE_MODE_RGMII_TXID:
  2753. + case PHY_INTERFACE_MODE_RGMII_RXID:
  2754. + case PHY_INTERFACE_MODE_RGMII_ID:
  2755. + txclk_dir = TXCLK_DIR_OUTPUT;
  2756. + break;
  2757. + default:
  2758. + dev_err(dev, "phy interface %d not supported\n", interface);
  2759. + return;
2760. + }
  2761. +
  2762. + writel(txclk_dir, txclk_dir_reg);
  2763. +}
  2764. +
  2765. +static void thead_dwmac_set_clk_source(struct platform_device *pdev,
  2766. + void __iomem *gmac_clk_reg, int interface)
  2767. +{
  2768. + struct device *dev = &pdev->dev;
2769. + u32 reg;
  2770. +
  2771. + if (gmac_clk_reg == NULL)
  2772. + return;
  2773. +
  2774. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG0);
  2775. +
  2776. + /* RX clock source */
2777. + reg |= BIT(7); /* gmac_rx_clk_sel: external pin */
  2778. +
  2779. + /* TX clock source */
  2780. + if (interface == PHY_INTERFACE_MODE_MII) {
2781. + reg |= BIT(1); /* gmac_tx_clk_sel: external pin */
  2782. + reg &= ~BIT(2); /* gmac_tx_clk_gbit_sel: u_tx_clk_mux */
  2783. + } else if (interface == PHY_INTERFACE_MODE_GMII) {
  2784. + reg &= ~BIT(5); /* gmac_tx_clk_out_sel: GMAC PLL */
  2785. + reg |= BIT(2); /* gmac_tx_clk_gbit_sel: GMAC PLL */
  2786. + } else if (interface == PHY_INTERFACE_MODE_RGMII
  2787. + || interface == PHY_INTERFACE_MODE_RGMII_ID
  2788. + || interface == PHY_INTERFACE_MODE_RGMII_RXID
  2789. + || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  2790. + reg &= ~BIT(5); /* gmac_tx_clk_out_sel: GMAC PLL */
  2791. + reg |= BIT(2); /* gmac_tx_clk_gbit_sel: GMAC PLL */
  2792. + } else {
  2793. + dev_err(dev, "phy interface %d not supported\n", interface);
  2794. + return;
  2795. + }
  2796. +
  2797. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG0);
  2798. +}
  2799. +
  2800. +
2801. +/* set RX/TX clock delay from the devicetree */
  2802. +static void thead_dwmac_set_clock_delay(struct platform_device *pdev,
  2803. + void __iomem *gmac_clk_reg, int interface)
  2804. +{
  2805. + unsigned int delay;
  2806. +
  2807. + if (gmac_clk_reg == NULL)
  2808. + return;
  2809. +
  2810. + if (of_property_read_u32(pdev->dev.of_node, "rx-clk-delay",
  2811. + &delay) == 0) {
  2812. + /* RX clk delay */
  2813. + writel(delay, gmac_clk_reg + GMAC_CLK_CFG1);
  2814. + pr_info("RX clk delay: 0x%X\n", delay);
  2815. + }
  2816. +
  2817. + if (of_property_read_u32(pdev->dev.of_node, "tx-clk-delay",
  2818. + &delay) == 0) {
  2819. + /* TX clk delay */
  2820. + writel(delay, gmac_clk_reg + GMAC_CLK_CFG2);
  2821. + pr_info("TX clk delay: 0x%X\n", delay);
  2822. + }
  2823. +}
  2824. +
  2825. +/* set gmac pll divider (u_pll_clk_div) to get 250MHz clock */
  2826. +static void thead_dwmac_set_pll_250M(void __iomem *gmac_clk_reg, int interface,
  2827. + unsigned int src_freq)
  2828. +{
2829. + unsigned int reg;
  2830. + unsigned int div = 1;
  2831. +
  2832. + if (gmac_clk_reg == NULL)
  2833. + return;
  2834. +
  2835. + if (interface == PHY_INTERFACE_MODE_MII) {
  2836. + /* For MII, no internal PLL is used */
  2837. + return;
  2838. + } else if (interface == PHY_INTERFACE_MODE_GMII
  2839. + || interface == PHY_INTERFACE_MODE_RGMII
  2840. + || interface == PHY_INTERFACE_MODE_RGMII_ID
  2841. + || interface == PHY_INTERFACE_MODE_RGMII_RXID
  2842. + || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  2843. +
  2844. + /* check clock */
  2845. + if ((src_freq == 0) || (src_freq % GMAC_CLK_PLLOUT_250M != 0)) {
  2846. + pr_err("error! invalid gmac pll freq %d\n", src_freq);
  2847. + return;
  2848. + }
  2849. + div = src_freq / GMAC_CLK_PLLOUT_250M;
  2850. +
  2851. + /* disable pll_clk_div */
  2852. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG3);
  2853. + reg &= ~BIT(31);
  2854. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG3);
  2855. +
  2856. + /* modify divider */
  2857. + writel(div, gmac_clk_reg + GMAC_CLK_CFG3);
  2858. +
  2859. + /* enable pll_clk_div */
  2860. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG3);
  2861. + reg |= BIT(31);
  2862. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG3);
  2863. + } else {
  2864. + pr_err("phy interface %d not supported\n", interface);
  2865. + return;
  2866. + }
  2867. +}
  2868. +
  2869. +/* set gmac speed */
  2870. +static void thead_dwmac_set_speed(void __iomem *gmac_clk_reg, int interface,
  2871. + unsigned int speed)
  2872. +{
2873. + unsigned int reg;
  2874. +
  2875. + if (gmac_clk_reg == NULL)
  2876. + return;
  2877. +
  2878. + if (interface == PHY_INTERFACE_MODE_MII) {
  2879. + /* For MII, no internal PLL is used */
  2880. + return;
  2881. + } else if (interface == PHY_INTERFACE_MODE_GMII
  2882. + || interface == PHY_INTERFACE_MODE_RGMII
  2883. + || interface == PHY_INTERFACE_MODE_RGMII_ID
  2884. + || interface == PHY_INTERFACE_MODE_RGMII_RXID
  2885. + || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  2886. +
  2887. + /* disable gtx_clk_div */
  2888. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG4);
  2889. + reg &= ~BIT(31);
  2890. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG4);
  2891. +
  2892. + /*
  2893. + * modify divider
  2894. + */
  2895. + /* gtx_clk_div */
  2896. + if (speed == SPEED_1000) {
  2897. + writel(GMAC_CLKDIV_125M, gmac_clk_reg + GMAC_CLK_CFG4);
  2898. + } else if (speed == SPEED_100) {
  2899. + writel(GMAC_CLKDIV_25M, gmac_clk_reg + GMAC_CLK_CFG4);
  2900. + } else {
2901. + writel(GMAC_CLKDIV_25M * 10, gmac_clk_reg + GMAC_CLK_CFG4); /* 2.5 MHz */
  2902. + }
  2903. +
  2904. + /* enable gtx_clk_div */
  2905. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG4);
  2906. + reg |= BIT(31);
  2907. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG4);
  2908. + } else {
  2909. + pr_err("phy interface %d not supported\n", interface);
  2910. + return;
  2911. + }
  2912. +}
  2913. +
  2914. +/* enable gmac clock */
  2915. +static void thead_dwmac_enable_clock(struct platform_device *pdev,
  2916. + void __iomem *gmac_clk_reg, int interface)
  2917. +{
  2918. + struct device *dev = &pdev->dev;
2919. + unsigned int reg;
  2920. +
  2921. + if (gmac_clk_reg == NULL)
  2922. + return;
  2923. +
  2924. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG0);
  2925. +
  2926. + /* enable gmac_hclk */
  2927. + reg |= BIT(14);
  2928. +
  2929. + if (interface == PHY_INTERFACE_MODE_MII) {
  2930. + reg |= BIT(8); /* enable gmac_rx_clk */
  2931. + reg |= BIT(3); /* enable gmac_tx_clk */
  2932. + } else if (interface == PHY_INTERFACE_MODE_GMII) {
  2933. + reg |= BIT(8); /* enable gmac_rx_clk */
  2934. + reg |= BIT(3); /* enable gmac_tx_clk */
  2935. + reg |= BIT(6); /* enable gmac_tx_clk_out */
  2936. + } else if (interface == PHY_INTERFACE_MODE_RGMII
  2937. + || interface == PHY_INTERFACE_MODE_RGMII_ID
  2938. + || interface == PHY_INTERFACE_MODE_RGMII_RXID
  2939. + || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  2940. + reg |= BIT(8); /* enable gmac_rx_clk */
  2941. + reg |= BIT(3); /* enable gmac_tx_clk */
  2942. + reg |= BIT(6); /* enable gmac_tx_clk_out */
  2943. + reg |= BIT(9); /* enable gmac_rx_clk_n */
  2944. + reg |= BIT(4); /* enable gmac_tx_clk_n */
  2945. + } else {
  2946. + dev_err(dev, "phy interface %d not supported\n", interface);
  2947. + return;
  2948. + }
  2949. +
  2950. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG0);
  2951. +}
  2952. +
  2953. +#if 0
  2954. +/* disable gmac clock */
  2955. +static void thead_dwmac_disable_clock(struct platform_device *pdev,
  2956. + void __iomem *gmac_clk_reg, int interface)
  2957. +{
  2958. + struct device *dev = &pdev->dev;
2959. + unsigned int reg;
  2960. +
  2961. + if (gmac_clk_reg == NULL)
  2962. + return;
  2963. +
  2964. + reg = readl(gmac_clk_reg + GMAC_CLK_CFG0);
  2965. +
  2966. + /* disable gmac_hclk */
  2967. + reg &= ~BIT(14);
  2968. +
  2969. + if (interface == PHY_INTERFACE_MODE_MII) {
  2970. + reg &= ~BIT(8); /* disable gmac_rx_clk */
  2971. + reg &= ~BIT(3); /* disable gmac_tx_clk */
  2972. + } else if (interface == PHY_INTERFACE_MODE_GMII) {
  2973. + reg &= ~BIT(8); /* disable gmac_rx_clk */
  2974. + reg &= ~BIT(3); /* disable gmac_tx_clk */
  2975. + reg &= ~BIT(6); /* disable gmac_tx_clk_out */
  2976. + } else if (interface == PHY_INTERFACE_MODE_RGMII
  2977. + || interface == PHY_INTERFACE_MODE_RGMII_ID
  2978. + || interface == PHY_INTERFACE_MODE_RGMII_RXID
  2979. + || interface == PHY_INTERFACE_MODE_RGMII_TXID) {
  2980. + reg &= ~BIT(8); /* disable gmac_rx_clk */
  2981. + reg &= ~BIT(3); /* disable gmac_tx_clk */
  2982. + reg &= ~BIT(6); /* disable gmac_tx_clk_out */
  2983. + reg &= ~BIT(9); /* disable gmac_rx_clk_n */
  2984. + reg &= ~BIT(4); /* disable gmac_tx_clk_n */
  2985. + } else {
  2986. + dev_err(dev, "phy interface %d not supported\n", interface);
  2987. + return;
  2988. + }
  2989. +
  2990. + writel(reg, gmac_clk_reg + GMAC_CLK_CFG0);
  2991. +}
  2992. +#endif
  2993. +
  2994. +static int thead_dwmac_init(struct platform_device *pdev, void *bsp_priv)
  2995. +{
  2996. + struct thead_dwmac_priv_data *thead_plat_dat = bsp_priv;
  2997. + struct device *dev = &pdev->dev;
  2998. + struct device_node *np = pdev->dev.of_node;
  2999. + struct resource *res;
  3000. + void __iomem *ptr;
  3001. + struct clk *clktmp;
  3002. + int ret;
  3003. +
  3004. + thead_plat_dat->id = of_alias_get_id(np, "ethernet");
  3005. + if (thead_plat_dat->id < 0) {
  3006. + thead_plat_dat->id = 0;
  3007. + }
  3008. + dev_info(dev, "id: %d\n", thead_plat_dat->id);
  3009. +
  3010. + thead_plat_dat->interface = of_get_phy_mode(dev->of_node);
  3011. + dev_info(dev, "phy interface: %d\n", thead_plat_dat->interface);
  3012. +
  3013. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_if_reg");
  3014. + if ((res != NULL) && (resource_type(res) == IORESOURCE_MEM)) {
  3015. + ptr = devm_ioremap(dev, res->start, resource_size(res));
  3016. + if (!ptr) {
  3017. + dev_err(dev, "phy interface register not exist, skipped it\n");
  3018. + } else {
  3019. + thead_plat_dat->phy_if_reg = ptr;
  3020. + }
  3021. + }
  3022. +
  3023. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "txclk_dir_reg");
  3024. + ptr = devm_ioremap_resource(dev, res);
  3025. + if (IS_ERR(ptr)) {
  3026. + dev_err(dev, "txclk_dir register not exist, skipped it\n");
  3027. + } else {
  3028. + thead_plat_dat->txclk_dir_reg = ptr;
  3029. + }
  3030. +
  3031. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clk_mgr_reg");
  3032. + ptr = devm_ioremap_resource(dev, res);
  3033. + if (IS_ERR(ptr)) {
  3034. + dev_err(dev, "gmac_clk register not exist, skipped it\n");
  3035. + } else {
  3036. + thead_plat_dat->gmac_clk_reg = ptr;
  3037. + }
  3038. +
  3039. + /* get gmac pll clk */
  3040. + clktmp = devm_clk_get(dev, "gmac_pll_clk");
  3041. + if (IS_ERR(clktmp)) {
  3042. + dev_err(dev, "gmac_pll_clk not exist, skipped it\n");
  3043. + } else {
  3044. + thead_plat_dat->gmac_pll_clk = clktmp;
  3045. +
  3046. + ret = clk_prepare_enable(thead_plat_dat->gmac_pll_clk);
  3047. + if (ret) {
  3048. + dev_err(dev, "Failed to enable clk 'gmac_pll_clk'\n");
3049. + return ret;
  3050. + }
  3051. +
  3052. + thead_plat_dat->gmac_pll_clk_freq =
  3053. + clk_get_rate(thead_plat_dat->gmac_pll_clk);
  3054. + }
  3055. +
  3056. + thead_dwmac_set_phy_if(pdev, thead_plat_dat->phy_if_reg,
  3057. + thead_plat_dat->interface, thead_plat_dat->id);
  3058. +
  3059. + thead_dwmac_set_txclk_dir(pdev, thead_plat_dat->txclk_dir_reg,
  3060. + thead_plat_dat->interface);
  3061. +
  3062. + thead_dwmac_set_clk_source(pdev, thead_plat_dat->gmac_clk_reg,
  3063. + thead_plat_dat->interface);
  3064. + thead_dwmac_set_clock_delay(pdev, thead_plat_dat->gmac_clk_reg,
  3065. + thead_plat_dat->interface);
  3066. +
  3067. + thead_dwmac_set_pll_250M(thead_plat_dat->gmac_clk_reg,
  3068. + thead_plat_dat->interface,
  3069. + thead_plat_dat->gmac_pll_clk_freq);
  3070. +
  3071. + /* default speed is 1Gbps */
  3072. + thead_dwmac_set_speed(thead_plat_dat->gmac_clk_reg,
  3073. + thead_plat_dat->interface, SPEED_1000);
  3074. +
  3075. + thead_dwmac_enable_clock(pdev, thead_plat_dat->gmac_clk_reg,
  3076. + thead_plat_dat->interface);
  3077. + return 0;
  3078. +}
  3079. +
  3080. +static void thead_dwmac_fix_speed(void *bsp_priv, unsigned int speed)
  3081. +{
  3082. + struct thead_dwmac_priv_data *thead_plat_dat = bsp_priv;
  3083. +
  3084. + thead_dwmac_set_speed(thead_plat_dat->gmac_clk_reg,
  3085. + thead_plat_dat->interface, speed);
  3086. +}
  3087. +
  3088. +/**
  3089. + * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
  3090. + * @mcast_bins: Multicast filtering bins
  3091. + * Description:
  3092. + * this function validates the number of Multicast filtering bins specified
  3093. + * by the configuration through the device tree. The Synopsys GMAC supports
  3094. + * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
  3095. + * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
  3096. + * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
  3097. + * invalid and will cause the filtering algorithm to use Multicast
  3098. + * promiscuous mode.
  3099. + */
  3100. +static int dwmac1000_validate_mcast_bins(int mcast_bins)
  3101. +{
  3102. + int x = mcast_bins;
  3103. +
  3104. + switch (x) {
  3105. + case HASH_TABLE_SIZE:
  3106. + case 128:
  3107. + case 256:
  3108. + break;
  3109. + default:
  3110. + x = 0;
  3111. + pr_info("Hash table entries set to unexpected value %d",
  3112. + mcast_bins);
  3113. + break;
  3114. + }
  3115. + return x;
  3116. +}
  3117. +
  3118. +/**
  3119. + * dwmac1000_validate_ucast_entries - validate the Unicast address entries
  3120. + * @ucast_entries: number of Unicast address entries
  3121. + * Description:
  3122. + * This function validates the number of Unicast address entries supported
  3123. + * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
3124. + * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
3125. + * logic. This function validates that a supported configuration is
3126. + * selected, and defaults to 1 Unicast address if an unsupported
3127. + * configuration is selected.
  3128. + */
  3129. +static int dwmac1000_validate_ucast_entries(int ucast_entries)
  3130. +{
  3131. + int x = ucast_entries;
  3132. +
  3133. + switch (x) {
  3134. + case 1 ... 32:
  3135. + case 64:
  3136. + case 128:
  3137. + break;
  3138. + default:
  3139. + x = 1;
  3140. + pr_info("Unicast table entries set to unexpected value %d\n",
  3141. + ucast_entries);
  3142. + break;
  3143. + }
  3144. + return x;
  3145. +}
  3146. +
  3147. +static int thead_dwmac_probe(struct platform_device *pdev)
  3148. +{
  3149. + struct plat_stmmacenet_data *plat_dat;
  3150. + struct stmmac_resources stmmac_res;
  3151. + struct thead_dwmac_priv_data *thead_plat_dat;
  3152. + struct device *dev = &pdev->dev;
  3153. + struct device_node *np = pdev->dev.of_node;
  3154. + int ret;
  3155. +
  3156. + thead_plat_dat = devm_kzalloc(dev, sizeof(*thead_plat_dat), GFP_KERNEL);
  3157. + if (thead_plat_dat == NULL) {
  3158. + dev_err(&pdev->dev, "allocate memory failed\n");
  3159. + return -ENOMEM;
  3160. + }
  3161. +
  3162. + ret = stmmac_get_platform_resources(pdev, &stmmac_res);
  3163. + if (ret)
  3164. + return ret;
  3165. +
  3166. + if (pdev->dev.of_node) {
  3167. + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
  3168. + if (IS_ERR(plat_dat)) {
  3169. + dev_err(&pdev->dev, "dt configuration failed\n");
  3170. + return PTR_ERR(plat_dat);
  3171. + }
  3172. + } else {
  3173. + plat_dat = dev_get_platdata(&pdev->dev);
  3174. + if (!plat_dat) {
  3175. + dev_err(&pdev->dev, "no platform data provided\n");
  3176. + return -EINVAL;
  3177. + }
  3178. +
  3179. + /* Set default value for multicast hash bins */
  3180. + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
  3181. +
  3182. + /* Set default value for unicast filter entries */
  3183. + plat_dat->unicast_filter_entries = 1;
  3184. + }
  3185. +
  3186. + /* Custom initialisation (if needed) */
  3187. + if (plat_dat->init) {
  3188. + ret = plat_dat->init(pdev, plat_dat->bsp_priv);
  3189. + if (ret)
  3190. + goto err_remove_config_dt;
  3191. + }
  3192. +
  3193. + /* populate bsp private data */
  3194. + plat_dat->bsp_priv = thead_plat_dat;
  3195. + plat_dat->fix_mac_speed = thead_dwmac_fix_speed;
  3196. + of_property_read_u32(np, "max-frame-size", &plat_dat->maxmtu);
  3197. + of_property_read_u32(np, "snps,multicast-filter-bins",
  3198. + &plat_dat->multicast_filter_bins);
  3199. + of_property_read_u32(np, "snps,perfect-filter-entries",
  3200. + &plat_dat->unicast_filter_entries);
  3201. + plat_dat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
  3202. + plat_dat->unicast_filter_entries);
  3203. + plat_dat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
  3204. + plat_dat->multicast_filter_bins);
  3205. + plat_dat->has_gmac = 1;
  3206. + plat_dat->pmt = 1;
  3207. +
  3208. + ret = thead_dwmac_init(pdev, plat_dat->bsp_priv);
  3209. + if (ret)
  3210. + goto err_exit;
  3211. +
  3212. + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
  3213. + if (ret)
  3214. + goto err_exit;
  3215. +
  3216. + return 0;
  3217. +
  3218. +err_exit:
  3219. + if (plat_dat->exit)
  3220. + plat_dat->exit(pdev, plat_dat->bsp_priv);
  3221. +err_remove_config_dt:
  3222. + if (pdev->dev.of_node)
  3223. + stmmac_remove_config_dt(pdev, plat_dat);
  3224. +
  3225. + return ret;
  3226. +}
  3227. +
  3228. +static const struct of_device_id thead_dwmac_match[] = {
  3229. + { .compatible = "thead,dwmac"},
  3230. + { }
  3231. +};
  3232. +MODULE_DEVICE_TABLE(of, thead_dwmac_match);
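+
+/*
+ * Devicetree sketch (property and resource names as consumed above;
+ * the values are placeholders and the MAC register window is omitted):
+ *
+ * ethernet@0 {
+ * compatible = "thead,dwmac";
+ * reg-names = "phy_if_reg", "txclk_dir_reg", "clk_mgr_reg";
+ * clock-names = "gmac_pll_clk";
+ * phy-mode = "rgmii";
+ * rx-clk-delay = <0x1f>;
+ * tx-clk-delay = <0x1f>;
+ * };
+ */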
  3233. +
  3234. +static struct platform_driver thead_dwmac_driver = {
  3235. + .probe = thead_dwmac_probe,
  3236. + .remove = stmmac_pltfr_remove,
  3237. + .driver = {
  3238. + .name = "thead_dwmac_eth",
  3239. + .pm = &stmmac_pltfr_pm_ops,
  3240. + .of_match_table = of_match_ptr(thead_dwmac_match),
  3241. + },
  3242. +};
  3243. +module_platform_driver(thead_dwmac_driver);
  3244. +
  3245. +MODULE_DESCRIPTION("T-HEAD dwmac driver");
  3246. +MODULE_LICENSE("GPL v2");
  3247. diff -Nur linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/Makefile kernel/drivers/net/ethernet/stmicro/stmmac/Makefile
  3248. --- linux-5.4.36/drivers/net/ethernet/stmicro/stmmac/Makefile 2020-04-29 14:33:25.000000000 +0000
  3249. +++ kernel/drivers/net/ethernet/stmicro/stmmac/Makefile 2020-09-03 06:01:16.356989693 +0000
  3250. @@ -1,5 +1,5 @@
  3251. # SPDX-License-Identifier: GPL-2.0
  3252. -obj-$(CONFIG_STMMAC_ETH) += stmmac.o
  3253. +obj-$(CONFIG_STMMAC_ETH) += stmmac.o dwmac-thead.o
  3254. stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
  3255. chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
  3256. dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
  3257. diff -Nur linux-5.4.36/drivers/perf/Kconfig kernel/drivers/perf/Kconfig
  3258. --- linux-5.4.36/drivers/perf/Kconfig 2020-04-29 14:33:25.000000000 +0000
  3259. +++ kernel/drivers/perf/Kconfig 2020-09-03 06:01:16.736989677 +0000
  3260. @@ -71,6 +71,14 @@
  3261. system, control logic. The PMU allows counting various events related
  3262. to DSU.
  3263. +config THEAD_XT_V1_PMU
  3264. + bool "T-HEAD XuanTie v1 Performance Monitoring Unit"
  3265. + depends on RISCV
3266. + default y
3267. + help
3268. + The T-HEAD XuanTie PMU supports various hardware events, including
3269. + cycles, instructions, cache accesses/misses, and LSU events.
  3270. +
  3271. config FSL_IMX8_DDR_PMU
  3272. tristate "Freescale i.MX8 DDR perf monitor"
  3273. depends on ARCH_MXC
  3274. diff -Nur linux-5.4.36/drivers/perf/Makefile kernel/drivers/perf/Makefile
  3275. --- linux-5.4.36/drivers/perf/Makefile 2020-04-29 14:33:25.000000000 +0000
  3276. +++ kernel/drivers/perf/Makefile 2020-09-03 06:01:16.736989677 +0000
  3277. @@ -12,3 +12,4 @@
  3278. obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
  3279. obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
  3280. obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
  3281. +obj-$(CONFIG_THEAD_XT_V1_PMU) += thead_xt_pmu_v1.o
  3282. diff -Nur linux-5.4.36/drivers/perf/thead_xt_pmu_v1.c kernel/drivers/perf/thead_xt_pmu_v1.c
  3283. --- linux-5.4.36/drivers/perf/thead_xt_pmu_v1.c 1970-01-01 00:00:00.000000000 +0000
  3284. +++ kernel/drivers/perf/thead_xt_pmu_v1.c 2020-09-03 06:01:16.738989677 +0000
3285. @@ -0,0 +1,780 @@
  3286. +// SPDX-License-Identifier: GPL-2.0
  3287. +/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
  3288. +
  3289. +#include <linux/errno.h>
  3290. +#include <linux/interrupt.h>
  3291. +#include <linux/module.h>
  3292. +#include <linux/of.h>
  3293. +#include <linux/perf_event.h>
  3294. +#include <linux/platform_device.h>
  3295. +#include <linux/smp.h>
  3296. +#include <asm/perf_event.h>
  3297. +#include <asm/sbi.h>
  3298. +
  3299. +#define RISCV_PMU_CYCLE 0
  3300. +#define RISCV_PMU_TIME 1
  3301. +#undef RISCV_PMU_INSTRET
  3302. +#define RISCV_PMU_INSTRET 2
  3303. +#define RISCV_PMU_L1ICAC 3 /* ICache Access */
  3304. +#define RISCV_PMU_L1ICMC 4 /* ICache Miss */
  3305. +#define RISCV_PMU_IUTLBMC 5 /* I-UTLB Miss */
  3306. +#define RISCV_PMU_DUTLBMC 6 /* D-UTLB Miss */
  3307. +#define RISCV_PMU_JTLBMC 7 /* JTLB Miss Counter */
  3308. +
  3309. +#define RISCV_PMU_CBMC 8 /* Cond-br-mispredict */
  3310. +#define RISCV_PMU_CBIC 9 /* Cond-br-instruction */
  3311. +#define RISCV_PMU_IBMC 10 /* Indirect Branch Mispredict */
  3312. +#define RISCV_PMU_IBIC 11 /* Indirect Branch Instruction */
  3313. +#define RISCV_PMU_LSUSFC 12 /* LSU Spec Fail */
  3314. +#define RISCV_PMU_STC 13 /* Store Instruction */
  3315. +
  3316. +#define RISCV_PMU_L1DCRAC 14 /* L1 DCache Read Access */
  3317. +#define RISCV_PMU_L1DCRMC 15 /* L1 DCache Read Miss */
  3318. +#define RISCV_PMU_L1DCWAC 16 /* L1 DCache Write Access */
  3319. +#define RISCV_PMU_L1DCWMC 17 /* L1 DCache Write Miss */
  3320. +
  3321. +#define RISCV_PMU_L2CRAC 18 /* L2 Cache Read Access */
  3322. +#define RISCV_PMU_L2CRMC 19 /* L2 Cache Read Miss */
  3323. +#define RISCV_PMU_L2CWAC 20 /* L2 Cache Write Access */
  3324. +#define RISCV_PMU_L2CWMC 21 /* L2 Cache Write Miss */
  3325. +
  3326. +#define RISCV_PMU_RFLFC 22 /* RF Launch Fail */
  3327. +#define RISCV_PMU_RFRLFC 23 /* RF Reg Launch Fail */
  3328. +#define RISCV_PMU_RFIC 24 /* RF Instruction */
  3329. +
  3330. +#define RISCV_PMU_LSUC4SC 25 /* LSU Cross 4K Stall */
  3331. +#define RISCV_PMU_LSUOSC 26 /* LSU Other Stall */
  3332. +#define RISCV_PMU_LSUSQDC 27 /* LSU SQ Discard */
  3333. +#define RISCV_PMU_LSUSQDDC 28 /* LSU SQ Data Discard */
  3334. +
  3335. +#define SCOUNTERINTEN 0x5c4
  3336. +#define SCOUNTEROF 0x5c5
  3337. +#define SCOUNTERBASE 0x5e0
  3338. +
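+/*
+ * SCOUNTERINTEN, SCOUNTEROF and SCOUNTERBASE are assumed to be T-HEAD
+ * vendor CSRs: one interrupt-enable bit and one overflow bit per
+ * counter, plus writable aliases of the hpmcounters starting at
+ * SCOUNTERBASE, so WRITE_COUNTER(idx, value) writes counter idx.
+ */
+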
  3339. +#define WRITE_COUNTER(idx, value) \
  3340. + csr_write(SCOUNTERBASE + idx, value)
  3341. +
  3342. +/* The events for a given PMU register set. */
  3343. +struct pmu_hw_events {
  3344. + /*
  3345. + * The events that are active on the PMU for the given index.
  3346. + */
  3347. + struct perf_event *events[RISCV_MAX_COUNTERS];
  3348. +
  3349. + /*
  3350. + * A 1 bit for an index indicates that the counter is being used for
  3351. + * an event. A 0 means that the counter can be used.
  3352. + */
  3353. + unsigned long used_mask[BITS_TO_LONGS(RISCV_MAX_COUNTERS)];
  3354. +};
  3355. +
  3356. +static struct riscv_pmu_t {
  3357. + struct pmu pmu;
  3358. + struct pmu_hw_events __percpu *hw_events;
  3359. + struct platform_device *plat_device;
  3360. + u64 max_period;
  3361. +} riscv_pmu;
  3362. +
  3363. +/*
  3364. + * Hardware & cache maps and their methods
  3365. + */
  3366. +
  3367. +static const int riscv_hw_event_map[] = {
  3368. + [PERF_COUNT_HW_CPU_CYCLES] = RISCV_PMU_CYCLE,
  3369. + [PERF_COUNT_HW_INSTRUCTIONS] = RISCV_PMU_INSTRET,
  3370. +
  3371. + [PERF_COUNT_HW_CACHE_REFERENCES] = RISCV_PMU_L1ICAC,
  3372. + [PERF_COUNT_HW_CACHE_MISSES] = RISCV_PMU_L1ICMC,
  3373. +
  3374. + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = RISCV_PMU_CBIC,
  3375. + [PERF_COUNT_HW_BRANCH_MISSES] = RISCV_PMU_CBMC,
  3376. +
  3377. + [PERF_COUNT_HW_BUS_CYCLES] = RISCV_PMU_IBMC,
  3378. + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = RISCV_PMU_IBIC,
  3379. + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = RISCV_PMU_LSUSFC,
  3380. + [PERF_COUNT_HW_REF_CPU_CYCLES] = RISCV_PMU_STC,
  3381. +};
  3382. +
  3383. +#define C(x) PERF_COUNT_HW_CACHE_##x
  3384. +static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
  3385. +[PERF_COUNT_HW_CACHE_OP_MAX]
  3386. +[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
  3387. + [C(L1D)] = {
  3388. + [C(OP_READ)] = {
  3389. + [C(RESULT_ACCESS)] = RISCV_PMU_L1DCRAC,
  3390. + [C(RESULT_MISS)] = RISCV_PMU_L1DCRMC,
  3391. + },
  3392. + [C(OP_WRITE)] = {
  3393. + [C(RESULT_ACCESS)] = RISCV_PMU_L1DCWAC,
  3394. + [C(RESULT_MISS)] = RISCV_PMU_L1DCWMC,
  3395. + },
  3396. + [C(OP_PREFETCH)] = {
  3397. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3398. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3399. + },
  3400. + },
  3401. + [C(L1I)] = {
  3402. + [C(OP_READ)] = {
  3403. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3404. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3405. + },
  3406. + [C(OP_WRITE)] = {
  3407. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3408. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3409. + },
  3410. + [C(OP_PREFETCH)] = {
  3411. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3412. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3413. + },
  3414. + },
  3415. + [C(LL)] = {
  3416. + [C(OP_READ)] = {
  3417. + [C(RESULT_ACCESS)] = RISCV_PMU_L2CRAC,
  3418. + [C(RESULT_MISS)] = RISCV_PMU_L2CRMC,
  3419. + },
  3420. + [C(OP_WRITE)] = {
  3421. + [C(RESULT_ACCESS)] = RISCV_PMU_L2CWAC,
  3422. + [C(RESULT_MISS)] = RISCV_PMU_L2CWMC,
  3423. + },
  3424. + [C(OP_PREFETCH)] = {
  3425. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3426. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3427. + },
  3428. + },
  3429. + [C(DTLB)] = {
  3430. + [C(OP_READ)] = {
  3431. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3432. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3433. + },
  3434. + [C(OP_WRITE)] = {
  3435. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3436. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3437. + },
  3438. + [C(OP_PREFETCH)] = {
  3439. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3440. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3441. + },
  3442. + },
  3443. + [C(ITLB)] = {
  3444. + [C(OP_READ)] = {
  3445. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3446. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3447. + },
  3448. + [C(OP_WRITE)] = {
  3449. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3450. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3451. + },
  3452. + [C(OP_PREFETCH)] = {
  3453. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3454. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3455. + },
  3456. + },
  3457. + [C(BPU)] = {
  3458. + [C(OP_READ)] = {
  3459. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3460. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3461. + },
  3462. + [C(OP_WRITE)] = {
  3463. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3464. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3465. + },
  3466. + [C(OP_PREFETCH)] = {
  3467. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3468. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3469. + },
  3470. + },
  3471. + [C(NODE)] = {
  3472. + [C(OP_READ)] = {
  3473. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3474. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3475. + },
  3476. + [C(OP_WRITE)] = {
  3477. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3478. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3479. + },
  3480. + [C(OP_PREFETCH)] = {
  3481. + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
  3482. + [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
  3483. + },
  3484. + },
  3485. +};
  3486. +
  3487. +/*
  3488. + * Low-level functions: reading/writing counters
  3489. + */
  3490. +static inline u64 read_counter(int idx)
  3491. +{
  3492. + u64 val = 0;
  3493. +
  3494. + switch (idx) {
  3495. + case RISCV_PMU_CYCLE:
  3496. + val = csr_read(cycle);
  3497. + break;
  3498. + case RISCV_PMU_INSTRET:
  3499. + val = csr_read(instret);
  3500. + break;
  3501. + case RISCV_PMU_L1ICAC:
  3502. + val = csr_read(hpmcounter3);
  3503. + break;
  3504. + case RISCV_PMU_L1ICMC:
  3505. + val = csr_read(hpmcounter4);
  3506. + break;
  3507. + case RISCV_PMU_IUTLBMC:
  3508. + val = csr_read(hpmcounter5);
  3509. + break;
  3510. + case RISCV_PMU_DUTLBMC:
  3511. + val = csr_read(hpmcounter6);
  3512. + break;
  3513. + case RISCV_PMU_JTLBMC:
  3514. + val = csr_read(hpmcounter7);
  3515. + break;
  3516. + case RISCV_PMU_CBMC:
  3517. + val = csr_read(hpmcounter8);
  3518. + break;
  3519. + case RISCV_PMU_CBIC:
  3520. + val = csr_read(hpmcounter9);
  3521. + break;
  3522. + case RISCV_PMU_IBMC:
  3523. + val = csr_read(hpmcounter10);
  3524. + break;
  3525. + case RISCV_PMU_IBIC:
  3526. + val = csr_read(hpmcounter11);
  3527. + break;
  3528. + case RISCV_PMU_LSUSFC:
  3529. + val = csr_read(hpmcounter12);
  3530. + break;
  3531. + case RISCV_PMU_STC:
  3532. + val = csr_read(hpmcounter13);
  3533. + break;
  3534. + case RISCV_PMU_L1DCRAC:
  3535. + val = csr_read(hpmcounter14);
  3536. + break;
  3537. + case RISCV_PMU_L1DCRMC:
  3538. + val = csr_read(hpmcounter15);
  3539. + break;
  3540. + case RISCV_PMU_L1DCWAC:
  3541. + val = csr_read(hpmcounter16);
  3542. + break;
  3543. + case RISCV_PMU_L1DCWMC:
  3544. + val = csr_read(hpmcounter17);
  3545. + break;
  3546. + case RISCV_PMU_L2CRAC:
  3547. + val = csr_read(hpmcounter18);
  3548. + break;
  3549. + case RISCV_PMU_L2CRMC:
  3550. + val = csr_read(hpmcounter19);
  3551. + break;
  3552. + case RISCV_PMU_L2CWAC:
  3553. + val = csr_read(hpmcounter20);
  3554. + break;
  3555. + case RISCV_PMU_L2CWMC:
  3556. + val = csr_read(hpmcounter21);
  3557. + break;
  3558. + case RISCV_PMU_RFLFC:
  3559. + val = csr_read(hpmcounter22);
  3560. + break;
  3561. + case RISCV_PMU_RFRLFC:
  3562. + val = csr_read(hpmcounter23);
  3563. + break;
  3564. + case RISCV_PMU_RFIC:
  3565. + val = csr_read(hpmcounter24);
  3566. + break;
  3567. + case RISCV_PMU_LSUC4SC:
  3568. + val = csr_read(hpmcounter25);
  3569. + break;
  3570. + case RISCV_PMU_LSUOSC:
  3571. + val = csr_read(hpmcounter26);
  3572. + break;
  3573. + case RISCV_PMU_LSUSQDC:
  3574. + val = csr_read(hpmcounter27);
  3575. + break;
  3576. + case RISCV_PMU_LSUSQDDC:
  3577. + val = csr_read(hpmcounter28);
  3578. + break;
  3579. + default:
3580. + WARN_ON_ONCE(idx < 0 || idx >= RISCV_MAX_COUNTERS);
  3581. + return -EINVAL;
  3582. + }
  3583. +
  3584. + return val;
  3585. +}
  3586. +
  3587. +static inline void write_counter(int idx, u64 value)
  3588. +{
  3589. + switch (idx) {
  3590. + case RISCV_PMU_CYCLE:
  3591. + WRITE_COUNTER(RISCV_PMU_CYCLE, value);
  3592. + break;
  3593. + case RISCV_PMU_INSTRET:
  3594. + WRITE_COUNTER(RISCV_PMU_INSTRET, value);
  3595. + break;
  3596. + case RISCV_PMU_L1ICAC:
  3597. + WRITE_COUNTER(RISCV_PMU_L1ICAC, value);
  3598. + break;
  3599. + case RISCV_PMU_L1ICMC:
  3600. + WRITE_COUNTER(RISCV_PMU_L1ICMC, value);
  3601. + break;
  3602. + case RISCV_PMU_IUTLBMC:
  3603. + WRITE_COUNTER(RISCV_PMU_IUTLBMC, value);
  3604. + break;
  3605. + case RISCV_PMU_DUTLBMC:
  3606. + WRITE_COUNTER(RISCV_PMU_DUTLBMC, value);
  3607. + break;
  3608. + case RISCV_PMU_JTLBMC:
  3609. + WRITE_COUNTER(RISCV_PMU_JTLBMC, value);
  3610. + break;
  3611. + case RISCV_PMU_CBMC:
  3612. + WRITE_COUNTER(RISCV_PMU_CBMC, value);
  3613. + break;
  3614. + case RISCV_PMU_CBIC:
  3615. + WRITE_COUNTER(RISCV_PMU_CBIC, value);
  3616. + break;
  3617. + case RISCV_PMU_IBMC:
  3618. + WRITE_COUNTER(RISCV_PMU_IBMC, value);
  3619. + break;
  3620. + case RISCV_PMU_IBIC:
  3621. + WRITE_COUNTER(RISCV_PMU_IBIC, value);
  3622. + break;
  3623. + case RISCV_PMU_LSUSFC:
  3624. + WRITE_COUNTER(RISCV_PMU_LSUSFC, value);
  3625. + break;
  3626. + case RISCV_PMU_STC:
  3627. + WRITE_COUNTER(RISCV_PMU_STC, value);
  3628. + break;
  3629. + case RISCV_PMU_L1DCRAC:
  3630. + WRITE_COUNTER(RISCV_PMU_L1DCRAC, value);
  3631. + break;
  3632. + case RISCV_PMU_L1DCRMC:
  3633. + WRITE_COUNTER(RISCV_PMU_L1DCRMC, value);
  3634. + break;
  3635. + case RISCV_PMU_L1DCWAC:
  3636. + WRITE_COUNTER(RISCV_PMU_L1DCWAC, value);
  3637. + break;
  3638. + case RISCV_PMU_L1DCWMC:
  3639. + WRITE_COUNTER(RISCV_PMU_L1DCWMC, value);
  3640. + break;
  3641. + case RISCV_PMU_L2CRAC:
  3642. + WRITE_COUNTER(RISCV_PMU_L2CRAC, value);
  3643. + break;
  3644. + case RISCV_PMU_L2CRMC:
  3645. + WRITE_COUNTER(RISCV_PMU_L2CRMC, value);
  3646. + break;
  3647. + case RISCV_PMU_L2CWAC:
  3648. + WRITE_COUNTER(RISCV_PMU_L2CWAC, value);
  3649. + break;
  3650. + case RISCV_PMU_L2CWMC:
  3651. + WRITE_COUNTER(RISCV_PMU_L2CWMC, value);
  3652. + break;
  3653. + case RISCV_PMU_RFLFC:
  3654. + WRITE_COUNTER(RISCV_PMU_RFLFC, value);
  3655. + break;
  3656. + case RISCV_PMU_RFRLFC:
  3657. + WRITE_COUNTER(RISCV_PMU_RFRLFC, value);
  3658. + break;
  3659. + case RISCV_PMU_RFIC:
  3660. + WRITE_COUNTER(RISCV_PMU_RFIC, value);
  3661. + break;
  3662. + case RISCV_PMU_LSUC4SC:
  3663. + WRITE_COUNTER(RISCV_PMU_LSUC4SC, value);
  3664. + break;
  3665. + case RISCV_PMU_LSUOSC:
  3666. + WRITE_COUNTER(RISCV_PMU_LSUOSC, value);
  3667. + break;
  3668. + case RISCV_PMU_LSUSQDC:
  3669. + WRITE_COUNTER(RISCV_PMU_LSUSQDC, value);
  3670. + break;
  3671. + case RISCV_PMU_LSUSQDDC:
  3672. + WRITE_COUNTER(RISCV_PMU_LSUSQDDC, value);
  3673. + break;
  3674. + default:
3675. + WARN_ON_ONCE(idx < 0 || idx >= RISCV_MAX_COUNTERS);
  3676. + }
  3677. +}
  3678. +
  3679. +int riscv_pmu_event_is_frequent(int idx)
  3680. +{
  3681. + return idx >= RISCV_PMU_CYCLE &&
  3682. + idx <= RISCV_PMU_L1DCWMC;
  3683. +}
  3684. +
  3685. +int riscv_pmu_event_set_period(struct perf_event *event)
  3686. +{
  3687. + struct hw_perf_event *hwc = &event->hw;
  3688. + s64 left = local64_read(&hwc->period_left);
  3689. + s64 period = hwc->sample_period;
  3690. + int ret = 0;
  3691. +
  3692. + if (period < 4096 && period != 0 &&
  3693. + riscv_pmu_event_is_frequent(hwc->idx)) {
  3694. + hwc->sample_period = period = 4096;
  3695. + }
  3696. +
  3697. + if (unlikely(left <= -period)) {
  3698. + left = period;
  3699. + local64_set(&hwc->period_left, left);
  3700. + hwc->last_period = period;
  3701. + ret = 1;
  3702. + }
  3703. +
  3704. + if (unlikely(left <= 0)) {
  3705. + left += period;
  3706. + local64_set(&hwc->period_left, left);
  3707. + hwc->last_period = period;
  3708. + ret = 1;
  3709. + }
  3710. +
  3711. + if (left < 0)
  3712. + left = riscv_pmu.max_period;
  3713. +
  3714. + /*
  3715. + * The hw event starts counting from this event offset,
  3716. + * mark it to be able to extract future "deltas":
  3717. + */
  3718. + local64_set(&hwc->prev_count, (u64)(-left));
  3719. + csr_write(SCOUNTEROF, csr_read(SCOUNTEROF) & ~BIT(hwc->idx));
  3720. + write_counter(hwc->idx, (u64)(-left));
  3721. +
  3722. + perf_event_update_userpage(event);
  3723. +
  3724. + return ret;
  3725. +}
  3726. +
  3727. +static void riscv_perf_event_update(struct perf_event *event,
  3728. + struct hw_perf_event *hwc)
  3729. +{
  3730. + uint64_t prev_raw_count = local64_read(&hwc->prev_count);
  3731. + /*
  3732. + * Sign extend count value to 64bit, otherwise delta calculation
  3733. + * would be incorrect when overflow occurs.
  3734. + */
  3735. + uint64_t new_raw_count = read_counter(hwc->idx);
  3736. + int64_t delta = new_raw_count - prev_raw_count;
  3737. +
  3738. + /*
  3739. + * We aren't afraid of hwc->prev_count changing beneath our feet
  3740. + * because there's no way for us to re-enter this function anytime.
  3741. + */
  3742. + local64_set(&hwc->prev_count, new_raw_count);
  3743. + local64_add(delta, &event->count);
  3744. + local64_sub(delta, &hwc->period_left);
  3745. +}
  3746. +
  3747. +static void riscv_pmu_read(struct perf_event *event)
  3748. +{
  3749. + riscv_perf_event_update(event, &event->hw);
  3750. +}
  3751. +
  3752. +static int riscv_pmu_cache_event(u64 config)
  3753. +{
  3754. + unsigned int cache_type, cache_op, cache_result;
  3755. +
  3756. + cache_type = (config >> 0) & 0xff;
  3757. + cache_op = (config >> 8) & 0xff;
  3758. + cache_result = (config >> 16) & 0xff;
  3759. +
  3760. + if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
  3761. + return -EINVAL;
  3762. + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
  3763. + return -EINVAL;
  3764. + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  3765. + return -EINVAL;
  3766. +
  3767. + return riscv_cache_event_map[cache_type][cache_op][cache_result];
  3768. +}
  3769. +
  3770. +static int riscv_pmu_event_init(struct perf_event *event)
  3771. +{
  3772. + struct hw_perf_event *hwc = &event->hw;
  3773. + int ret;
  3774. +
  3775. + switch (event->attr.type) {
  3776. + case PERF_TYPE_HARDWARE:
  3777. + if (event->attr.config >= PERF_COUNT_HW_MAX)
  3778. + return -ENOENT;
  3779. + ret = riscv_hw_event_map[event->attr.config];
  3780. + if (ret == RISCV_OP_UNSUPP)
  3781. + return -ENOENT;
  3782. + hwc->idx = ret;
  3783. + break;
  3784. + case PERF_TYPE_HW_CACHE:
  3785. + ret = riscv_pmu_cache_event(event->attr.config);
  3786. + if (ret == RISCV_OP_UNSUPP)
  3787. + return -ENOENT;
  3788. + hwc->idx = ret;
  3789. + break;
  3790. + case PERF_TYPE_RAW:
3791. + if (event->attr.config >= RISCV_MAX_COUNTERS)
  3793. + return -ENOENT;
  3794. + hwc->idx = event->attr.config;
  3795. + break;
  3796. + default:
  3797. + return -ENOENT;
  3798. + }
  3799. +
  3800. + return 0;
  3801. +}
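+
+/*
+ * With PERF_TYPE_RAW the config value is the counter index itself, so
+ * e.g. "perf stat -e r5" should count I-UTLB misses (RISCV_PMU_IUTLBMC)
+ * once this PMU is registered below.
+ */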
  3802. +
  3803. +static void riscv_pmu_enable(struct pmu *pmu)
  3804. +{
  3805. +}
  3806. +
  3807. +/* stops all counters */
  3808. +static void riscv_pmu_disable(struct pmu *pmu)
  3809. +{
  3810. +}
  3811. +
  3812. +static void riscv_pmu_start(struct perf_event *event, int flags)
  3813. +{
  3814. + unsigned long flg;
  3815. + struct hw_perf_event *hwc = &event->hw;
  3816. + int idx = hwc->idx;
  3817. +
  3818. + if (WARN_ON_ONCE(idx == -1))
  3819. + return;
  3820. +
  3821. + if (flags & PERF_EF_RELOAD)
  3822. + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
  3823. +
  3824. + hwc->state = 0;
  3825. +
  3826. + riscv_pmu_event_set_period(event);
  3827. +
  3828. + local_irq_save(flg);
  3829. +
  3830. + csr_write(SCOUNTERINTEN, BIT(idx) | csr_read(SCOUNTERINTEN));
  3831. +
  3832. + local_irq_restore(flg);
  3833. +}
  3834. +
  3835. +static void riscv_pmu_stop_event(struct perf_event *event)
  3836. +{
  3837. + unsigned long flg;
  3838. + struct hw_perf_event *hwc = &event->hw;
  3839. + int idx = hwc->idx;
  3840. +
  3841. + local_irq_save(flg);
  3842. +
  3843. + csr_write(SCOUNTERINTEN, ~BIT(idx) & csr_read(SCOUNTERINTEN));
  3844. +
  3845. + local_irq_restore(flg);
  3846. +}
  3847. +
  3848. +static void riscv_pmu_stop(struct perf_event *event, int flags)
  3849. +{
  3850. + if (!(event->hw.state & PERF_HES_STOPPED)) {
  3851. + riscv_pmu_stop_event(event);
  3852. + event->hw.state |= PERF_HES_STOPPED;
  3853. + }
  3854. +
  3855. + if ((flags & PERF_EF_UPDATE) &&
  3856. + !(event->hw.state & PERF_HES_UPTODATE)) {
  3857. + riscv_perf_event_update(event, &event->hw);
  3858. + event->hw.state |= PERF_HES_UPTODATE;
  3859. + }
  3860. +}
  3861. +
  3862. +static void riscv_pmu_del(struct perf_event *event, int flags)
  3863. +{
  3864. + struct pmu_hw_events *hw_events = this_cpu_ptr(riscv_pmu.hw_events);
  3865. + struct hw_perf_event *hwc = &event->hw;
  3866. +
  3867. + riscv_pmu_stop(event, PERF_EF_UPDATE);
  3868. +
  3869. + hw_events->events[hwc->idx] = NULL;
  3870. +
  3871. + perf_event_update_userpage(event);
  3872. +}
  3873. +
  3874. +/* allocate hardware counter and optionally start counting */
  3875. +static int riscv_pmu_add(struct perf_event *event, int flags)
  3876. +{
  3877. + struct pmu_hw_events *hw_events = this_cpu_ptr(riscv_pmu.hw_events);
  3878. + struct hw_perf_event *hwc = &event->hw;
  3879. +
  3880. + hw_events->events[hwc->idx] = event;
  3881. +
  3882. + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
  3883. +
  3884. + if (flags & PERF_EF_START)
  3885. + riscv_pmu_start(event, PERF_EF_RELOAD);
  3886. +
  3887. + perf_event_update_userpage(event);
  3888. +
  3889. + return 0;
  3890. +}
  3891. +
  3892. +irqreturn_t riscv_pmu_handle_irq(void)
  3893. +{
  3894. + struct perf_sample_data data;
  3895. + struct pmu_hw_events *cpuc = this_cpu_ptr(riscv_pmu.hw_events);
  3896. + struct pt_regs *regs;
  3897. + int idx;
  3898. +
  3899. + /*
  3900. + * Did an overflow occur?
  3901. + */
  3902. + if (!csr_read(SCOUNTEROF))
  3903. + return IRQ_NONE;
  3904. +
  3905. + /*
  3906. + * Handle the counter(s) overflow(s)
  3907. + */
  3908. + regs = get_irq_regs();
  3909. +
  3910. + for (idx = 0; idx < RISCV_MAX_COUNTERS; ++idx) {
  3911. + struct perf_event *event = cpuc->events[idx];
  3912. + struct hw_perf_event *hwc;
  3913. +
  3914. + /* Ignore if we don't have an event. */
  3915. + if (!event)
  3916. + continue;
  3917. + /*
  3918. + * We have a single interrupt for all counters. Check that
  3919. + * each counter has overflowed before we process it.
  3920. + */
  3921. + if (!(csr_read(SCOUNTEROF) & BIT(idx)))
  3922. + continue;
  3923. +
  3924. + hwc = &event->hw;
  3925. + riscv_perf_event_update(event, &event->hw);
  3926. + perf_sample_data_init(&data, 0, hwc->last_period);
  3927. + riscv_pmu_event_set_period(event);
  3928. +
  3929. + if (perf_event_overflow(event, &data, regs))
  3930. + riscv_pmu_stop_event(event);
  3931. + }
  3932. +
  3933. + /*
  3934. + * Handle the pending perf events.
  3935. + *
  3936. + * Note: this call *must* be run with interrupts disabled. For
  3937. + * platforms that can have the PMU interrupts raised as an NMI, this
  3938. + * will not work.
  3939. + */
  3940. + irq_work_run();
  3941. +
  3942. + return IRQ_HANDLED;
  3943. +}
  3944. +
  3945. +static void riscv_pmu_free_irq(void)
  3946. +{
  3947. + int irq;
  3948. + struct platform_device *pmu_device = riscv_pmu.plat_device;
  3949. +
  3950. + irq = platform_get_irq(pmu_device, 0);
  3951. + if (irq >= 0)
  3952. + free_percpu_irq(irq, this_cpu_ptr(riscv_pmu.hw_events));
  3953. +}
  3954. +
  3955. +static int init_hw_perf_events(void)
  3956. +{
  3957. + riscv_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
  3958. + GFP_KERNEL);
  3959. + if (!riscv_pmu.hw_events) {
  3960. + pr_info("failed to allocate per-cpu PMU data.\n");
  3961. + return -ENOMEM;
  3962. + }
  3963. +
  3964. + riscv_pmu.pmu = (struct pmu) {
  3965. + .pmu_enable = riscv_pmu_enable,
  3966. + .pmu_disable = riscv_pmu_disable,
  3967. + .event_init = riscv_pmu_event_init,
  3968. + .add = riscv_pmu_add,
  3969. + .del = riscv_pmu_del,
  3970. + .start = riscv_pmu_start,
  3971. + .stop = riscv_pmu_stop,
  3972. + .read = riscv_pmu_read,
  3973. + };
  3974. +
  3975. + return 0;
  3976. +}
  3977. +
  3978. +static int riscv_pmu_starting_cpu(unsigned int cpu)
  3979. +{
  3980. + sbi_set_pmu(1);
  3981. + csr_set(sie, SIE_SMIE);
  3982. + return 0;
  3983. +}
  3984. +
  3985. +static int riscv_pmu_dying_cpu(unsigned int cpu)
  3986. +{
  3987. + csr_clear(sie, SIE_SMIE);
  3988. + return 0;
  3989. +}
  3990. +
  3991. +int riscv_pmu_device_probe(struct platform_device *pdev,
  3992. + const struct of_device_id *of_table)
  3993. +{
  3994. + int ret;
  3995. +
  3996. + ret = init_hw_perf_events();
  3997. + if (ret) {
  3998. + pr_notice("[perf] failed to probe PMU!\n");
  3999. + return ret;
  4000. + }
  4001. + riscv_pmu.max_period = ULONG_MAX;
  4002. + riscv_pmu.plat_device = pdev;
  4003. +
  4004. + ret = cpuhp_setup_state(CPUHP_AP_PERF_RISCV_ONLINE, "perf riscv:online",
  4005. + riscv_pmu_starting_cpu,
  4006. + riscv_pmu_dying_cpu);
  4007. + if (ret) {
  4008. + riscv_pmu_free_irq();
  4009. + free_percpu(riscv_pmu.hw_events);
  4010. + return ret;
  4011. + }
  4012. +
  4013. + ret = perf_pmu_register(&riscv_pmu.pmu, "thead_xt_pmu", PERF_TYPE_RAW);
  4014. + if (ret) {
  4015. + riscv_pmu_free_irq();
  4016. + free_percpu(riscv_pmu.hw_events);
  4017. + }
  4018. +
  4019. + return ret;
  4020. +}
  4021. +
4022. +static const struct of_device_id riscv_pmu_of_device_ids[] = {
  4023. + {.compatible = "riscv,thead_xt_pmu"},
  4024. + {.compatible = "riscv,c910_pmu"},
  4025. + {},
  4026. +};
  4027. +
  4028. +static int riscv_pmu_dev_probe(struct platform_device *pdev)
  4029. +{
  4030. + return riscv_pmu_device_probe(pdev, riscv_pmu_of_device_ids);
  4031. +}
  4032. +
  4033. +static struct platform_driver riscv_pmu_driver = {
  4034. + .driver = {
  4035. + .name = "thead_xt_pmu",
  4036. + .of_match_table = riscv_pmu_of_device_ids,
  4037. + },
  4038. + .probe = riscv_pmu_dev_probe,
  4039. +};
  4040. +
  4041. +int __init riscv_pmu_probe(void)
  4042. +{
  4043. + int ret;
  4044. +
  4045. + ret = platform_driver_register(&riscv_pmu_driver);
  4046. + if (ret)
  4047. + pr_notice("[perf] PMU initialization failed\n");
  4048. + else
  4049. + pr_notice("[perf] PMU initialization done\n");
  4050. +
  4051. + return ret;
  4052. +}
  4053. +device_initcall(riscv_pmu_probe);
  4054. diff -Nur linux-5.4.36/drivers/rtc/rtc-xgene.c kernel/drivers/rtc/rtc-xgene.c
  4055. --- linux-5.4.36/drivers/rtc/rtc-xgene.c 2020-04-29 14:33:25.000000000 +0000
  4056. +++ kernel/drivers/rtc/rtc-xgene.c 2020-09-14 01:45:17.783702016 +0000
  4057. @@ -26,11 +26,13 @@
  4058. #define RTC_CCR_MASK BIT(1)
  4059. #define RTC_CCR_EN BIT(2)
  4060. #define RTC_CCR_WEN BIT(3)
  4061. +#define RTC_CCR_PSCLR BIT(4)
  4062. #define RTC_STAT 0x10
  4063. #define RTC_STAT_BIT BIT(0)
  4064. #define RTC_RSTAT 0x14
  4065. #define RTC_EOI 0x18
  4066. #define RTC_VER 0x1C
  4067. +#define RTC_CPSR 0x20
  4068. struct xgene_rtc_dev {
  4069. struct rtc_device *rtc;
  4070. @@ -140,6 +142,7 @@
  4071. struct resource *res;
  4072. int ret;
  4073. int irq;
  4074. + u32 freq;
  4075. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  4076. if (!pdata)
4077. @@ -175,8 +178,19 @@
  4078. if (ret)
  4079. return ret;
  4080. - /* Turn on the clock and the crystal */
  4081. - writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
  4082. + freq = clk_get_rate(pdata->clk);
  4083. + if (freq) {
  4084. + writel(freq, pdata->csr_base + RTC_CPSR);
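+ /*
+ * e.g. a 50 MHz clock yields RTC_CPSR = 50000000, which is
+ * assumed to prescale the counter down to a 1 Hz tick.
+ */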
  4085. + /* Turn on the clock and prescaler counter */
  4086. + writel(RTC_CCR_EN | RTC_CCR_PSCLR, pdata->csr_base + RTC_CCR);
  4087. + } else {
  4088. + /* Turn on the clock and the crystal */
  4089. + writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
  4090. + }
  4091. ret = device_init_wakeup(&pdev->dev, 1);
  4092. if (ret) {
  4093. diff -Nur linux-5.4.36/include/linux/cpuhotplug.h kernel/include/linux/cpuhotplug.h
  4094. --- linux-5.4.36/include/linux/cpuhotplug.h 2020-04-29 14:33:25.000000000 +0000
  4095. +++ kernel/include/linux/cpuhotplug.h 2020-09-03 06:01:17.919989627 +0000
  4096. @@ -174,6 +174,7 @@
  4097. CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
  4098. CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
  4099. CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
  4100. + CPUHP_AP_PERF_RISCV_ONLINE,
  4101. CPUHP_AP_WATCHDOG_ONLINE,
  4102. CPUHP_AP_WORKQUEUE_ONLINE,
  4103. CPUHP_AP_RCUTREE_ONLINE,
  4104. diff -Nur linux-5.4.36/include/uapi/linux/elf.h kernel/include/uapi/linux/elf.h
  4105. --- linux-5.4.36/include/uapi/linux/elf.h 2020-04-29 14:33:25.000000000 +0000
  4106. +++ kernel/include/uapi/linux/elf.h 2020-09-03 06:01:18.158989617 +0000
  4107. @@ -428,6 +428,7 @@
  4108. #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
  4109. #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
  4110. #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
  4111. +#define NT_RISCV_VECTOR 0x900 /* RISC-V vector registers */
  4112. /* Note header in a PT_NOTE section */
  4113. typedef struct elf32_note {
  4114. diff -Nur linux-5.4.36/include/uapi/linux/kexec.h kernel/include/uapi/linux/kexec.h
  4115. --- linux-5.4.36/include/uapi/linux/kexec.h 2020-04-29 14:33:25.000000000 +0000
  4116. +++ kernel/include/uapi/linux/kexec.h 2020-09-03 06:01:18.167989617 +0000
  4117. @@ -42,6 +42,7 @@
  4118. #define KEXEC_ARCH_MIPS_LE (10 << 16)
  4119. #define KEXEC_ARCH_MIPS ( 8 << 16)
  4120. #define KEXEC_ARCH_AARCH64 (183 << 16)
  4121. +#define KEXEC_ARCH_RISCV (243 << 16)
  4122. /* The artificial cap on the number of segments passed to kexec_load. */
  4123. #define KEXEC_SEGMENT_MAX 16
  4124. diff -Nur linux-5.4.36/kernel/Kconfig.hz kernel/kernel/Kconfig.hz
  4125. --- linux-5.4.36/kernel/Kconfig.hz 2020-04-29 14:33:25.000000000 +0000
  4126. +++ kernel/kernel/Kconfig.hz 2020-09-03 06:01:18.218989615 +0000
  4127. @@ -16,6 +16,8 @@
  4128. environment leading to NR_CPUS * HZ number of timer interrupts
  4129. per second.
  4130. + config HZ_12
  4131. + bool "12 HZ"
  4132. config HZ_100
  4133. bool "100 HZ"
  4134. @@ -50,6 +52,7 @@
  4135. config HZ
  4136. int
  4137. + default 12 if HZ_12
  4138. default 100 if HZ_100
  4139. default 250 if HZ_250
  4140. default 300 if HZ_300