
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the hardware Scheduling unit.
 *
 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
 * extended consistency checks. The define
 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
 * internal state checks to find common programming errors. If
 * CVMX_ENABLE_POW_CHECKS is not defined, checks are enabled by
 * default. For example, cvmx-pow will check for the following
 * programming errors or POW state inconsistencies:
 * - Requesting a POW operation with an active tag switch in
 *   progress.
 * - Waiting for a tag switch to complete for an excessively
 *   long period. This is normally a sign of an error in locking
 *   causing deadlock.
 * - Illegal tag switches from NULL_NULL.
 * - Illegal tag switches from NULL.
 * - Illegal deschedule request.
 * - WQE pointer not matching the one attached to the core by
 *   the POW.
 */
#ifndef __CVMX_POW_H__
#define __CVMX_POW_H__

#include "cvmx-wqe.h"
#include "cvmx-pow-defs.h"
#include "cvmx-sso-defs.h"
#include "cvmx-address.h"
#include "cvmx-coremask.h"

/* Default to having all POW consistency checks turned on */
#ifndef CVMX_ENABLE_POW_CHECKS
#define CVMX_ENABLE_POW_CHECKS 1
#endif
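/*
 * Illustrative note: the checks can be compiled out by defining
 * CVMX_ENABLE_POW_CHECKS to 0 before this header is included, for
 * example on the compiler command line:
 *
 *	-DCVMX_ENABLE_POW_CHECKS=0
 *
 * The macro is tested with a plain `if (CVMX_ENABLE_POW_CHECKS)`, so the
 * compiler can discard the check code entirely when it evaluates to 0.
 */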
/*
 * Special type for CN78XX style SSO groups (0..255),
 * for distinction from legacy-style groups (0..15)
 */
typedef union {
	u8 xgrp;
	/* Fields that map XGRP for backwards compatibility */
	struct __attribute__((__packed__)) {
		u8 group : 5;
		u8 qus : 3;
	};
} cvmx_xgrp_t;
/*
 * Software-only structure to convey a return value
 * containing multiple information fields about a work queue entry
 */
typedef struct {
	u32 tag;
	u16 index;
	u8 grp; /* Legacy group # (0..15) */
	u8 tag_type;
} cvmx_pow_tag_info_t;
/**
 * Wait flag values for pow functions.
 */
typedef enum {
	CVMX_POW_WAIT = 1,
	CVMX_POW_NO_WAIT = 0,
} cvmx_pow_wait_t;

/**
 * POW tag operations. These are used in the data stored to the POW.
 */
typedef enum {
	CVMX_POW_TAG_OP_SWTAG = 0L,
	CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
	CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
	CVMX_POW_TAG_OP_DESCH = 3L,
	CVMX_POW_TAG_OP_ADDWQ = 4L,
	CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
	CVMX_POW_TAG_OP_SET_NSCHED = 6L,
	CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
	CVMX_POW_TAG_OP_NOP = 15L
} cvmx_pow_tag_op_t;
/**
 * This structure defines the store data on a store to POW
 */
typedef union {
	u64 u64;
	struct {
		u64 no_sched : 1;
		u64 unused : 2;
		u64 index : 13;
		cvmx_pow_tag_op_t op : 4;
		u64 unused2 : 2;
		u64 qos : 3;
		u64 grp : 4;
		cvmx_pow_tag_type_t type : 3;
		u64 tag : 32;
	} s_cn38xx;
	struct {
		u64 no_sched : 1;
		cvmx_pow_tag_op_t op : 4;
		u64 unused1 : 4;
		u64 index : 11;
		u64 unused2 : 1;
		u64 grp : 6;
		u64 unused3 : 3;
		cvmx_pow_tag_type_t type : 2;
		u64 tag : 32;
	} s_cn68xx_clr;
	struct {
		u64 no_sched : 1;
		cvmx_pow_tag_op_t op : 4;
		u64 unused1 : 12;
		u64 qos : 3;
		u64 unused2 : 1;
		u64 grp : 6;
		u64 unused3 : 3;
		cvmx_pow_tag_type_t type : 2;
		u64 tag : 32;
	} s_cn68xx_add;
	struct {
		u64 no_sched : 1;
		cvmx_pow_tag_op_t op : 4;
		u64 unused1 : 16;
		u64 grp : 6;
		u64 unused3 : 3;
		cvmx_pow_tag_type_t type : 2;
		u64 tag : 32;
	} s_cn68xx_other;
	struct {
		u64 rsvd_62_63 : 2;
		u64 grp : 10;
		cvmx_pow_tag_type_t type : 2;
		u64 no_sched : 1;
		u64 rsvd_48 : 1;
		cvmx_pow_tag_op_t op : 4;
		u64 rsvd_42_43 : 2;
		u64 wqp : 42;
	} s_cn78xx_other;
} cvmx_pow_tag_req_t;
union cvmx_pow_tag_req_addr {
	u64 u64;
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 addr : 40;
	} s;
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 node : 4;
		u64 tag : 32;
		u64 reserved_0_3 : 4;
	} s_cn78xx;
};
/**
 * This structure describes the addresses used for loads from the POW
 */
typedef union {
	u64 u64;
	/**
	 * Address for new work request loads (did<2:0> == 0)
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_4_39 : 36;
		u64 wait : 1;
		u64 reserved_0_2 : 3;
	} swork;
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 node : 4;
		u64 reserved_32_35 : 4;
		u64 indexed : 1;
		u64 grouped : 1;
		u64 rtngrp : 1;
		u64 reserved_16_28 : 13;
		u64 index : 12;
		u64 wait : 1;
		u64 reserved_0_2 : 3;
	} swork_78xx;
	/**
	 * Address for loads to get POW internal status
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_10_39 : 30;
		u64 coreid : 4;
		u64 get_rev : 1;
		u64 get_cur : 1;
		u64 get_wqp : 1;
		u64 reserved_0_2 : 3;
	} sstatus;
	/**
	 * Address for loads to get 68XX SSO internal status
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_14_39 : 26;
		u64 coreid : 5;
		u64 reserved_6_8 : 3;
		u64 opcode : 3;
		u64 reserved_0_2 : 3;
	} sstatus_cn68xx;
	/**
	 * Address for memory loads to get POW internal state
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_16_39 : 24;
		u64 index : 11;
		u64 get_des : 1;
		u64 get_wqp : 1;
		u64 reserved_0_2 : 3;
	} smemload;
	/**
	 * Address for memory loads to get SSO internal state
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_20_39 : 20;
		u64 index : 11;
		u64 reserved_6_8 : 3;
		u64 opcode : 3;
		u64 reserved_0_2 : 3;
	} smemload_cn68xx;
	/**
	 * Address for index/pointer loads
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_9_39 : 31;
		u64 qosgrp : 4;
		u64 get_des_get_tail : 1;
		u64 get_rmt : 1;
		u64 reserved_0_2 : 3;
	} sindexload;
	/**
	 * Address for Index/Pointer loads to get SSO internal state
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_15_39 : 25;
		u64 qos_grp : 6;
		u64 reserved_6_8 : 3;
		u64 opcode : 3;
		u64 reserved_0_2 : 3;
	} sindexload_cn68xx;
	/**
	 * Address for NULL_RD request (did<2:0> == 4).
	 * When this is read, HW attempts to change the state to NULL if it is NULL_NULL
	 * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -
	 * software may need to recover by finishing another piece of work before a POW
	 * entry can ever become available.)
	 */
	struct {
		u64 mem_region : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 reserved_0_39 : 40;
	} snull_rd;
} cvmx_pow_load_addr_t;
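/*
 * Example (illustrative sketch, mirroring cvmx_pow_work_request_sync_nocheck()
 * below): building the load address for a new work request on pre-78xx
 * chips. A 64-bit load from this address performs the request.
 *
 *	cvmx_pow_load_addr_t ptr;
 *
 *	ptr.u64 = 0;
 *	ptr.swork.mem_region = CVMX_IO_SEG;
 *	ptr.swork.is_io = 1;
 *	ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
 *	ptr.swork.wait = CVMX_POW_WAIT;
 */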
/**
 * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)
 */
typedef union {
	u64 u64;
	/**
	 * Response to new work request loads
	 */
	struct {
		u64 no_work : 1;
		u64 pend_switch : 1;
		u64 tt : 2;
		u64 reserved_58_59 : 2;
		u64 grp : 10;
		u64 reserved_42_47 : 6;
		u64 addr : 42;
	} s_work;
	/**
	 * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
	 */
	struct {
		u64 reserved_62_63 : 2;
		u64 pend_switch : 1;
		u64 pend_switch_full : 1;
		u64 pend_switch_null : 1;
		u64 pend_desched : 1;
		u64 pend_desched_switch : 1;
		u64 pend_nosched : 1;
		u64 pend_new_work : 1;
		u64 pend_new_work_wait : 1;
		u64 pend_null_rd : 1;
		u64 pend_nosched_clr : 1;
		u64 reserved_51 : 1;
		u64 pend_index : 11;
		u64 pend_grp : 4;
		u64 reserved_34_35 : 2;
		u64 pend_type : 2;
		u64 pend_tag : 32;
	} s_sstatus0;
	/**
	 * Result for an SSO Status Load (when opcode is SL_PENDTAG)
	 */
	struct {
		u64 pend_switch : 1;
		u64 pend_get_work : 1;
		u64 pend_get_work_wait : 1;
		u64 pend_nosched : 1;
		u64 pend_nosched_clr : 1;
		u64 pend_desched : 1;
		u64 pend_alloc_we : 1;
		u64 reserved_48_56 : 9;
		u64 pend_index : 11;
		u64 reserved_34_36 : 3;
		u64 pend_type : 2;
		u64 pend_tag : 32;
	} s_sstatus0_cn68xx;
	/**
	 * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
	 */
	struct {
		u64 reserved_62_63 : 2;
		u64 pend_switch : 1;
		u64 pend_switch_full : 1;
		u64 pend_switch_null : 1;
		u64 pend_desched : 1;
		u64 pend_desched_switch : 1;
		u64 pend_nosched : 1;
		u64 pend_new_work : 1;
		u64 pend_new_work_wait : 1;
		u64 pend_null_rd : 1;
		u64 pend_nosched_clr : 1;
		u64 reserved_51 : 1;
		u64 pend_index : 11;
		u64 pend_grp : 4;
		u64 pend_wqp : 36;
	} s_sstatus1;
	/**
	 * Result for an SSO Status Load (when opcode is SL_PENDWQP)
	 */
	struct {
		u64 pend_switch : 1;
		u64 pend_get_work : 1;
		u64 pend_get_work_wait : 1;
		u64 pend_nosched : 1;
		u64 pend_nosched_clr : 1;
		u64 pend_desched : 1;
		u64 pend_alloc_we : 1;
		u64 reserved_51_56 : 6;
		u64 pend_index : 11;
		u64 reserved_38_39 : 2;
		u64 pend_wqp : 38;
	} s_sstatus1_cn68xx;
	struct {
		u64 pend_switch : 1;
		u64 pend_get_work : 1;
		u64 pend_get_work_wait : 1;
		u64 pend_nosched : 1;
		u64 pend_nosched_clr : 1;
		u64 pend_desched : 1;
		u64 pend_alloc_we : 1;
		u64 reserved_56 : 1;
		u64 prep_index : 12;
		u64 reserved_42_43 : 2;
		u64 pend_tag : 42;
	} s_sso_ppx_pendwqp_cn78xx;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)
	 */
	struct {
		u64 reserved_62_63 : 2;
		u64 link_index : 11;
		u64 index : 11;
		u64 grp : 4;
		u64 head : 1;
		u64 tail : 1;
		u64 tag_type : 2;
		u64 tag : 32;
	} s_sstatus2;
	/**
	 * Result for an SSO Status Load (when opcode is SL_TAG)
	 */
	struct {
		u64 reserved_57_63 : 7;
		u64 index : 11;
		u64 reserved_45 : 1;
		u64 grp : 6;
		u64 head : 1;
		u64 tail : 1;
		u64 reserved_34_36 : 3;
		u64 tag_type : 2;
		u64 tag : 32;
	} s_sstatus2_cn68xx;
	struct {
		u64 tailc : 1;
		u64 reserved_60_62 : 3;
		u64 index : 12;
		u64 reserved_46_47 : 2;
		u64 grp : 10;
		u64 head : 1;
		u64 tail : 1;
		u64 tt : 2;
		u64 tag : 32;
	} s_sso_ppx_tag_cn78xx;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
	 */
	struct {
		u64 reserved_62_63 : 2;
		u64 revlink_index : 11;
		u64 index : 11;
		u64 grp : 4;
		u64 head : 1;
		u64 tail : 1;
		u64 tag_type : 2;
		u64 tag : 32;
	} s_sstatus3;
	/**
	 * Result for an SSO Status Load (when opcode is SL_WQP)
	 */
	struct {
		u64 reserved_58_63 : 6;
		u64 index : 11;
		u64 reserved_46 : 1;
		u64 grp : 6;
		u64 reserved_38_39 : 2;
		u64 wqp : 38;
	} s_sstatus3_cn68xx;
	struct {
		u64 reserved_58_63 : 6;
		u64 grp : 10;
		u64 reserved_42_47 : 6;
		u64 tag : 42;
	} s_sso_ppx_wqp_cn78xx;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)
	 */
	struct {
		u64 reserved_62_63 : 2;
		u64 link_index : 11;
		u64 index : 11;
		u64 grp : 4;
		u64 wqp : 36;
	} s_sstatus4;
	/**
	 * Result for an SSO Status Load (when opcode is SL_LINKS)
	 */
	struct {
		u64 reserved_46_63 : 18;
		u64 index : 11;
		u64 reserved_34 : 1;
		u64 grp : 6;
		u64 head : 1;
		u64 tail : 1;
		u64 reserved_24_25 : 2;
		u64 revlink_index : 11;
		u64 reserved_11_12 : 2;
		u64 link_index : 11;
	} s_sstatus4_cn68xx;
	struct {
		u64 tailc : 1;
		u64 reserved_60_62 : 3;
		u64 index : 12;
		u64 reserved_38_47 : 10;
		u64 grp : 10;
		u64 head : 1;
		u64 tail : 1;
		u64 reserved_25 : 1;
		u64 revlink_index : 12;
		u64 link_index_vld : 1;
		u64 link_index : 12;
	} s_sso_ppx_links_cn78xx;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)
	 */
	struct {
		u64 reserved_62_63 : 2;
		u64 revlink_index : 11;
		u64 index : 11;
		u64 grp : 4;
		u64 wqp : 36;
	} s_sstatus5;
	/**
	 * Result for a POW Memory Load (get_des == 0 and get_wqp == 0)
	 */
	struct {
		u64 reserved_51_63 : 13;
		u64 next_index : 11;
		u64 grp : 4;
		u64 reserved_35 : 1;
		u64 tail : 1;
		u64 tag_type : 2;
		u64 tag : 32;
	} s_smemload0;
	/**
	 * Result for an SSO Memory Load (opcode is ML_TAG)
	 */
	struct {
		u64 reserved_38_63 : 26;
		u64 tail : 1;
		u64 reserved_34_36 : 3;
		u64 tag_type : 2;
		u64 tag : 32;
	} s_smemload0_cn68xx;
	struct {
		u64 reserved_39_63 : 25;
		u64 tail : 1;
		u64 reserved_34_36 : 3;
		u64 tag_type : 2;
		u64 tag : 32;
	} s_sso_iaq_ppx_tag_cn78xx;
	/**
	 * Result for a POW Memory Load (get_des == 0 and get_wqp == 1)
	 */
	struct {
		u64 reserved_51_63 : 13;
		u64 next_index : 11;
		u64 grp : 4;
		u64 wqp : 36;
	} s_smemload1;
	/**
	 * Result for an SSO Memory Load (opcode is ML_WQPGRP)
	 */
	struct {
		u64 reserved_48_63 : 16;
		u64 nosched : 1;
		u64 reserved_46 : 1;
		u64 grp : 6;
		u64 reserved_38_39 : 2;
		u64 wqp : 38;
	} s_smemload1_cn68xx;
	/**
	 * Entry structures for the CN7XXX chips.
	 */
	struct {
		u64 reserved_39_63 : 25;
		u64 tailc : 1;
		u64 tail : 1;
		u64 reserved_34_36 : 3;
		u64 tt : 2;
		u64 tag : 32;
	} s_sso_ientx_tag_cn78xx;
	struct {
		u64 reserved_62_63 : 2;
		u64 head : 1;
		u64 nosched : 1;
		u64 reserved_56_59 : 4;
		u64 grp : 8;
		u64 reserved_42_47 : 6;
		u64 wqp : 42;
	} s_sso_ientx_wqpgrp_cn73xx;
	struct {
		u64 reserved_62_63 : 2;
		u64 head : 1;
		u64 nosched : 1;
		u64 reserved_58_59 : 2;
		u64 grp : 10;
		u64 reserved_42_47 : 6;
		u64 wqp : 42;
	} s_sso_ientx_wqpgrp_cn78xx;
	struct {
		u64 reserved_38_63 : 26;
		u64 pend_switch : 1;
		u64 reserved_34_36 : 3;
		u64 pend_tt : 2;
		u64 pend_tag : 32;
	} s_sso_ientx_pendtag_cn78xx;
	struct {
		u64 reserved_26_63 : 38;
		u64 prev_index : 10;
		u64 reserved_11_15 : 5;
		u64 next_index_vld : 1;
		u64 next_index : 10;
	} s_sso_ientx_links_cn73xx;
	struct {
		u64 reserved_28_63 : 36;
		u64 prev_index : 12;
		u64 reserved_13_15 : 3;
		u64 next_index_vld : 1;
		u64 next_index : 12;
	} s_sso_ientx_links_cn78xx;
	/**
	 * Result for a POW Memory Load (get_des == 1)
	 */
	struct {
		u64 reserved_51_63 : 13;
		u64 fwd_index : 11;
		u64 grp : 4;
		u64 nosched : 1;
		u64 pend_switch : 1;
		u64 pend_type : 2;
		u64 pend_tag : 32;
	} s_smemload2;
	/**
	 * Result for an SSO Memory Load (opcode is ML_PENDTAG)
	 */
	struct {
		u64 reserved_38_63 : 26;
		u64 pend_switch : 1;
		u64 reserved_34_36 : 3;
		u64 pend_type : 2;
		u64 pend_tag : 32;
	} s_smemload2_cn68xx;
	struct {
		u64 pend_switch : 1;
		u64 pend_get_work : 1;
		u64 pend_get_work_wait : 1;
		u64 pend_nosched : 1;
		u64 pend_nosched_clr : 1;
		u64 pend_desched : 1;
		u64 pend_alloc_we : 1;
		u64 reserved_34_56 : 23;
		u64 pend_tt : 2;
		u64 pend_tag : 32;
	} s_sso_ppx_pendtag_cn78xx;
	/**
	 * Result for an SSO Memory Load (opcode is ML_LINKS)
	 */
	struct {
		u64 reserved_24_63 : 40;
		u64 fwd_index : 11;
		u64 reserved_11_12 : 2;
		u64 next_index : 11;
	} s_smemload3_cn68xx;
	/**
	 * Result for a POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
	 */
	struct {
		u64 reserved_52_63 : 12;
		u64 free_val : 1;
		u64 free_one : 1;
		u64 reserved_49 : 1;
		u64 free_head : 11;
		u64 reserved_37 : 1;
		u64 free_tail : 11;
		u64 loc_val : 1;
		u64 loc_one : 1;
		u64 reserved_23 : 1;
		u64 loc_head : 11;
		u64 reserved_11 : 1;
		u64 loc_tail : 11;
	} sindexload0;
	/**
	 * Result for an SSO Index/Pointer Load (opcode ==
	 * IPL_IQ/IPL_DESCHED/IPL_NOSCHED)
	 */
	struct {
		u64 reserved_28_63 : 36;
		u64 queue_val : 1;
		u64 queue_one : 1;
		u64 reserved_24_25 : 2;
		u64 queue_head : 11;
		u64 reserved_11_12 : 2;
		u64 queue_tail : 11;
	} sindexload0_cn68xx;
	/**
	 * Result for a POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
	 */
	struct {
		u64 reserved_52_63 : 12;
		u64 nosched_val : 1;
		u64 nosched_one : 1;
		u64 reserved_49 : 1;
		u64 nosched_head : 11;
		u64 reserved_37 : 1;
		u64 nosched_tail : 11;
		u64 des_val : 1;
		u64 des_one : 1;
		u64 reserved_23 : 1;
		u64 des_head : 11;
		u64 reserved_11 : 1;
		u64 des_tail : 11;
	} sindexload1;
	/**
	 * Result for an SSO Index/Pointer Load (opcode == IPL_FREE0/IPL_FREE1/IPL_FREE2)
	 */
	struct {
		u64 reserved_60_63 : 4;
		u64 qnum_head : 2;
		u64 qnum_tail : 2;
		u64 reserved_28_55 : 28;
		u64 queue_val : 1;
		u64 queue_one : 1;
		u64 reserved_24_25 : 2;
		u64 queue_head : 11;
		u64 reserved_11_12 : 2;
		u64 queue_tail : 11;
	} sindexload1_cn68xx;
	/**
	 * Result for a POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
	 */
	struct {
		u64 reserved_39_63 : 25;
		u64 rmt_is_head : 1;
		u64 rmt_val : 1;
		u64 rmt_one : 1;
		u64 rmt_head : 36;
	} sindexload2;
	/**
	 * Result for a POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)
	 */
	struct {
		u64 reserved_39_63 : 25;
		u64 rmt_is_head : 1;
		u64 rmt_val : 1;
		u64 rmt_one : 1;
		u64 rmt_tail : 36;
	} sindexload3;
	/**
	 * Response to NULL_RD request loads
	 */
	struct {
		u64 unused : 62;
		u64 state : 2;
	} s_null_rd;
} cvmx_pow_tag_load_resp_t;
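/*
 * Example (illustrative sketch): decoding the response to a new work
 * request, exactly as cvmx_pow_work_request_sync_nocheck() below does.
 *
 *	cvmx_pow_tag_load_resp_t result;
 *
 *	result.u64 = csr_rd(ptr.u64);
 *	if (result.s_work.no_work)
 *		return NULL;
 *	return (cvmx_wqe_t *)cvmx_phys_to_ptr(result.s_work.addr);
 */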
typedef union {
	u64 u64;
	struct {
		u64 reserved_57_63 : 7;
		u64 index : 11;
		u64 reserved_45 : 1;
		u64 grp : 6;
		u64 head : 1;
		u64 tail : 1;
		u64 reserved_34_36 : 3;
		u64 tag_type : 2;
		u64 tag : 32;
	} s;
} cvmx_pow_sl_tag_resp_t;
/**
 * This structure describes the address used for stores to the POW.
 * The store address is meaningful on stores to the POW. The hardware assumes that an aligned
 * 64-bit store was used for all these stores.
 * Note the assumption that the work queue entry is aligned on an 8-byte
 * boundary (since the low-order 3 address bits must be zero).
 * Note that not all fields are used by all operations.
 *
 * NOTE: The following is the behavior of the pending switch bit at the PP
 * for POW stores (i.e. when did<7:3> == 0xc)
 * - did<2:0> == 0 => pending switch bit is set
 * - did<2:0> == 1 => no effect on the pending switch bit
 * - did<2:0> == 3 => pending switch bit is cleared
 * - did<2:0> == 7 => no effect on the pending switch bit
 * - did<2:0> == others => must not be used
 * - No other loads/stores have an effect on the pending switch bit
 * - The switch bus from POW can clear the pending switch bit
 *
 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
 * (which only contains the pointer). SW must never use did<2:0> == 2.
 */
typedef union {
	u64 u64;
	struct {
		u64 mem_reg : 2;
		u64 reserved_49_61 : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 addr : 40;
	} stag;
} cvmx_pow_tag_store_addr_t; /* FIXME- this type is unused */
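/*
 * Hedged sketch of a raw tag-op store using this address format. Per the
 * NOTE above, the low three bits of `did` determine the effect on the
 * pending switch bit; CVMX_OCT_DID_TAG_SWTAG is the did used for tag
 * switches elsewhere in this file.
 *
 *	cvmx_pow_tag_store_addr_t addr;
 *	cvmx_pow_tag_req_t req;
 *
 *	addr.u64 = 0;
 *	addr.stag.mem_reg = CVMX_IO_SEG;
 *	addr.stag.is_io = 1;
 *	addr.stag.did = CVMX_OCT_DID_TAG_SWTAG;
 *	req.u64 = 0;
 *	req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
 *	cvmx_write_io(addr.u64, req.u64);
 */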
/**
 * Decode of the store data when an IOBDMA SENDSINGLE is sent to POW
 */
typedef union {
	u64 u64;
	struct {
		u64 scraddr : 8;
		u64 len : 8;
		u64 did : 8;
		u64 unused : 36;
		u64 wait : 1;
		u64 unused2 : 3;
	} s;
	struct {
		u64 scraddr : 8;
		u64 len : 8;
		u64 did : 8;
		u64 node : 4;
		u64 unused1 : 4;
		u64 indexed : 1;
		u64 grouped : 1;
		u64 rtngrp : 1;
		u64 unused2 : 13;
		u64 index_grp_mask : 12;
		u64 wait : 1;
		u64 unused3 : 3;
	} s_cn78xx;
} cvmx_pow_iobdma_store_t;
/* CSR typedefs have been moved to cvmx-pow-defs.h */

/* Enum for group priority parameters which need modification */
enum cvmx_sso_group_modify_mask {
	CVMX_SSO_MODIFY_GROUP_PRIORITY = 0x01,
	CVMX_SSO_MODIFY_GROUP_WEIGHT = 0x02,
	CVMX_SSO_MODIFY_GROUP_AFFINITY = 0x04
};
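/*
 * Illustrative note: these values are bit flags and may be OR-ed together
 * to modify several parameters in a single call, e.g.
 * CVMX_SSO_MODIFY_GROUP_PRIORITY | CVMX_SSO_MODIFY_GROUP_WEIGHT.
 */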
/**
 * @INTERNAL
 * Return the number of SSO groups for a given SoC model
 */
static inline unsigned int cvmx_sso_num_xgrp(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 256;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 64;
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 64;
	printf("ERROR: %s: Unknown model\n", __func__);
	return 0;
}
/**
 * @INTERNAL
 * Return the number of POW groups on the current model.
 * In case of CN78XX/CN73XX this is the number of equivalent
 * "legacy groups" on the chip when it is used in backward
 * compatible mode.
 */
static inline unsigned int cvmx_pow_num_groups(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return cvmx_sso_num_xgrp() >> 3;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		return 64;
	else
		return 16;
}
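/*
 * For example, on CN78XX this works out to 256 >> 3 = 32 legacy-equivalent
 * groups, and on CN73XX/CNF75XX to 64 >> 3 = 8.
 */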
/**
 * @INTERNAL
 * Return the number of mask-set registers.
 */
static inline unsigned int cvmx_sso_num_maskset(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return 2;
	else
		return 1;
}
/**
 * Get the POW tag for this core. This returns the current
 * tag type, tag, group, and POW entry index associated with
 * this core. Index is only valid if the tag type isn't NULL_NULL.
 * If a tag switch is pending this routine returns the tag before
 * the tag switch, not after.
 *
 * Return: Current tag
 */
static inline cvmx_pow_tag_info_t cvmx_pow_get_current_tag(void)
{
	cvmx_pow_load_addr_t load_addr;
	cvmx_pow_tag_info_t result;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_sl_ppx_tag_t sl_ppx_tag;
		cvmx_xgrp_t xgrp;
		int node, core;

		CVMX_SYNCS;
		node = cvmx_get_node_num();
		core = cvmx_get_local_core_num();
		sl_ppx_tag.u64 = csr_rd_node(node, CVMX_SSO_SL_PPX_TAG(core));
		result.index = sl_ppx_tag.s.index;
		result.tag_type = sl_ppx_tag.s.tt;
		result.tag = sl_ppx_tag.s.tag;
		/* Get native XGRP value */
		xgrp.xgrp = sl_ppx_tag.s.grp;
		/* Return legacy style group 0..15 */
		result.grp = xgrp.group;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		cvmx_pow_sl_tag_resp_t load_resp;

		load_addr.u64 = 0;
		load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
		load_addr.sstatus_cn68xx.is_io = 1;
		load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
		load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
		load_addr.sstatus_cn68xx.opcode = 3;
		load_resp.u64 = csr_rd(load_addr.u64);
		result.grp = load_resp.s.grp;
		result.index = load_resp.s.index;
		result.tag_type = load_resp.s.tag_type;
		result.tag = load_resp.s.tag;
	} else {
		cvmx_pow_tag_load_resp_t load_resp;

		load_addr.u64 = 0;
		load_addr.sstatus.mem_region = CVMX_IO_SEG;
		load_addr.sstatus.is_io = 1;
		load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
		load_addr.sstatus.coreid = cvmx_get_core_num();
		load_addr.sstatus.get_cur = 1;
		load_resp.u64 = csr_rd(load_addr.u64);
		result.grp = load_resp.s_sstatus2.grp;
		result.index = load_resp.s_sstatus2.index;
		result.tag_type = load_resp.s_sstatus2.tag_type;
		result.tag = load_resp.s_sstatus2.tag;
	}
	return result;
}
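/*
 * Example (illustrative sketch): inspecting the current tag before an
 * operation that is illegal from a NULL or NULL_NULL tag.
 *
 *	cvmx_pow_tag_info_t info = cvmx_pow_get_current_tag();
 *
 *	if (info.tag_type != CVMX_POW_TAG_TYPE_NULL &&
 *	    info.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL) {
 *		... safe to start a plain tag switch ...
 *	}
 */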
/**
 * Get the POW WQE for this core. This returns the work queue
 * entry currently associated with this core.
 *
 * Return: WQE pointer
 */
static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
{
	cvmx_pow_load_addr_t load_addr;
	cvmx_pow_tag_load_resp_t load_resp;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_sl_ppx_wqp_t sso_wqp;
		int node = cvmx_get_node_num();
		int core = cvmx_get_local_core_num();

		sso_wqp.u64 = csr_rd_node(node, CVMX_SSO_SL_PPX_WQP(core));
		if (sso_wqp.s.wqp)
			return (cvmx_wqe_t *)cvmx_phys_to_ptr(sso_wqp.s.wqp);
		return (cvmx_wqe_t *)0;
	}
	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		load_addr.u64 = 0;
		load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
		load_addr.sstatus_cn68xx.is_io = 1;
		load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
		load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
		load_addr.sstatus_cn68xx.opcode = 4;
		load_resp.u64 = csr_rd(load_addr.u64);
		if (load_resp.s_sstatus3_cn68xx.wqp)
			return (cvmx_wqe_t *)cvmx_phys_to_ptr(load_resp.s_sstatus3_cn68xx.wqp);
		else
			return (cvmx_wqe_t *)0;
	} else {
		load_addr.u64 = 0;
		load_addr.sstatus.mem_region = CVMX_IO_SEG;
		load_addr.sstatus.is_io = 1;
		load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
		load_addr.sstatus.coreid = cvmx_get_core_num();
		load_addr.sstatus.get_cur = 1;
		load_addr.sstatus.get_wqp = 1;
		load_resp.u64 = csr_rd(load_addr.u64);
		return (cvmx_wqe_t *)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
	}
}
/**
 * @INTERNAL
 * Print a warning if a tag switch is pending for this core
 *
 * @param function Function name checking for a pending tag switch
 */
static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
{
	u64 switch_complete;

	CVMX_MF_CHORD(switch_complete);
	cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
}
/**
 * Waits for a tag switch to complete by polling the completion bit.
 * Note that switches to NULL complete immediately and do not need
 * to be waited for.
 */
static inline void cvmx_pow_tag_sw_wait(void)
{
	const u64 TIMEOUT_MS = 10; /* 10ms timeout */
	u64 switch_complete;
	u64 start_cycle;

	if (CVMX_ENABLE_POW_CHECKS)
		start_cycle = get_timer(0);
	while (1) {
		CVMX_MF_CHORD(switch_complete);
		if (cvmx_likely(switch_complete))
			break;
		if (CVMX_ENABLE_POW_CHECKS) {
			if (cvmx_unlikely(get_timer(start_cycle) > TIMEOUT_MS)) {
				debug("WARNING: %s: Tag switch is taking a long time, possible deadlock\n",
				      __func__);
			}
		}
	}
}
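/*
 * Usage note (illustrative): because a tag switch proceeds in the
 * background, a common pattern is to start the switch, overlap it with
 * unrelated processing, and wait only when the new tag's guarantee is
 * actually needed. `do_unrelated_work()` is a hypothetical application
 * function.
 *
 *	cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ATOMIC);
 *	do_unrelated_work();
 *	cvmx_pow_tag_sw_wait();	... now serialized on the atomic tag ...
 */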
/**
 * Synchronous work request. Requests work from the POW.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that there is not a pending tag switch.
 *
 * @param wait When set, call stalls until work becomes available, or
 *             times out. If not set, returns immediately.
 *
 * Return: Returns the WQE pointer from POW. Returns NULL if no work was
 * available.
 */
static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
{
	cvmx_pow_load_addr_t ptr;
	cvmx_pow_tag_load_resp_t result;

	if (CVMX_ENABLE_POW_CHECKS)
		__cvmx_pow_warn_if_pending_switch(__func__);
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.swork_78xx.node = cvmx_get_node_num();
		ptr.swork_78xx.mem_region = CVMX_IO_SEG;
		ptr.swork_78xx.is_io = 1;
		ptr.swork_78xx.did = CVMX_OCT_DID_TAG_SWTAG;
		ptr.swork_78xx.wait = wait;
	} else {
		ptr.swork.mem_region = CVMX_IO_SEG;
		ptr.swork.is_io = 1;
		ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
		ptr.swork.wait = wait;
	}
	result.u64 = csr_rd(ptr.u64);
	if (result.s_work.no_work)
		return NULL;
	else
		return (cvmx_wqe_t *)cvmx_phys_to_ptr(result.s_work.addr);
}
/**
 * Synchronous work request. Requests work from the POW.
 * This function waits for any previous tag switch to complete before
 * requesting the new work.
 *
 * @param wait When set, call stalls until work becomes available, or
 *             times out. If not set, returns immediately.
 *
 * Return: Returns the WQE pointer from POW. Returns NULL if no work was
 * available.
 */
static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
{
	/* Must not have a switch pending when requesting work */
	cvmx_pow_tag_sw_wait();
	return (cvmx_pow_work_request_sync_nocheck(wait));
}
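/*
 * Example (illustrative sketch): the canonical synchronous work loop.
 * `process_work()` is a hypothetical application handler.
 *
 *	while (1) {
 *		cvmx_wqe_t *wqe = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *
 *		if (!wqe)
 *			continue;	... timed out, try again ...
 *		process_work(wqe);
 *	}
 */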
/**
 * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
 * This function waits for any previous tag switch to complete before
 * requesting the null_rd.
 *
 * Return: Returns the POW state of type cvmx_pow_tag_type_t.
 */
static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
{
	cvmx_pow_load_addr_t ptr;
	cvmx_pow_tag_load_resp_t result;

	/* Must not have a switch pending when requesting work */
	cvmx_pow_tag_sw_wait();
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.swork_78xx.mem_region = CVMX_IO_SEG;
		ptr.swork_78xx.is_io = 1;
		ptr.swork_78xx.did = CVMX_OCT_DID_TAG_NULL_RD;
		ptr.swork_78xx.node = cvmx_get_node_num();
	} else {
		ptr.snull_rd.mem_region = CVMX_IO_SEG;
		ptr.snull_rd.is_io = 1;
		ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
	}
	result.u64 = csr_rd(ptr.u64);
	return (cvmx_pow_tag_type_t)result.s_null_rd.state;
}
/**
 * Asynchronous work request.
 * Work is requested from the POW unit, and should later be checked with
 * function cvmx_pow_work_response_async.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that there is not a pending tag switch.
 *
 * @param scr_addr Scratch memory address that response will be returned to,
 *                 which is either a valid WQE, or a response with the invalid bit set.
 *                 Byte address, must be 8 byte aligned.
 * @param wait 1 to cause response to wait for work to become available
 *             (or timeout)
 *             0 to cause response to return immediately
 */
static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)
{
	cvmx_pow_iobdma_store_t data;

	if (CVMX_ENABLE_POW_CHECKS)
		__cvmx_pow_warn_if_pending_switch(__func__);
	/* scr_addr must be 8 byte aligned */
	data.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		data.s_cn78xx.node = cvmx_get_node_num();
		data.s_cn78xx.scraddr = scr_addr >> 3;
		data.s_cn78xx.len = 1;
		data.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;
		data.s_cn78xx.wait = wait;
	} else {
		data.s.scraddr = scr_addr >> 3;
		data.s.len = 1;
		data.s.did = CVMX_OCT_DID_TAG_SWTAG;
		data.s.wait = wait;
	}
	cvmx_send_single(data.u64);
}
/**
 * Asynchronous work request.
 * Work is requested from the POW unit, and should later be checked with
 * function cvmx_pow_work_response_async.
 * This function waits for any previous tag switch to complete before
 * requesting the new work.
 *
 * @param scr_addr Scratch memory address that response will be returned to,
 *                 which is either a valid WQE, or a response with the invalid bit set.
 *                 Byte address, must be 8 byte aligned.
 * @param wait 1 to cause response to wait for work to become available
 *             (or timeout)
 *             0 to cause response to return immediately
 */
static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
{
	/* Must not have a switch pending when requesting work */
	cvmx_pow_tag_sw_wait();
	cvmx_pow_work_request_async_nocheck(scr_addr, wait);
}
/**
 * Gets result of asynchronous work request. Performs an IOBDMA sync
 * to wait for the response.
 *
 * @param scr_addr Scratch memory address to get result from
 *                 Byte address, must be 8 byte aligned.
 *
 * Return: Returns the WQE from the scratch register, or NULL if no work was
 * available.
 */
static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
{
	cvmx_pow_tag_load_resp_t result;

	CVMX_SYNCIOBDMA;
	result.u64 = cvmx_scratch_read64(scr_addr);
	if (result.s_work.no_work)
		return NULL;
	else
		return (cvmx_wqe_t *)cvmx_phys_to_ptr(result.s_work.addr);
}
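/*
 * Example (illustrative sketch): pipelining work requests through the
 * scratchpad. SCR_OFF is a hypothetical 8-byte-aligned scratch offset
 * chosen by the application.
 *
 *	cvmx_pow_work_request_async(SCR_OFF, CVMX_POW_WAIT);
 *	... other processing while the request is in flight ...
 *	cvmx_wqe_t *wqe = cvmx_pow_work_response_async(SCR_OFF);
 *	if (wqe)
 *		... process the returned work ...
 */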
/**
 * Checks if a work queue entry pointer returned by a work
 * request is valid. It may be invalid due to no work
 * being available or due to a timeout.
 *
 * @param wqe_ptr pointer to a work queue entry returned by the POW
 *
 * Return: 0 if pointer is valid
 *         1 if invalid (no work was returned)
 */
static inline u64 cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
{
	return (!wqe_ptr); /* FIXME: improve */
}
/**
 * Starts a tag switch to the provided tag value and tag type. Completion for
 * the tag switch must be checked for separately.
 * This function does NOT update the
 * work queue entry in DRAM to match tag value and type, so the application must
 * keep track of these if they are important to the application.
 * This tag switch command must not be used for switches to NULL, as the tag
 * switch pending bit will be set by the switch request, but never cleared by
 * the hardware.
 *
 * NOTE: This should not be used when switching from a NULL tag. Use
 * cvmx_pow_tag_sw_full() instead.
 *
 * This function does no checks, so the caller must ensure that any previous tag
 * switch has completed.
 *
 * @param tag new tag value
 * @param tag_type new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw_nocheck(u32 tag, cvmx_pow_tag_type_t tag_type)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,
			     "%s called with NULL tag\n", __func__);
		cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),
			     "%s called to perform a tag switch to the same tag\n", __func__);
		cvmx_warn_if(
			tag_type == CVMX_POW_TAG_TYPE_NULL,
			"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
			__func__);
	}
	/*
	 * Note that WQE in DRAM is not updated here, as the POW does not read
	 * from DRAM once the WQE is in flight. See hardware manual for
	 * complete details.
	 * It is the application's responsibility to keep track of the
	 * current tag value if that is important.
	 */
	tag_req.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG;
		tag_req.s_cn78xx_other.type = tag_type;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
		tag_req.s_cn68xx_other.tag = tag;
		tag_req.s_cn68xx_other.type = tag_type;
	} else {
		tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
		tag_req.s_cn38xx.tag = tag;
		tag_req.s_cn38xx.type = tag_type;
	}
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
		ptr.s_cn78xx.is_io = 1;
		ptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;
		ptr.s_cn78xx.node = cvmx_get_node_num();
		ptr.s_cn78xx.tag = tag;
	} else {
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_SWTAG;
	}
	/* Once this store arrives at POW, it will attempt the switch;
	 * software must wait for the switch to complete separately.
	 */
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Starts a tag switch to the provided tag value and tag type. Completion for
 * the tag switch must be checked for separately.
 * This function does NOT update the
 * work queue entry in DRAM to match tag value and type, so the application must
 * keep track of these if they are important to the application.
 * This tag switch command must not be used for switches to NULL, as the tag
 * switch pending bit will be set by the switch request, but never cleared by
 * the hardware.
 *
 * NOTE: This should not be used when switching from a NULL tag. Use
 * cvmx_pow_tag_sw_full() instead.
 *
 * This function waits for any previous tag switch to complete, and also
 * displays an error on tag switches to NULL.
 *
 * @param tag new tag value
 * @param tag_type new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw(u32 tag, cvmx_pow_tag_type_t tag_type)
{
	/*
	 * Note that WQE in DRAM is not updated here, as the POW does not read
	 * from DRAM once the WQE is in flight. See hardware manual for
	 * complete details. It is the application's responsibility to keep
	 * track of the current tag value if that is important.
	 */
	/*
	 * Ensure that there is not a pending tag switch, as a tag switch
	 * cannot be started if a previous switch is still pending.
	 */
	cvmx_pow_tag_sw_wait();
	cvmx_pow_tag_sw_nocheck(tag, tag_type);
}
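/*
 * Example (illustrative): entering a per-flow critical section. Assuming
 * the ATOMIC/ORDERED tag types from cvmx-pow-defs.h, only one core at a
 * time may hold a given ATOMIC tag, so the code after the wait is
 * serialized per tag value.
 *
 *	cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC);
 *	cvmx_pow_tag_sw_wait();
 *	... update per-flow state ...
 *	cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ORDERED);
 */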
/**
 * Starts a tag switch to the provided tag value and tag type. Completion for
 * the tag switch must be checked for separately.
 * This function does NOT update the
 * work queue entry in DRAM to match tag value and type, so the application must
 * keep track of these if they are important to the application.
 * This tag switch command must not be used for switches to NULL, as the tag
 * switch pending bit will be set by the switch request, but never cleared by
 * the hardware.
 *
 * This function must be used for tag switches from NULL.
 *
 * This function does no checks, so the caller must ensure that any previous tag
 * switch has completed.
 *
 * @param wqp pointer to work queue entry to submit. This entry is
 *            updated to match the other parameters
 * @param tag tag value to be assigned to work queue entry
 * @param tag_type type of tag
 * @param group group value for the work queue entry.
 */
static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, u32 tag,
						cvmx_pow_tag_type_t tag_type, u64 group)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;
	unsigned int node = cvmx_get_node_num();
	u64 wqp_phys = cvmx_ptr_to_phys(wqp);

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),
			     "%s called to perform a tag switch to the same tag\n", __func__);
		cvmx_warn_if(
			tag_type == CVMX_POW_TAG_TYPE_NULL,
			"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
			__func__);
		if ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())
			cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(),
				     "%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
				     __func__, wqp, cvmx_pow_get_current_wqp());
	}
	/*
	 * Note that WQE in DRAM is not updated here, as the POW does not
	 * read from DRAM once the WQE is in flight. See hardware manual
	 * for complete details. It is the application's responsibility to
	 * keep track of the current tag value if that is important.
	 */
	tag_req.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		unsigned int xgrp;

		if (wqp_phys != 0x80) {
			/* If WQE is valid, use its XGRP:
			 * WQE GRP is 10 bits, and is mapped
			 * to legacy GRP + QoS, includes node number.
			 */
			xgrp = wqp->word1.cn78xx.grp;
			/* Use XGRP[node] too */
			node = xgrp >> 8;
			/* Modify XGRP with legacy group # from arg */
			xgrp &= ~0xf8;
			xgrp |= 0xf8 & (group << 3);
		} else {
			/* If no WQE, build XGRP with QoS=0 and current node */
			xgrp = group << 3;
			xgrp |= node << 8;
		}
		tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
		tag_req.s_cn78xx_other.type = tag_type;
		tag_req.s_cn78xx_other.grp = xgrp;
		tag_req.s_cn78xx_other.wqp = wqp_phys;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
		tag_req.s_cn68xx_other.tag = tag;
		tag_req.s_cn68xx_other.type = tag_type;
		tag_req.s_cn68xx_other.grp = group;
	} else {
		tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_FULL;
		tag_req.s_cn38xx.tag = tag;
		tag_req.s_cn38xx.type = tag_type;
		tag_req.s_cn38xx.grp = group;
	}
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
		ptr.s_cn78xx.is_io = 1;
		ptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;
		ptr.s_cn78xx.node = node;
		ptr.s_cn78xx.tag = tag;
	} else {
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_SWTAG;
		ptr.s.addr = wqp_phys;
	}
	/* Once this store arrives at POW, it will attempt the switch;
	 * software must wait for the switch to complete separately.
	 */
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately.
 * This function does NOT update the work queue entry in DRAM to match the tag
 * value and type, so the application must keep track of these if they are
 * important to the application. This tag switch command must not be used for
 * switches to NULL, as the tag switch pending bit will be set by the switch
 * request, but never cleared by the hardware.
 *
 * This function must be used for tag switches from NULL.
 *
 * This function waits for any pending tag switches to complete
 * before requesting the tag switch.
 *
 * @param wqp      Pointer to the work queue entry to submit.
 *                 This entry is updated to match the other parameters.
 * @param tag      Tag value to be assigned to the work queue entry
 * @param tag_type Type of tag
 * @param group    Group value for the work queue entry.
 */
static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,
					u64 group)
{
	/*
	 * Ensure that there is not a pending tag switch, as a tag switch cannot
	 * be started if a previous switch is still pending.
	 */
	cvmx_pow_tag_sw_wait();
	cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
}
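/*
 * Usage sketch (hypothetical tag/group values): switch the work item this
 * core currently holds to an ATOMIC tag in group 4, then wait for the switch
 * to complete before touching state protected by that tag.
 *
 *	cvmx_wqe_t *wqp = cvmx_pow_get_current_wqp();
 *
 *	cvmx_pow_tag_sw_full(wqp, 0x1234, CVMX_POW_TAG_TYPE_ATOMIC, 4);
 *	cvmx_pow_tag_sw_wait();		// completion is checked separately
 */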
/**
 * Switch to a NULL tag, which ends any ordering or
 * synchronization provided by the POW for the current
 * work queue entry. This operation completes immediately,
 * so completion should not be waited for.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that any previous tag switches have completed.
 */
static inline void cvmx_pow_tag_sw_null_nocheck(void)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,
			     "%s called when we already have a NULL tag\n", __func__);
	}
	tag_req.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG;
		tag_req.s_cn78xx_other.type = CVMX_POW_TAG_TYPE_NULL;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
		tag_req.s_cn68xx_other.type = CVMX_POW_TAG_TYPE_NULL;
	} else {
		tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
		tag_req.s_cn38xx.type = CVMX_POW_TAG_TYPE_NULL;
	}
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
		ptr.s_cn78xx.is_io = 1;
		ptr.s_cn78xx.did = CVMX_OCT_DID_TAG_TAG1;
		ptr.s_cn78xx.node = cvmx_get_node_num();
	} else {
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_TAG1;
	}
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Switch to a NULL tag, which ends any ordering or
 * synchronization provided by the POW for the current
 * work queue entry. This operation completes immediately,
 * so completion should not be waited for.
 * This function waits for any pending tag switches to complete
 * before requesting the switch to NULL.
 */
static inline void cvmx_pow_tag_sw_null(void)
{
	/*
	 * Ensure that there is not a pending tag switch, as a tag switch cannot
	 * be started if a previous switch is still pending.
	 */
	cvmx_pow_tag_sw_wait();
	cvmx_pow_tag_sw_null_nocheck();
}
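/*
 * Usage sketch: once the critical section for the current work item is over,
 * drop its ordering/atomicity guarantees before doing unrelated processing.
 *
 *	cvmx_pow_tag_sw_null();		// completes immediately, no wait needed
 */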
/**
 * Submits work to an input queue.
 * This function updates the work queue entry in DRAM to match the arguments given.
 * Note that the tag provided is for the work queue entry submitted, and
 * is unrelated to the tag that the core currently holds.
 *
 * @param wqp      pointer to the work queue entry to submit.
 *                 This entry is updated to match the other parameters
 * @param tag      tag value to be assigned to the work queue entry
 * @param tag_type type of tag
 * @param qos      Input queue to add to.
 * @param grp      group value for the work queue entry.
 */
static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,
					u64 qos, u64 grp)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;

	tag_req.u64 = 0;
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		unsigned int node = cvmx_get_node_num();
		unsigned int xgrp;

		xgrp = (grp & 0x1f) << 3;
		xgrp |= (qos & 7);
		xgrp |= 0x300 & (node << 8);
		wqp->word1.cn78xx.rsvd_0 = 0;
		wqp->word1.cn78xx.rsvd_1 = 0;
		wqp->word1.cn78xx.tag = tag;
		wqp->word1.cn78xx.tag_type = tag_type;
		wqp->word1.cn78xx.grp = xgrp;
		CVMX_SYNCWS;
		tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_ADDWQ;
		tag_req.s_cn78xx_other.type = tag_type;
		tag_req.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);
		tag_req.s_cn78xx_other.grp = xgrp;
		ptr.s_cn78xx.did = 0x66; // CVMX_OCT_DID_TAG_TAG6;
		ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
		ptr.s_cn78xx.is_io = 1;
		ptr.s_cn78xx.node = node;
		ptr.s_cn78xx.tag = tag;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		/* Reset all reserved bits */
		wqp->word1.cn68xx.zero_0 = 0;
		wqp->word1.cn68xx.zero_1 = 0;
		wqp->word1.cn68xx.zero_2 = 0;
		wqp->word1.cn68xx.qos = qos;
		wqp->word1.cn68xx.grp = grp;
		wqp->word1.tag = tag;
		wqp->word1.tag_type = tag_type;
		tag_req.s_cn68xx_add.op = CVMX_POW_TAG_OP_ADDWQ;
		tag_req.s_cn68xx_add.type = tag_type;
		tag_req.s_cn68xx_add.tag = tag;
		tag_req.s_cn68xx_add.qos = qos;
		tag_req.s_cn68xx_add.grp = grp;
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_TAG1;
		ptr.s.addr = cvmx_ptr_to_phys(wqp);
	} else {
		/* Reset all reserved bits */
		wqp->word1.cn38xx.zero_2 = 0;
		wqp->word1.cn38xx.qos = qos;
		wqp->word1.cn38xx.grp = grp;
		wqp->word1.tag = tag;
		wqp->word1.tag_type = tag_type;
		tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_ADDWQ;
		tag_req.s_cn38xx.type = tag_type;
		tag_req.s_cn38xx.tag = tag;
		tag_req.s_cn38xx.qos = qos;
		tag_req.s_cn38xx.grp = grp;
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_TAG1;
		ptr.s.addr = cvmx_ptr_to_phys(wqp);
	}
	/* SYNC write to memory before the work submit.
	 * This is necessary as the POW may read values from DRAM at this time.
	 */
	CVMX_SYNCWS;
	cvmx_write_io(ptr.u64, tag_req.u64);
}
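/*
 * Usage sketch (hypothetical tag and allocator): hand a prepared WQE to the
 * scheduler with an ORDERED tag on input queue 0, group 2. The WQE words are
 * rewritten by the call itself, so only the buffer must be valid here.
 *
 *	cvmx_wqe_t *wqp = alloc_wqe();	// hypothetical allocator
 *
 *	cvmx_pow_work_submit(wqp, 0x12345678, CVMX_POW_TAG_TYPE_ORDERED, 0, 2);
 */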
/**
 * This function sets the group mask for a core. The group mask
 * indicates which groups each core will accept work from.
 *
 * @param core_num core to apply the mask to
 * @param mask     Group mask, one bit for up to 64 groups.
 *	Each 1 bit in the mask enables the core to accept work from
 *	the corresponding group.
 *	The CN68XX supports 64 groups; earlier models only support
 *	16 groups.
 *
 *	The CN78XX in backwards-compatibility mode allows up to 32 groups,
 *	so the 'mask' argument has one bit for each of the legacy
 *	groups, and a '1' in the mask enables a total of 8 groups,
 *	which share the legacy group number and 8 QoS levels,
 *	for the calling processor core.
 *	A '0' in the mask will disable the current core
 *	from receiving work from the associated group.
 */
static inline void cvmx_pow_set_group_mask(u64 core_num, u64 mask)
{
	u64 valid_mask;
	int num_groups = cvmx_pow_num_groups();

	if (num_groups >= 64)
		valid_mask = ~0ull;
	else
		valid_mask = (1ull << num_groups) - 1;
	if ((mask & valid_mask) == 0) {
		printf("ERROR: %s empty group mask disables work on core# %llu, ignored.\n",
		       __func__, (unsigned long long)core_num);
		return;
	}
	cvmx_warn_if(mask & (~valid_mask), "%s group number range exceeded: %#llx\n", __func__,
		     (unsigned long long)mask);
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		unsigned int mask_set;
		cvmx_sso_ppx_sx_grpmskx_t grp_msk;
		unsigned int core, node;
		unsigned int rix;	/* Register index */
		unsigned int grp;	/* Legacy group # */
		unsigned int bit;	/* Bit index */
		unsigned int xgrp;	/* Native group # */

		node = cvmx_coremask_core_to_node(core_num);
		core = cvmx_coremask_core_on_node(core_num);
		/* 78xx: 256 groups divided into 4 x 64-bit registers */
		/* 73xx: 64 groups are in one register */
		for (rix = 0; rix < (cvmx_sso_num_xgrp() >> 6); rix++) {
			/* Pre-set to all 0's */
			grp_msk.u64 = 0;
			for (bit = 0; bit < 64; bit++) {
				/* 8-bit native XGRP number */
				xgrp = (rix << 6) | bit;
				/* Legacy 5-bit group number */
				grp = (xgrp >> 3) & 0x1f;
				/* Inspect the legacy mask by legacy group */
				if (mask & (1ull << grp))
					grp_msk.s.grp_msk |= 1ull << bit;
			}
			for (mask_set = 0; mask_set < cvmx_sso_num_maskset(); mask_set++) {
				csr_wr_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, mask_set, rix),
					    grp_msk.u64);
			}
		}
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		cvmx_sso_ppx_grp_msk_t grp_msk;

		grp_msk.s.grp_msk = mask;
		csr_wr(CVMX_SSO_PPX_GRP_MSK(core_num), grp_msk.u64);
	} else {
		cvmx_pow_pp_grp_mskx_t grp_msk;

		grp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));
		grp_msk.s.grp_msk = mask & 0xffff;
		csr_wr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
	}
}
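/*
 * Usage sketch: restrict the calling core to work from groups 0 and 1 only
 * (cvmx_get_core_num() is the executive's own-core helper).
 *
 *	cvmx_pow_set_group_mask(cvmx_get_core_num(), (1ull << 0) | (1ull << 1));
 */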
/**
 * This function gets the group mask for a core. The group mask
 * indicates which groups each core will accept work from.
 *
 * @param core_num core to get the mask for
 * Return: Group mask, one bit for up to 64 groups.
 *	Each 1 bit in the mask enables the core to accept work from
 *	the corresponding group.
 *	The CN68XX supports 64 groups; earlier models only support
 *	16 groups.
 *
 *	The CN78XX in backwards-compatibility mode allows up to 32 groups,
 *	so the mask has one bit for each of the legacy groups, and a '1'
 *	in the mask means a total of 8 groups, which share the legacy
 *	group number and 8 QoS levels, are enabled for the calling
 *	processor core. A '0' in the mask means the current core is
 *	disabled from receiving work from the associated group.
 */
static inline u64 cvmx_pow_get_group_mask(u64 core_num)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_ppx_sx_grpmskx_t grp_msk;
		unsigned int core, node, i;
		int rix;	/* Register index */
		u64 mask = 0;

		node = cvmx_coremask_core_to_node(core_num);
		core = cvmx_coremask_core_on_node(core_num);
		/* 78xx: 256 groups divided into 4 x 64-bit registers */
		/* 73xx: 64 groups are in one register */
		for (rix = (cvmx_sso_num_xgrp() >> 6) - 1; rix >= 0; rix--) {
			/* Read only mask_set=0 (both sets were written the same) */
			grp_msk.u64 = csr_rd_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, 0, rix));
			/* ASSUME: (this is how the mask bits got written) */
			/* grp_mask[7:0]: all bits 0..7 are the same */
			/* grp_mask[15:8]: all bits 8..15 are the same, etc. */
			/* DO: mask[7:0] = grp_mask.u64[56,48,40,32,24,16,8,0] */
			for (i = 0; i < 8; i++)
				mask |= (grp_msk.u64 & ((u64)1 << (i * 8))) >> (7 * i);
			/* We collected 8 MSBs in mask[7:0]; <<= 8 and continue */
			if (cvmx_likely(rix != 0))
				mask <<= 8;
		}
		return mask & 0xFFFFFFFF;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		cvmx_sso_ppx_grp_msk_t grp_msk;

		grp_msk.u64 = csr_rd(CVMX_SSO_PPX_GRP_MSK(core_num));
		return grp_msk.u64;
	} else {
		cvmx_pow_pp_grp_mskx_t grp_msk;

		grp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));
		return grp_msk.u64 & 0xffff;
	}
}
/*
 * Returns 0 if the 78xx (73xx, 75xx) is not programmed in legacy compatible mode
 * Returns 1 if the 78xx (73xx, 75xx) is programmed in legacy compatible mode
 * Returns 1 if the octeon model is not 78xx (73xx, 75xx)
 */
static inline u64 cvmx_pow_is_legacy78mode(u64 core_num)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_ppx_sx_grpmskx_t grp_msk0, grp_msk1;
		unsigned int core, node, i;
		int rix;	/* Register index */
		u64 mask = 0;

		node = cvmx_coremask_core_to_node(core_num);
		core = cvmx_coremask_core_on_node(core_num);
		/* 78xx: 256 groups divided into 4 x 64-bit registers */
		/* 73xx: 64 groups are in one register */
		/* 1) For the 78_SSO to be in legacy compatible mode,
		 * both mask_sets must be programmed the same.
		 */
		for (rix = (cvmx_sso_num_xgrp() >> 6) - 1; rix >= 0; rix--) {
			/* Read both mask sets and compare them */
			grp_msk0.u64 = csr_rd_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, 0, rix));
			grp_msk1.u64 = csr_rd_node(node, CVMX_SSO_PPX_SX_GRPMSKX(core, 1, rix));
			if (grp_msk0.u64 != grp_msk1.u64)
				return 0;
			/* (This is how the mask bits should be written:) */
			/* grp_mask[7:0]: all bits 0..7 are the same */
			/* grp_mask[15:8]: all bits 8..15 are the same, etc. */
			/* 2) For the 78_SSO to be in legacy compatible mode,
			 * the above must also hold (test only mask_set=0).
			 */
			for (i = 0; i < 8; i++) {
				mask = (grp_msk0.u64 >> (i << 3)) & 0xFF;
				if (!(mask == 0 || mask == 0xFF))
					return 0;
			}
		}
		/* If we get here, the 78_SSO is in legacy compatible mode */
	}
	return 1;	/* The SSO/POW is in legacy (or compatible) mode */
}
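/*
 * Usage sketch: guard legacy-style group programming at init time; on a 78xx
 * configured with split mask sets this falls through and does nothing.
 *
 *	if (cvmx_pow_is_legacy78mode(cvmx_get_core_num()))
 *		cvmx_pow_set_group_mask(cvmx_get_core_num(), 0x3);
 */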
/**
 * This function sets POW static priorities for a core. Each input queue has
 * an associated priority value.
 *
 * @param core_num core to apply priorities to
 * @param priority Vector of 8 priorities, one per POW Input Queue (0-7).
 *	Highest priority is 0 and lowest is 7. A priority value
 *	of 0xF instructs the POW to skip the Input Queue when
 *	scheduling to this specific core.
 *	NOTE: priorities should not have gaps in values, meaning
 *	{0,1,1,1,1,1,1,1} is a valid configuration while
 *	{0,2,2,2,2,2,2,2} is not.
 */
static inline void cvmx_pow_set_priority(u64 core_num, const u8 priority[])
{
	/* Detect gaps between priorities and flag an error */
	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		int i;
		u32 prio_mask = 0;

		for (i = 0; i < 8; i++)
			if (priority[i] != 0xF)
				prio_mask |= 1 << priority[i];
		if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
			debug("ERROR: POW static priorities should be contiguous (0x%llx)\n",
			      (unsigned long long)prio_mask);
			return;
		}
	}
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		unsigned int group;
		unsigned int node = cvmx_get_node_num();
		cvmx_sso_grpx_pri_t grp_pri;

		/* grp_pri.s.weight = 0x3f and grp_pri.s.affinity = 0xf would
		 * be overwritten anyway by the next csr_rd_node(..).
		 */
		for (group = 0; group < cvmx_sso_num_xgrp(); group++) {
			grp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
			grp_pri.s.pri = priority[group & 0x7];
			csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grp_pri.u64);
		}
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		cvmx_sso_ppx_qos_pri_t qos_pri;

		qos_pri.u64 = csr_rd(CVMX_SSO_PPX_QOS_PRI(core_num));
		qos_pri.s.qos0_pri = priority[0];
		qos_pri.s.qos1_pri = priority[1];
		qos_pri.s.qos2_pri = priority[2];
		qos_pri.s.qos3_pri = priority[3];
		qos_pri.s.qos4_pri = priority[4];
		qos_pri.s.qos5_pri = priority[5];
		qos_pri.s.qos6_pri = priority[6];
		qos_pri.s.qos7_pri = priority[7];
		csr_wr(CVMX_SSO_PPX_QOS_PRI(core_num), qos_pri.u64);
	} else {
		/* POW priorities on CN5xxx .. CN66XX */
		cvmx_pow_pp_grp_mskx_t grp_msk;

		grp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));
		grp_msk.s.qos0_pri = priority[0];
		grp_msk.s.qos1_pri = priority[1];
		grp_msk.s.qos2_pri = priority[2];
		grp_msk.s.qos3_pri = priority[3];
		grp_msk.s.qos4_pri = priority[4];
		grp_msk.s.qos5_pri = priority[5];
		grp_msk.s.qos6_pri = priority[6];
		grp_msk.s.qos7_pri = priority[7];
		csr_wr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
	}
}
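/*
 * Usage sketch: give input queue 0 strict priority on the calling core and
 * keep the other queues at one equal, lower level, which satisfies the
 * "no gaps" rule described above.
 *
 *	static const u8 prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };
 *
 *	cvmx_pow_set_priority(cvmx_get_core_num(), prio);
 */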
/**
 * This function gets POW static priorities for a core. Each input queue has
 * an associated priority value.
 *
 * @param[in] core_num core to get priorities for
 * @param[out] priority Pointer to a u8[] where to return the priorities.
 *	Vector of 8 priorities, one per POW Input Queue (0-7).
 *	Highest priority is 0 and lowest is 7. A priority value
 *	of 0xF instructs the POW to skip the Input Queue when
 *	scheduling to this specific core.
 *	NOTE: priorities should not have gaps in values, meaning
 *	{0,1,1,1,1,1,1,1} is a valid configuration while
 *	{0,2,2,2,2,2,2,2} is not.
 */
static inline void cvmx_pow_get_priority(u64 core_num, u8 priority[])
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		unsigned int group;
		unsigned int node = cvmx_get_node_num();
		cvmx_sso_grpx_pri_t grp_pri;

		/* Read the priority only from the first 8 groups; */
		/* the remaining groups are programmed the same (periodically). */
		for (group = 0; group < 8 /*cvmx_sso_num_xgrp() */; group++) {
			grp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
			priority[group /* & 0x7 */] = grp_pri.s.pri;
		}
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		cvmx_sso_ppx_qos_pri_t qos_pri;

		qos_pri.u64 = csr_rd(CVMX_SSO_PPX_QOS_PRI(core_num));
		priority[0] = qos_pri.s.qos0_pri;
		priority[1] = qos_pri.s.qos1_pri;
		priority[2] = qos_pri.s.qos2_pri;
		priority[3] = qos_pri.s.qos3_pri;
		priority[4] = qos_pri.s.qos4_pri;
		priority[5] = qos_pri.s.qos5_pri;
		priority[6] = qos_pri.s.qos6_pri;
		priority[7] = qos_pri.s.qos7_pri;
	} else {
		/* POW priorities on CN5xxx .. CN66XX */
		cvmx_pow_pp_grp_mskx_t grp_msk;

		grp_msk.u64 = csr_rd(CVMX_POW_PP_GRP_MSKX(core_num));
		priority[0] = grp_msk.s.qos0_pri;
		priority[1] = grp_msk.s.qos1_pri;
		priority[2] = grp_msk.s.qos2_pri;
		priority[3] = grp_msk.s.qos3_pri;
		priority[4] = grp_msk.s.qos4_pri;
		priority[5] = grp_msk.s.qos5_pri;
		priority[6] = grp_msk.s.qos6_pri;
		priority[7] = grp_msk.s.qos7_pri;
	}
	/* Detect gaps between priorities and flag an error (optional) */
	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		int i;
		u32 prio_mask = 0;

		for (i = 0; i < 8; i++)
			if (priority[i] != 0xF)
				prio_mask |= 1 << priority[i];
		if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
			debug("ERROR:%s: POW static priorities should be contiguous (0x%llx)\n",
			      __func__, (unsigned long long)prio_mask);
			return;
		}
	}
}
static inline void cvmx_sso_get_group_priority(int node, cvmx_xgrp_t xgrp, int *priority,
					       int *weight, int *affinity)
{
	cvmx_sso_grpx_pri_t grp_pri;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		debug("ERROR: %s is not supported on this chip\n", __func__);
		return;
	}
	grp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(xgrp.xgrp));
	*affinity = grp_pri.s.affinity;
	*priority = grp_pri.s.pri;
	*weight = grp_pri.s.weight;
}
/**
 * Performs a tag switch and then an immediate deschedule. This completes
 * immediately, so completion must not be waited for. This function does NOT
 * update the WQE in DRAM to match the arguments.
 *
 * This function does NOT wait for any prior tag switches to complete, so the
 * calling code must do this.
 *
 * Note the following CAVEAT of the Octeon HW behavior when
 * re-scheduling DE-SCHEDULEd items whose (next) state is
 * ORDERED:
 *   - If there are no switches pending at the time that the
 *     HW executes the de-schedule, the HW will only re-schedule
 *     the head of the FIFO associated with the given tag. This
 *     means that in many respects, the HW treats this ORDERED
 *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH
 *     case (to an ORDERED tag), the HW will do the switch
 *     before the deschedule whenever it is possible to do
 *     the switch immediately, so it may often look like
 *     this case.
 *   - If there is a pending switch to ORDERED at the time
 *     the HW executes the de-schedule, the HW will perform
 *     the switch at the time it re-schedules, and will be
 *     able to reschedule any/all of the entries with the
 *     same tag.
 * Due to this behavior, the RECOMMENDATION to software is
 * to use a (next) state of ATOMIC when it DE-SCHEDULEs. If an
 * ORDERED tag is what was really desired, SW can choose to
 * immediately switch to an ORDERED tag after the work (that has
 * an ATOMIC tag) is re-scheduled. Note that since there are never
 * any tag switches pending when the HW re-schedules, this switch
 * can be IMMEDIATE upon the reception of the pointer during the
 * re-schedule.
 *
 * @param tag      New tag value
 * @param tag_type New tag type
 * @param group    New group value
 * @param no_sched Control whether this work queue entry will be rescheduled.
 *	- 1 : don't schedule this work
 *	- 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched_nocheck(u32 tag, cvmx_pow_tag_type_t tag_type, u64 group,
						   u64 no_sched)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,
			     "%s called with NULL tag. Deschedule not allowed from NULL state\n",
			     __func__);
		cvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) &&
			     (tag_type != CVMX_POW_TAG_TYPE_ATOMIC),
			     "%s called where neither the before nor the after tag is ATOMIC\n",
			     __func__);
	}
	tag_req.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_t *wqp = cvmx_pow_get_current_wqp();

		if (!wqp) {
			debug("ERROR: Failed to get WQE, %s\n", __func__);
			return;
		}
		group &= 0x1f;
		wqp->word1.cn78xx.tag = tag;
		wqp->word1.cn78xx.tag_type = tag_type;
		wqp->word1.cn78xx.grp = group << 3;
		CVMX_SYNCWS;
		tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
		tag_req.s_cn78xx_other.type = tag_type;
		tag_req.s_cn78xx_other.grp = group << 3;
		tag_req.s_cn78xx_other.no_sched = no_sched;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		group &= 0x3f;
		tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
		tag_req.s_cn68xx_other.tag = tag;
		tag_req.s_cn68xx_other.type = tag_type;
		tag_req.s_cn68xx_other.grp = group;
		tag_req.s_cn68xx_other.no_sched = no_sched;
	} else {
		group &= 0x0f;
		tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
		tag_req.s_cn38xx.tag = tag;
		tag_req.s_cn38xx.type = tag_type;
		tag_req.s_cn38xx.grp = group;
		tag_req.s_cn38xx.no_sched = no_sched;
	}
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_TAG3;
		ptr.s_cn78xx.node = cvmx_get_node_num();
		ptr.s_cn78xx.tag = tag;
	} else {
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_TAG3;
	}
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Performs a tag switch and then an immediate deschedule. This completes
 * immediately, so completion must not be waited for. This function does NOT
 * update the WQE in DRAM to match the arguments.
 *
 * This function waits for any prior tag switches to complete, so the
 * calling code may call this function with a pending tag switch.
 *
 * Note the following CAVEAT of the Octeon HW behavior when
 * re-scheduling DE-SCHEDULEd items whose (next) state is
 * ORDERED:
 *   - If there are no switches pending at the time that the
 *     HW executes the de-schedule, the HW will only re-schedule
 *     the head of the FIFO associated with the given tag. This
 *     means that in many respects, the HW treats this ORDERED
 *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH
 *     case (to an ORDERED tag), the HW will do the switch
 *     before the deschedule whenever it is possible to do
 *     the switch immediately, so it may often look like
 *     this case.
 *   - If there is a pending switch to ORDERED at the time
 *     the HW executes the de-schedule, the HW will perform
 *     the switch at the time it re-schedules, and will be
 *     able to reschedule any/all of the entries with the
 *     same tag.
 * Due to this behavior, the RECOMMENDATION to software is
 * to use a (next) state of ATOMIC when it DE-SCHEDULEs. If an
 * ORDERED tag is what was really desired, SW can choose to
 * immediately switch to an ORDERED tag after the work (that has
 * an ATOMIC tag) is re-scheduled. Note that since there are never
 * any tag switches pending when the HW re-schedules, this switch
 * can be IMMEDIATE upon the reception of the pointer during the
 * re-schedule.
 *
 * @param tag      New tag value
 * @param tag_type New tag type
 * @param group    New group value
 * @param no_sched Control whether this work queue entry will be rescheduled.
 *	- 1 : don't schedule this work
 *	- 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched(u32 tag, cvmx_pow_tag_type_t tag_type, u64 group,
					   u64 no_sched)
{
	/* Need to make sure any writes to the work queue entry are complete */
	CVMX_SYNCWS;
	/* Ensure that there is not a pending tag switch, as a tag switch
	 * cannot be started if a previous switch is still pending.
	 */
	cvmx_pow_tag_sw_wait();
	cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
}
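/*
 * Usage sketch (hypothetical tag/group values): park the current ATOMIC work
 * item and let the SSO reschedule it later to any core with group 3 enabled.
 *
 *	cvmx_pow_tag_sw_desched(0x5678, CVMX_POW_TAG_TYPE_ATOMIC, 3, 0);
 */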
/**
 * Deschedules the current work queue entry.
 *
 * @param no_sched no schedule flag value to be set on the work queue entry.
 *	If this is set the entry will not be rescheduled.
 */
static inline void cvmx_pow_desched(u64 no_sched)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,
			     "%s called with NULL tag. Deschedule not expected from NULL state\n",
			     __func__);
	}
	/* Need to make sure any writes to the work queue entry are complete */
	CVMX_SYNCWS;
	tag_req.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_DESCH;
		tag_req.s_cn78xx_other.no_sched = no_sched;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_DESCH;
		tag_req.s_cn68xx_other.no_sched = no_sched;
	} else {
		tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_DESCH;
		tag_req.s_cn38xx.no_sched = no_sched;
	}
	ptr.u64 = 0;
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
		ptr.s_cn78xx.is_io = 1;
		ptr.s_cn78xx.did = CVMX_OCT_DID_TAG_TAG3;
		ptr.s_cn78xx.node = cvmx_get_node_num();
	} else {
		ptr.s.mem_region = CVMX_IO_SEG;
		ptr.s.is_io = 1;
		ptr.s.did = CVMX_OCT_DID_TAG_TAG3;
	}
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/******************************************************************************/
/* OCTEON3-specific functions.                                                */
/******************************************************************************/
/**
 * This function sets the affinity of a group to the cores in the 78xx.
 * It sets up all the cores in core_mask to accept work from the specified group.
 *
 * @param xgrp      Group to accept work from, 0 - 255.
 * @param core_mask Mask of all the cores which will accept work from this group
 * @param mask_set  Every core has a set of 2 masks which can be set to accept
 *	work from 256 groups. At the time of get_work, cores can choose which
 *	mask_set to get work from. 'mask_set' values range from 0 to 3, where
 *	each of the two bits represents a mask set. Cores will be added to the
 *	mask set with the corresponding bit set, and removed from the mask set
 *	with the corresponding bit clear.
 * Note: cores can only accept work from SSO groups on the same node,
 * so the node number for the group is derived from the core number.
 */
static inline void cvmx_sso_set_group_core_affinity(cvmx_xgrp_t xgrp,
						    const struct cvmx_coremask *core_mask,
						    u8 mask_set)
{
	cvmx_sso_ppx_sx_grpmskx_t grp_msk;
	int core;
	int grp_index = xgrp.xgrp >> 6;
	int bit_pos = xgrp.xgrp % 64;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		debug("ERROR: %s is not supported on this chip\n", __func__);
		return;
	}
	cvmx_coremask_for_each_core(core, core_mask)
	{
		unsigned int node, ncore;
		u64 reg_addr;

		node = cvmx_coremask_core_to_node(core);
		ncore = cvmx_coremask_core_on_node(core);
		reg_addr = CVMX_SSO_PPX_SX_GRPMSKX(ncore, 0, grp_index);
		grp_msk.u64 = csr_rd_node(node, reg_addr);
		if (mask_set & 1)
			grp_msk.s.grp_msk |= (1ull << bit_pos);
		else
			grp_msk.s.grp_msk &= ~(1ull << bit_pos);
		csr_wr_node(node, reg_addr, grp_msk.u64);
		reg_addr = CVMX_SSO_PPX_SX_GRPMSKX(ncore, 1, grp_index);
		grp_msk.u64 = csr_rd_node(node, reg_addr);
		if (mask_set & 2)
			grp_msk.s.grp_msk |= (1ull << bit_pos);
		else
			grp_msk.s.grp_msk &= ~(1ull << bit_pos);
		csr_wr_node(node, reg_addr, grp_msk.u64);
	}
}
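/*
 * Usage sketch (hypothetical coremask): enable native group 17 in mask set 0
 * for every core in 'cm', and clear it from mask set 1 on those same cores.
 *
 *	cvmx_xgrp_t g = { .xgrp = 17 };
 *	struct cvmx_coremask cm;	// populated elsewhere
 *
 *	cvmx_sso_set_group_core_affinity(g, &cm, 1);
 */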
/**
 * This function sets the priority and group-affinity arbitration for each group.
 *
 * @param node     Node number
 * @param xgrp     Group 0 - 255 to apply the mask parameters to
 * @param priority Priority of the group relative to other groups
 *	0x0 - highest priority
 *	0x7 - lowest priority
 * @param weight   Cross-group arbitration weight to apply to this group.
 *	Valid values are 1-63; the h/w default is 0x3f.
 * @param affinity Processor-affinity arbitration weight to apply to this
 *	group. If zero, affinity is disabled.
 *	Valid values are 0-15; the h/w default is 0xf.
 * @param modify_mask Mask of the parameters which need to be modified
 *	(enum cvmx_sso_group_modify_mask):
 *	to modify only priority -- set bit0
 *	to modify only weight   -- set bit1
 *	to modify only affinity -- set bit2
 */
static inline void cvmx_sso_set_group_priority(int node, cvmx_xgrp_t xgrp, int priority, int weight,
					       int affinity,
					       enum cvmx_sso_group_modify_mask modify_mask)
{
	cvmx_sso_grpx_pri_t grp_pri;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		debug("ERROR: %s is not supported on this chip\n", __func__);
		return;
	}
	if (weight <= 0)
		weight = 0x3f;	/* Force the HW default when out of range */
	grp_pri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(xgrp.xgrp));
	if (grp_pri.s.weight == 0)
		grp_pri.s.weight = 0x3f;
	if (modify_mask & CVMX_SSO_MODIFY_GROUP_PRIORITY)
		grp_pri.s.pri = priority;
	if (modify_mask & CVMX_SSO_MODIFY_GROUP_WEIGHT)
		grp_pri.s.weight = weight;
	if (modify_mask & CVMX_SSO_MODIFY_GROUP_AFFINITY)
		grp_pri.s.affinity = affinity;
	csr_wr_node(node, CVMX_SSO_GRPX_PRI(xgrp.xgrp), grp_pri.u64);
}
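/*
 * Usage sketch: raise native group 17 on node 0 to the highest priority
 * while leaving its weight and affinity untouched.
 *
 *	cvmx_xgrp_t g = { .xgrp = 17 };
 *
 *	cvmx_sso_set_group_priority(0, g, 0, 0, 0, CVMX_SSO_MODIFY_GROUP_PRIORITY);
 */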
/**
 * Asynchronous work request.
 * Only works on the CN78XX style SSO.
 *
 * Work is requested from the SSO unit, and should later be checked with
 * the function cvmx_pow_work_response_async.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that there is not a pending tag switch.
 *
 * @param scr_addr Scratch memory address that the response will be returned
 *	to, which is either a valid WQE, or a response with the invalid bit
 *	set. Byte address, must be 8 byte aligned.
 * @param xgrp Group to receive work for (0-255).
 * @param wait
 *	1 to cause the response to wait for work to become available (or time out)
 *	0 to cause the response to return immediately
 */
static inline void cvmx_sso_work_request_grp_async_nocheck(int scr_addr, cvmx_xgrp_t xgrp,
							   cvmx_pow_wait_t wait)
{
	cvmx_pow_iobdma_store_t data;
	unsigned int node = cvmx_get_node_num();

	if (CVMX_ENABLE_POW_CHECKS) {
		__cvmx_pow_warn_if_pending_switch(__func__);
		cvmx_warn_if(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE), "Not CN78XX");
	}
	/* scr_addr must be 8 byte aligned */
	data.u64 = 0;
	data.s_cn78xx.scraddr = scr_addr >> 3;
	data.s_cn78xx.len = 1;
	data.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;
	data.s_cn78xx.grouped = 1;
	data.s_cn78xx.index_grp_mask = (node << 8) | xgrp.xgrp;
	data.s_cn78xx.wait = wait;
	data.s_cn78xx.node = node;
	cvmx_send_single(data.u64);
}
/**
 * Synchronous work request from the node-local SSO without verifying
 * pending tag switch. It requests work from a specific SSO group.
 *
 * @param lgrp The local group number (within the SSO of the node of the
 *	caller) from which to get the work.
 * @param wait When set, the call stalls until work becomes available, or
 *	times out. If not set, returns immediately.
 *
 * Return: Returns the WQE pointer from the SSO.
 *	Returns NULL if no work was available.
 */
static inline void *cvmx_sso_work_request_grp_sync_nocheck(unsigned int lgrp, cvmx_pow_wait_t wait)
{
	cvmx_pow_load_addr_t ptr;
	cvmx_pow_tag_load_resp_t result;
	unsigned int node = cvmx_get_node_num() & 3;

	if (CVMX_ENABLE_POW_CHECKS) {
		__cvmx_pow_warn_if_pending_switch(__func__);
		cvmx_warn_if(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE), "Not CN78XX");
	}
	ptr.u64 = 0;
	ptr.swork_78xx.mem_region = CVMX_IO_SEG;
	ptr.swork_78xx.is_io = 1;
	ptr.swork_78xx.did = CVMX_OCT_DID_TAG_SWTAG;
	ptr.swork_78xx.node = node;
	ptr.swork_78xx.grouped = 1;
	ptr.swork_78xx.index = (lgrp & 0xff) | node << 8;
	ptr.swork_78xx.wait = wait;
	result.u64 = csr_rd(ptr.u64);
	if (result.s_work.no_work)
		return NULL;
	else
		return cvmx_phys_to_ptr(result.s_work.addr);
}
/**
 * Synchronous work request from the node-local SSO.
 * It requests work from a specific SSO group.
 * This function waits for any previous tag switch to complete before
 * requesting the new work.
 *
 * @param lgrp The node-local group number from which to get the work.
 * @param wait When set, the call stalls until work becomes available, or
 *	times out. If not set, returns immediately.
 *
 * Return: The WQE pointer, or NULL if work is not available.
 */
static inline void *cvmx_sso_work_request_grp_sync(unsigned int lgrp, cvmx_pow_wait_t wait)
{
	cvmx_pow_tag_sw_wait();
	return cvmx_sso_work_request_grp_sync_nocheck(lgrp, wait);
}
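/*
 * Usage sketch: poll local group 5 once, without blocking, and process the
 * WQE if one was ready (process_packet() is a hypothetical handler).
 *
 *	cvmx_wqe_t *wqe = cvmx_sso_work_request_grp_sync(5, CVMX_POW_NO_WAIT);
 *
 *	if (wqe)
 *		process_packet(wqe);
 */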
/**
 * This function sets the group mask for a core. The group mask bits
 * indicate which groups each core will accept work from.
 *
 * @param core_num  Processor core to apply the mask to.
 * @param mask_set  7XXX has 2 sets of masks per core.
 *	Bit 0 represents the first mask set, bit 1 -- the second.
 * @param xgrp_mask Group mask array.
 *	The total number of groups is divided into a number of
 *	64-bit mask sets. Each bit in the mask, if set, enables
 *	the core to accept work from the corresponding group.
 *
 * NOTE: Each core can be configured to accept work in accordance with both
 * mask sets, with the first having higher precedence over the second,
 * or to accept work in accordance with just one of the two mask sets.
 * The 'core_num' argument represents a processor core on any node
 * in a coherent multi-chip system.
 *
 * If the 'mask_set' argument is 3, both mask sets are configured
 * with the same value (which is not typically the intention),
 * so keep in mind the function needs to be called twice
 * to set a different value into each of the mask sets,
 * once with 'mask_set=1' and a second time with 'mask_set=2'.
 */
static inline void cvmx_pow_set_xgrp_mask(u64 core_num, u8 mask_set, const u64 xgrp_mask[])
{
	unsigned int grp, node, core;
	u64 reg_addr;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		debug("ERROR: %s is not supported on this chip\n", __func__);
		return;
	}
	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_warn_if(((mask_set < 1) || (mask_set > 3)), "Invalid mask set");
	}
	if ((mask_set < 1) || (mask_set > 3))
		mask_set = 3;
	node = cvmx_coremask_core_to_node(core_num);
	core = cvmx_coremask_core_on_node(core_num);
	for (grp = 0; grp < (cvmx_sso_num_xgrp() >> 6); grp++) {
		if (mask_set & 1) {
			reg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 0, grp);
			csr_wr_node(node, reg_addr, xgrp_mask[grp]);
		}
		if (mask_set & 2) {
			reg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 1, grp);
			csr_wr_node(node, reg_addr, xgrp_mask[grp]);
		}
	}
}
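/*
 * Usage sketch: program both mask sets of core 0 so it accepts work from
 * every native group (array sized u64[8], matching the mask[8] convention
 * used by the getter below).
 *
 *	u64 all_groups[8] = { ~0ull, ~0ull, ~0ull, ~0ull,
 *			      ~0ull, ~0ull, ~0ull, ~0ull };
 *
 *	cvmx_pow_set_xgrp_mask(0, 3, all_groups);
 */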
/**
 * This function gets the group mask for a core. The group mask bits
 * indicate which groups each core will accept work from.
 *
 * @param core_num  Processor core to get the mask for.
 * @param mask_set  7XXX has 2 sets of masks per core.
 *	Bit 0 represents the first mask set, bit 1 -- the second.
 * @param xgrp_mask Pointer to a u64 mask[8] output array.
 *	The total number of groups is divided into a number of
 *	64-bit mask sets. Each bit in the mask represents whether
 *	the core accepts work from the corresponding group.
 *
 * NOTE: Each core can be configured to accept work in accordance with both
 * mask sets, with the first having higher precedence over the second,
 * or to accept work in accordance with just one of the two mask sets.
 * The 'core_num' argument represents a processor core on any node
 * in a coherent multi-chip system.
 */
static inline void cvmx_pow_get_xgrp_mask(u64 core_num, u8 mask_set, u64 *xgrp_mask)
{
	cvmx_sso_ppx_sx_grpmskx_t grp_msk;
	unsigned int grp, node, core;
	u64 reg_addr;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		debug("ERROR: %s is not supported on this chip\n", __func__);
		return;
	}
	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_warn_if(mask_set != 1 && mask_set != 2, "Invalid mask set");
	}
	node = cvmx_coremask_core_to_node(core_num);
	core = cvmx_coremask_core_on_node(core_num);
	for (grp = 0; grp < cvmx_sso_num_xgrp() >> 6; grp++) {
		if (mask_set & 1) {
			reg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 0, grp);
			grp_msk.u64 = csr_rd_node(node, reg_addr);
			xgrp_mask[grp] = grp_msk.s.grp_msk;
		}
		if (mask_set & 2) {
			reg_addr = CVMX_SSO_PPX_SX_GRPMSKX(core, 1, grp);
			grp_msk.u64 = csr_rd_node(node, reg_addr);
			xgrp_mask[grp] = grp_msk.s.grp_msk;
		}
	}
}
/**
 * Executes the SSO SWTAG command.
 * This is similar to the cvmx_pow_tag_sw() function, but uses a linear
 * (vs. integrated group-qos) group index.
 */
static inline void cvmx_pow_tag_sw_node(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,
					int node)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;

	if (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {
		debug("ERROR: %s is supported on OCTEON3 only\n", __func__);
		return;
	}
	CVMX_SYNCWS;
	cvmx_pow_tag_sw_wait();
	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,
			     "%s called with NULL tag\n", __func__);
		cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),
			     "%s called to perform a tag switch to the same tag\n", __func__);
		cvmx_warn_if(
			tag_type == CVMX_POW_TAG_TYPE_NULL,
			"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
			__func__);
	}
	wqp->word1.cn78xx.tag = tag;
	wqp->word1.cn78xx.tag_type = tag_type;
	CVMX_SYNCWS;
	tag_req.u64 = 0;
	tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG;
	tag_req.s_cn78xx_other.type = tag_type;
	ptr.u64 = 0;
	ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
	ptr.s_cn78xx.is_io = 1;
	ptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;
	ptr.s_cn78xx.node = node;
	ptr.s_cn78xx.tag = tag;
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Executes the SSO SWTAG_FULL command.
 * This is similar to the cvmx_pow_tag_sw_full() function, but
 * uses a linear (vs. integrated group-qos) group index.
 */
static inline void cvmx_pow_tag_sw_full_node(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,
					     u8 xgrp, int node)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;
	u16 gxgrp;

	if (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {
		debug("ERROR: %s is supported on OCTEON3 only\n", __func__);
		return;
	}
	/* Ensure that there is not a pending tag switch, as a tag switch
	 * cannot be started if a previous switch is still pending.
	 */
	CVMX_SYNCWS;
	cvmx_pow_tag_sw_wait();
	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag),
			     "%s called to perform a tag switch to the same tag\n", __func__);
		cvmx_warn_if(
			tag_type == CVMX_POW_TAG_TYPE_NULL,
			"%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
			__func__);
		if ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())
			cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(),
				     "%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
				     __func__, wqp, cvmx_pow_get_current_wqp());
	}
	gxgrp = node;
	gxgrp = gxgrp << 8 | xgrp;
	wqp->word1.cn78xx.grp = gxgrp;
	wqp->word1.cn78xx.tag = tag;
	wqp->word1.cn78xx.tag_type = tag_type;
	CVMX_SYNCWS;
	tag_req.u64 = 0;
	tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
	tag_req.s_cn78xx_other.type = tag_type;
	tag_req.s_cn78xx_other.grp = gxgrp;
	tag_req.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);
	ptr.u64 = 0;
	ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
	ptr.s_cn78xx.is_io = 1;
	ptr.s_cn78xx.did = CVMX_OCT_DID_TAG_SWTAG;
	ptr.s_cn78xx.node = node;
	ptr.s_cn78xx.tag = tag;
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Submits work to an SSO group on any OCI node.
 * This function updates the work queue entry in DRAM to match
 * the arguments given.
 * Note that the tag provided is for the work queue entry submitted,
 * and is unrelated to the tag that the core currently holds.
 *
 * @param wqp      pointer to the work queue entry to submit.
 *                 This entry is updated to match the other parameters
 * @param tag      tag value to be assigned to the work queue entry
 * @param tag_type type of tag
 * @param xgrp     native CN78XX group in the range 0..255
 * @param node     The OCI node number for the target group
 *
 * When this function is called on a model prior to CN78XX, which does
 * not support OCI nodes, the 'node' argument is ignored, and the 'xgrp'
 * parameter is converted into 'qos' (the lower 3 bits) and 'grp' (the higher
 * 5 bits), following the backward-compatibility scheme of translating
 * between new and old style group numbers.
 */
static inline void cvmx_pow_work_submit_node(cvmx_wqe_t *wqp, u32 tag, cvmx_pow_tag_type_t tag_type,
					     u8 xgrp, u8 node)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;
	u16 group;

	if (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {
		debug("ERROR: %s is supported on OCTEON3 only\n", __func__);
		return;
	}
	group = node;
	group = group << 8 | xgrp;
	wqp->word1.cn78xx.tag = tag;
	wqp->word1.cn78xx.tag_type = tag_type;
	wqp->word1.cn78xx.grp = group;
	CVMX_SYNCWS;
	tag_req.u64 = 0;
	tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_ADDWQ;
	tag_req.s_cn78xx_other.type = tag_type;
	tag_req.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);
	tag_req.s_cn78xx_other.grp = group;
	ptr.u64 = 0;
	ptr.s_cn78xx.did = 0x66; // CVMX_OCT_DID_TAG_TAG6;
	ptr.s_cn78xx.mem_region = CVMX_IO_SEG;
	ptr.s_cn78xx.is_io = 1;
	ptr.s_cn78xx.node = node;
	ptr.s_cn78xx.tag = tag;
	/* SYNC write to memory before the work submit. This is necessary
	 * as the POW may read values from DRAM at this time.
	 */
	CVMX_SYNCWS;
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * Executes the SSO SWTAG_DESCHED operation.
 * This is similar to the cvmx_pow_tag_sw_desched() function, but
 * uses a linear (vs. unified group-qos) group index.
 */
static inline void cvmx_pow_tag_sw_desched_node(cvmx_wqe_t *wqe, u32 tag,
						cvmx_pow_tag_type_t tag_type, u8 xgrp, u64 no_sched,
						u8 node)
{
	union cvmx_pow_tag_req_addr ptr;
	cvmx_pow_tag_req_t tag_req;
	u16 group;

	if (cvmx_unlikely(!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))) {
		debug("ERROR: %s is supported on OCTEON3 only\n", __func__);
		return;
	}
	/* Need to make sure any writes to the work queue entry are complete */
	CVMX_SYNCWS;
	/*
	 * Ensure that there is not a pending tag switch, as a tag switch cannot
	 * be started if a previous switch is still pending.
	 */
	cvmx_pow_tag_sw_wait();
	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_info_t current_tag;

		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL,
			     "%s called with NULL_NULL tag\n", __func__);
		cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL,
			     "%s called with NULL tag. Deschedule not allowed from NULL state\n",
			     __func__);
		cvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) &&
			     (tag_type != CVMX_POW_TAG_TYPE_ATOMIC),
			     "%s called where neither the before nor the after tag is ATOMIC\n",
			     __func__);
	}
	group = node;
	group = group << 8 | xgrp;
	wqe->word1.cn78xx.tag = tag;
	wqe->word1.cn78xx.tag_type = tag_type;
	wqe->word1.cn78xx.grp = group;
	CVMX_SYNCWS;
	tag_req.u64 = 0;
	tag_req.s_cn78xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
	tag_req.s_cn78xx_other.type = tag_type;
	tag_req.s_cn78xx_other.grp = group;
	tag_req.s_cn78xx_other.no_sched = no_sched;
	ptr.u64 = 0;
	ptr.s.mem_region = CVMX_IO_SEG;
	ptr.s.is_io = 1;
	ptr.s.did = CVMX_OCT_DID_TAG_TAG3;
	ptr.s_cn78xx.node = node;
	ptr.s_cn78xx.tag = tag;
	cvmx_write_io(ptr.u64, tag_req.u64);
}
/* Executes the UPD_WQP_GRP SSO operation.
 *
 * @param wqp  Pointer to the new work queue entry to switch to.
 * @param xgrp SSO group in the range 0..255
 *
 * NOTE: The operation can be performed only on the local node.
 */
static inline void cvmx_sso_update_wqp_group(cvmx_wqe_t *wqp, u8 xgrp)
{
	union cvmx_pow_tag_req_addr addr;
	cvmx_pow_tag_req_t data;
	int node = cvmx_get_node_num();
	int group = node << 8 | xgrp;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		debug("ERROR: %s is not supported on this chip\n", __func__);
		return;
	}
	wqp->word1.cn78xx.grp = group;
	CVMX_SYNCWS;
	data.u64 = 0;
	data.s_cn78xx_other.op = CVMX_POW_TAG_OP_UPDATE_WQP_GRP;
	data.s_cn78xx_other.grp = group;
	data.s_cn78xx_other.wqp = cvmx_ptr_to_phys(wqp);
	addr.u64 = 0;
	addr.s_cn78xx.mem_region = CVMX_IO_SEG;
	addr.s_cn78xx.is_io = 1;
	addr.s_cn78xx.did = CVMX_OCT_DID_TAG_TAG1;
	addr.s_cn78xx.node = node;
	cvmx_write_io(addr.u64, data.u64);
}
/******************************************************************************/
/* Define usage of bits within the 32 bit tag values.                         */
/******************************************************************************/
/*
 * Number of bits of the tag used by software. The SW bits
 * are always a contiguous block of the high bits, starting at bit 31.
 * The hardware bits are always the low bits. By default, the top 8 bits
 * of the tag are reserved for software, and the low 24 are set by the IPD unit.
 */
#define CVMX_TAG_SW_BITS  (8)
#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
/* Below is the list of values for the top 8 bits of the tag. */
/*
 * Tag values with a top byte of this value are reserved for internal executive
 * uses.
 */
#define CVMX_TAG_SW_BITS_INTERNAL 0x1
/*
 * The executive divides the remaining 24 bits as follows:
 * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup;
 * the lower 16 bits (bits 15 - 0 of the tag) define the value within
 * the subgroup. Note that this section describes the format of tags generated
 * by software - refer to the hardware documentation for a description of the
 * tag values generated by the packet input hardware.
 * Subgroups are defined here.
 */
/* Mask for the value portion of the tag */
#define CVMX_TAG_SUBGROUP_MASK	0xFFFF
#define CVMX_TAG_SUBGROUP_SHIFT 16
#define CVMX_TAG_SUBGROUP_PKO	0x1
/* End of executive tag subgroup definitions */
/* The remaining software bit values 0x2 - 0xff are available
 * for application use.
 */
/**
 * This function creates a 32 bit tag value from the two values provided.
 *
 * @param sw_bits The upper bits (number depends on configuration) are set
 *	to this value. The remainder of the bits are set by the hw_bits parameter.
 * @param hw_bits The lower bits (number depends on configuration) are set
 *	to this value. The remainder of the bits are set by the sw_bits parameter.
 *
 * Return: 32 bit value of the combined hw and sw bits.
 */
static inline u32 cvmx_pow_tag_compose(u64 sw_bits, u64 hw_bits)
{
	return (((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) |
		(hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)));
}
/**
 * Extracts the bits allocated for software use from the tag
 *
 * @param tag 32 bit tag value
 *
 * Return: N bit software tag value, where N is configurable with
 *	the CVMX_TAG_SW_BITS define
 */
static inline u32 cvmx_pow_tag_get_sw_bits(u64 tag)
{
	return ((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));
}
/**
 * Extracts the bits allocated for hardware use from the tag
 *
 * @param tag 32 bit tag value
 *
 * Return: (32 - N) bit hardware tag value, where N is configurable with
 *	the CVMX_TAG_SW_BITS define
 */
static inline u32 cvmx_pow_tag_get_hw_bits(u64 tag)
{
	return (tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
}
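/*
 * Worked example with the default CVMX_TAG_SW_BITS = 8: composing sw_bits
 * 0x12 with hw_bits 0x345678 yields the tag 0x12345678, and the two
 * extractors recover the original fields.
 *
 *	u32 tag = cvmx_pow_tag_compose(0x12, 0x345678);	// 0x12345678
 *
 *	cvmx_pow_tag_get_sw_bits(tag);	// 0x12
 *	cvmx_pow_tag_get_hw_bits(tag);	// 0x345678
 */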
/**
 * Gets the total number of work queue entries queued in the SSO on the
 * given node, summed across all groups.
 *
 * @param node node number
 *
 * Return: total number of queued work queue entries
 */
static inline u64 cvmx_sso3_get_wqe_count(int node)
{
	cvmx_sso_grpx_aq_cnt_t aq_cnt;
	unsigned int grp = 0;
	u64 cnt = 0;

	for (grp = 0; grp < cvmx_sso_num_xgrp(); grp++) {
		aq_cnt.u64 = csr_rd_node(node, CVMX_SSO_GRPX_AQ_CNT(grp));
		cnt += aq_cnt.s.aq_cnt;
	}
	return cnt;
}
/**
 * Gets the total number of work queue entries currently queued in the
 * POW/SSO input queues, regardless of chip model.
 *
 * Return: total number of queued work queue entries
 */
static inline u64 cvmx_sso_get_total_wqe_count(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		int node = cvmx_get_node_num();

		return cvmx_sso3_get_wqe_count(node);
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_sso_iq_com_cnt_t sso_iq_com_cnt;

		sso_iq_com_cnt.u64 = csr_rd(CVMX_SSO_IQ_COM_CNT);
		return (sso_iq_com_cnt.s.iq_cnt);
	} else {
		cvmx_pow_iq_com_cnt_t pow_iq_com_cnt;

		pow_iq_com_cnt.u64 = csr_rd(CVMX_POW_IQ_COM_CNT);
		return (pow_iq_com_cnt.s.iq_cnt);
	}
}
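/*
 * Sketch of a typical caller (an assumed usage pattern, not mandated by this
 * API): poll until all queued work has drained, e.g. before a shutdown.
 *
 *   while (cvmx_sso_get_total_wqe_count() != 0)
 *       ;  // spin until the POW/SSO input queues are empty
 */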
/**
 * Store the current POW internal state into the supplied
 * buffer. It is recommended that you pass a buffer of at least
 * 128KB. The format of the capture may change based on SDK
 * version and Octeon chip.
 *
 * @param buffer      Buffer to store capture into
 * @param buffer_size The size of the supplied buffer
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_pow_capture(void *buffer, int buffer_size);
/**
 * Dump a POW capture to the console in a human readable format.
 *
 * @param buffer      POW capture from cvmx_pow_capture()
 * @param buffer_size Size of the buffer
 */
void cvmx_pow_display(void *buffer, int buffer_size);
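/*
 * Sketch of the capture/display pair. The buffer name and its 128KB size
 * are the caller's choice, following the recommendation above:
 *
 *   static u8 pow_buf[128 * 1024];
 *
 *   if (cvmx_pow_capture(pow_buf, sizeof(pow_buf)) == 0)
 *       cvmx_pow_display(pow_buf, sizeof(pow_buf));
 */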
/**
 * Return the number of POW entries supported by this chip
 *
 * Return: Number of POW entries
 */
int cvmx_pow_get_num_entries(void);

/* Assumed from the name: returns the buffer size needed for a POW
 * capture/dump (see cvmx_pow_capture() above). */
int cvmx_pow_get_dump_size(void);
/**
 * This will allocate 'count' SSO groups on the specified node for the
 * exclusive use of the calling application until they are freed.
 *
 * @param node       The NUMA node for the allocation.
 * @param base_group Pointer to the initial group, -1 to allocate anywhere.
 * @param count      The number of consecutive groups to allocate.
 *
 * Return: 0 on success and -1 on failure.
 */
int cvmx_sso_reserve_group_range(int node, int *base_group, int count);
#define cvmx_sso_allocate_group_range cvmx_sso_reserve_group_range
int cvmx_sso_reserve_group(int node);
#define cvmx_sso_allocate_group cvmx_sso_reserve_group
int cvmx_sso_release_group_range(int node, int base_group, int count);
int cvmx_sso_release_group(int node, int group);
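/*
 * Sketch: reserving four consecutive groups anywhere on node 0 and releasing
 * them when done. Passing -1 requests any free range; it is assumed (not
 * stated above) that on success *base_group is updated to the first group
 * actually reserved.
 *
 *   int base_group = -1;
 *
 *   if (cvmx_sso_reserve_group_range(0, &base_group, 4) == 0) {
 *       // ... use groups base_group .. base_group + 3 ...
 *       cvmx_sso_release_group_range(0, base_group, 4);
 *   }
 */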
/**
 * Show integrated SSO configuration.
 *
 * @param node node number
 */
int cvmx_sso_config_dump(unsigned int node);

/**
 * Show integrated SSO statistics.
 *
 * @param node node number
 */
int cvmx_sso_stats_dump(unsigned int node);

/**
 * Clear integrated SSO statistics.
 *
 * @param node node number
 */
int cvmx_sso_stats_clear(unsigned int node);
/**
 * Show SSO core-group affinity and priority per node (multi-node systems)
 */
void cvmx_pow_mask_priority_dump_node(unsigned int node, struct cvmx_coremask *avail_coremask);

/**
 * Show POW/SSO core-group affinity and priority (legacy, single-node systems)
 */
static inline void cvmx_pow_mask_priority_dump(struct cvmx_coremask *avail_coremask)
{
	cvmx_pow_mask_priority_dump_node(0 /*node */, avail_coremask);
}
/**
 * Show SSO performance counters (multi-node systems)
 */
void cvmx_pow_show_perf_counters_node(unsigned int node);

/**
 * Show POW/SSO performance counters (legacy, single-node systems)
 */
static inline void cvmx_pow_show_perf_counters(void)
{
	cvmx_pow_show_perf_counters_node(0 /*node */);
}

#endif /* __CVMX_POW_H__ */