// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting semaphore for managing max simultaneous
 *			messages.
 * @xfer_block:		Preallocated message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};
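
/*
 * Editor's note: a hypothetical example (illustrative values only) of how
 * an SoC integration would fill this description; the real per-SoC tables
 * appear further down in this file:
 *
 *	static const struct ti_sci_desc ti_sci_example_desc = {
 *		.default_host_id = 2,
 *		.max_rx_timeout_ms = 1000,
 *		.max_msgs = 20,
 *		.max_msg_size = 64,
 *	};
 */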
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region:	Memory region where the debug messages are available
 * @debug_region_size:	Debug region size
 * @debug_buffer:	Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave NULL terminated last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}
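
/*
 * Editor's note: the optional "debug_messages" region looked up above is
 * expected to come from the device tree. A hypothetical node fragment
 * (address and size are placeholders, only the reg-names entry is implied
 * by the resource lookup above) might look like:
 *
 *	system-controller@0 {
 *		...
 *		reg = <0x0 0x1000>;
 *		reg-names = "debug_messages";
 *	};
 */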
/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
				   struct ti_sci_info *info)
{
	if (IS_ERR(info->debug_region))
		return;

	debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}
/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: Valid ti_sci_xfer pointer if all went fine, else a corresponding
 *	   error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}
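
/*
 * Editor's note: sem_xfer_count is expected to be initialized to
 * desc->max_msgs at probe time (the probe code appears later in this
 * file), so the counting semaphore and the allocation bitmap above
 * always agree on how many transfer slots remain free.
 */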
/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all went fine; -ETIMEDOUT in case of no response;
 *	   else the corresponding transmit error.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
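
/*
 * Editor's illustration (not part of the driver): every command helper
 * below follows the same allocate -> fill -> transfer -> check-ACK ->
 * release pattern built from the helpers above, roughly:
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	req = (struct some_req *)xfer->xfer_buf;	// hypothetical type
 *	...fill request fields...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	if (!ret)
 *		ret = ti_sci_is_response_ack(xfer->xfer_buf) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *	return ret;
 */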
/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}
/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Release the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}
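
/*
 * Editor's illustration: as the kernel-doc above notes, the driver does
 * no refcounting on the client's behalf, so callers are expected to
 * balance the helpers above themselves, e.g.
 *
 *	ret = ti_sci_cmd_get_device(handle, dev_id);
 *	if (ret)
 *		return ret;
 *	...use the device...
 *	ti_sci_cmd_put_device(handle, dev_id);
 */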
/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}
/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
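
/*
 * Editor's note on the clk_id handling above (mirrored in the clock
 * helpers below): the legacy protocol field for a clock identifier is
 * 8 bits wide, so identifiers of 255 and above are signalled by writing
 * 255 into the legacy clk_id field and the full value into the 32-bit
 * clk_id_32 extension. Responses use the same escape, as seen in the
 * parent-ID and parent-count decoding further down.
 */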
  870. /**
  871. * ti_sci_cmd_get_clock_state() - Get clock state helper
  872. * @handle: pointer to TI SCI handle
  873. * @dev_id: Device identifier this request is for
  874. * @clk_id: Clock identifier for the device for this request.
  875. * Each device has it's own set of clock inputs. This indexes
  876. * which clock input to modify.
  877. * @programmed_state: State requested for clock to move to
  878. * @current_state: State that the clock is currently in
  879. *
  880. * Return: 0 if all went well, else returns appropriate error value.
  881. */
  882. static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
  883. u32 dev_id, u32 clk_id,
  884. u8 *programmed_state, u8 *current_state)
  885. {
  886. struct ti_sci_info *info;
  887. struct ti_sci_msg_req_get_clock_state *req;
  888. struct ti_sci_msg_resp_get_clock_state *resp;
  889. struct ti_sci_xfer *xfer;
  890. struct device *dev;
  891. int ret = 0;
  892. if (IS_ERR(handle))
  893. return PTR_ERR(handle);
  894. if (!handle)
  895. return -EINVAL;
  896. if (!programmed_state && !current_state)
  897. return -EINVAL;
  898. info = handle_to_ti_sci_info(handle);
  899. dev = info->dev;
  900. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
  901. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  902. sizeof(*req), sizeof(*resp));
  903. if (IS_ERR(xfer)) {
  904. ret = PTR_ERR(xfer);
  905. dev_err(dev, "Message alloc failed(%d)\n", ret);
  906. return ret;
  907. }
  908. req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
  909. req->dev_id = dev_id;
  910. if (clk_id < 255) {
  911. req->clk_id = clk_id;
  912. } else {
  913. req->clk_id = 255;
  914. req->clk_id_32 = clk_id;
  915. }
  916. ret = ti_sci_do_xfer(info, xfer);
  917. if (ret) {
  918. dev_err(dev, "Mbox send fail %d\n", ret);
  919. goto fail;
  920. }
  921. resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
  922. if (!ti_sci_is_response_ack(resp)) {
  923. ret = -ENODEV;
  924. goto fail;
  925. }
  926. if (programmed_state)
  927. *programmed_state = resp->programmed_state;
  928. if (current_state)
  929. *current_state = resp->current_state;
  930. fail:
  931. ti_sci_put_one_xfer(&info->minfo, xfer);
  932. return ret;
  933. }
  934. /**
  935. * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
  936. * @handle: pointer to TI SCI handle
  937. * @dev_id: Device identifier this request is for
  938. * @clk_id: Clock identifier for the device for this request.
  939. * Each device has it's own set of clock inputs. This indexes
  940. * which clock input to modify.
  941. * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
  942. * @can_change_freq: 'true' if frequency change is desired, else 'false'
  943. * @enable_input_term: 'true' if input termination is desired, else 'false'
  944. *
  945. * Return: 0 if all went well, else returns appropriate error value.
  946. */
  947. static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
  948. u32 clk_id, bool needs_ssc,
  949. bool can_change_freq, bool enable_input_term)
  950. {
  951. u32 flags = 0;
  952. flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
  953. flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
  954. flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
  955. return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
  956. MSG_CLOCK_SW_STATE_REQ);
  957. }
  958. /**
  959. * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
  960. * @handle: pointer to TI SCI handle
  961. * @dev_id: Device identifier this request is for
  962. * @clk_id: Clock identifier for the device for this request.
  963. * Each device has it's own set of clock inputs. This indexes
  964. * which clock input to modify.
  965. *
  966. * NOTE: This clock must have been requested by get_clock previously.
  967. *
  968. * Return: 0 if all went well, else returns appropriate error value.
  969. */
  970. static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
  971. u32 dev_id, u32 clk_id)
  972. {
  973. return ti_sci_set_clock_state(handle, dev_id, clk_id,
  974. MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
  975. MSG_CLOCK_SW_STATE_UNREQ);
  976. }
  977. /**
  978. * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
  979. * @handle: pointer to TI SCI handle
  980. * @dev_id: Device identifier this request is for
  981. * @clk_id: Clock identifier for the device for this request.
  982. * Each device has it's own set of clock inputs. This indexes
  983. * which clock input to modify.
  984. *
  985. * NOTE: This clock must have been requested by get_clock previously.
  986. *
  987. * Return: 0 if all went well, else returns appropriate error value.
  988. */
  989. static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
  990. u32 dev_id, u32 clk_id)
  991. {
  992. return ti_sci_set_clock_state(handle, dev_id, clk_id,
  993. MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
  994. MSG_CLOCK_SW_STATE_AUTO);
  995. }
  996. /**
  997. * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
  998. * @handle: pointer to TI SCI handle
  999. * @dev_id: Device identifier this request is for
  1000. * @clk_id: Clock identifier for the device for this request.
  1001. * Each device has it's own set of clock inputs. This indexes
  1002. * which clock input to modify.
  1003. * @req_state: state indicating if the clock is auto managed
  1004. *
  1005. * Return: 0 if all went well, else returns appropriate error value.
  1006. */
  1007. static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
  1008. u32 dev_id, u32 clk_id, bool *req_state)
  1009. {
  1010. u8 state = 0;
  1011. int ret;
  1012. if (!req_state)
  1013. return -EINVAL;
  1014. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
  1015. if (ret)
  1016. return ret;
  1017. *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
  1018. return 0;
  1019. }
  1020. /**
  1021. * ti_sci_cmd_clk_is_on() - Is the clock ON
  1022. * @handle: pointer to TI SCI handle
  1023. * @dev_id: Device identifier this request is for
  1024. * @clk_id: Clock identifier for the device for this request.
  1025. * Each device has it's own set of clock inputs. This indexes
  1026. * which clock input to modify.
  1027. * @req_state: state indicating if the clock is managed by us and enabled
  1028. * @curr_state: state indicating if the clock is ready for operation
  1029. *
  1030. * Return: 0 if all went well, else returns appropriate error value.
  1031. */
  1032. static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
  1033. u32 clk_id, bool *req_state, bool *curr_state)
  1034. {
  1035. u8 c_state = 0, r_state = 0;
  1036. int ret;
  1037. if (!req_state && !curr_state)
  1038. return -EINVAL;
  1039. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1040. &r_state, &c_state);
  1041. if (ret)
  1042. return ret;
  1043. if (req_state)
  1044. *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
  1045. if (curr_state)
  1046. *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
  1047. return 0;
  1048. }
  1049. /**
  1050. * ti_sci_cmd_clk_is_off() - Is the clock OFF
  1051. * @handle: pointer to TI SCI handle
  1052. * @dev_id: Device identifier this request is for
  1053. * @clk_id: Clock identifier for the device for this request.
  1054. * Each device has it's own set of clock inputs. This indexes
  1055. * which clock input to modify.
  1056. * @req_state: state indicating if the clock is managed by us and disabled
  1057. * @curr_state: state indicating if the clock is NOT ready for operation
  1058. *
  1059. * Return: 0 if all went well, else returns appropriate error value.
  1060. */
  1061. static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
  1062. u32 clk_id, bool *req_state, bool *curr_state)
  1063. {
  1064. u8 c_state = 0, r_state = 0;
  1065. int ret;
  1066. if (!req_state && !curr_state)
  1067. return -EINVAL;
  1068. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1069. &r_state, &c_state);
  1070. if (ret)
  1071. return ret;
  1072. if (req_state)
  1073. *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
  1074. if (curr_state)
  1075. *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
  1076. return 0;
  1077. }
  1078. /**
  1079. * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
  1080. * @handle: pointer to TI SCI handle
  1081. * @dev_id: Device identifier this request is for
  1082. * @clk_id: Clock identifier for the device for this request.
  1083. * Each device has it's own set of clock inputs. This indexes
  1084. * which clock input to modify.
  1085. * @parent_id: Parent clock identifier to set
  1086. *
  1087. * Return: 0 if all went well, else returns appropriate error value.
  1088. */
  1089. static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
  1090. u32 dev_id, u32 clk_id, u32 parent_id)
  1091. {
  1092. struct ti_sci_info *info;
  1093. struct ti_sci_msg_req_set_clock_parent *req;
  1094. struct ti_sci_msg_hdr *resp;
  1095. struct ti_sci_xfer *xfer;
  1096. struct device *dev;
  1097. int ret = 0;
  1098. if (IS_ERR(handle))
  1099. return PTR_ERR(handle);
  1100. if (!handle)
  1101. return -EINVAL;
  1102. info = handle_to_ti_sci_info(handle);
  1103. dev = info->dev;
  1104. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
  1105. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1106. sizeof(*req), sizeof(*resp));
  1107. if (IS_ERR(xfer)) {
  1108. ret = PTR_ERR(xfer);
  1109. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1110. return ret;
  1111. }
  1112. req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
  1113. req->dev_id = dev_id;
  1114. if (clk_id < 255) {
  1115. req->clk_id = clk_id;
  1116. } else {
  1117. req->clk_id = 255;
  1118. req->clk_id_32 = clk_id;
  1119. }
  1120. if (parent_id < 255) {
  1121. req->parent_id = parent_id;
  1122. } else {
  1123. req->parent_id = 255;
  1124. req->parent_id_32 = parent_id;
  1125. }
  1126. ret = ti_sci_do_xfer(info, xfer);
  1127. if (ret) {
  1128. dev_err(dev, "Mbox send fail %d\n", ret);
  1129. goto fail;
  1130. }
  1131. resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
  1132. ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
  1133. fail:
  1134. ti_sci_put_one_xfer(&info->minfo, xfer);
  1135. return ret;
  1136. }
  1137. /**
  1138. * ti_sci_cmd_clk_get_parent() - Get current parent clock source
  1139. * @handle: pointer to TI SCI handle
  1140. * @dev_id: Device identifier this request is for
  1141. * @clk_id: Clock identifier for the device for this request.
  1142. * Each device has it's own set of clock inputs. This indexes
  1143. * which clock input to modify.
  1144. * @parent_id: Current clock parent
  1145. *
  1146. * Return: 0 if all went well, else returns appropriate error value.
  1147. */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to query.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to query.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
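
/*
 * Usage sketch (not part of this driver): a caller would typically ask the
 * firmware for the best achievable frequency first, then program it, using
 * the clk_ops installed by ti_sci_setup_ops(). The IDs and frequencies
 * below are hypothetical.
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *	u64 match;
 *	int ret;
 *
 *	ret = cops->get_best_match_freq(handle, 82, 0, 96000000ULL,
 *					100000000ULL, 104000000ULL, &match);
 *	if (!ret)
 *		ret = cops->set_freq(handle, 82, 0, 96000000ULL, match,
 *				     104000000ULL);
 */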
/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to query.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
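
/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */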
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
	req->secondary_host = s_host;
	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_start && !resp->range_num) {
		ret = -ENODEV;
	} else {
		*range_start = resp->range_start;
		*range_num = resp->range_num;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
 *				   host that is the same as the TI SCI
 *				   interface host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 range_start, range_num);
}
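
/*
 * Usage sketch (not part of this driver): resource ranges are consumed
 * through the rm_core_ops installed by ti_sci_setup_ops(). The device ID
 * and subtype below are hypothetical.
 *
 *	const struct ti_sci_rm_core_ops *rm = &handle->ops.rm_core_ops;
 *	u16 start, num;
 *	int ret;
 *
 *	ret = rm->get_range(handle, 236, 1, &start, &num);
 *	if (!ret)
 *		dev_info(dev, "resources [%u, %u)\n", start, start + num);
 */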
/**
 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
 *					      assigned to a specified host.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
					     u32 dev_id, u8 subtype, u8 s_host,
					     u16 *range_start, u16 *range_num)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
					 range_start, range_num);
}
/**
 * ti_sci_manage_irq() - Helper API to configure/release the irq route between
 *			 the requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is being
 *			requested
 * @type:		Request type irq set or release.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
			     u32 valid_params, u16 src_id, u16 src_index,
			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
			     u16 global_event, u8 vint_status_bit, u8 s_host,
			     u16 type)
{
	struct ti_sci_msg_req_manage_irq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->src_id = src_id;
	req->src_index = src_index;
	req->dst_id = dst_id;
	req->dst_host_irq = dst_host_irq;
	req->ia_id = ia_id;
	req->vint = vint;
	req->global_event = global_event;
	req->vint_status_bit = vint_status_bit;
	req->secondary_host = s_host;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_set_irq() - Helper API to configure the irq route between the
 *		      requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is being
 *			requested
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
			  u16 src_id, u16 src_index, u16 dst_id,
			  u16 dst_host_irq, u16 ia_id, u16 vint,
			  u16 global_event, u8 vint_status_bit, u8 s_host)
{
	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
		 __func__, valid_params, src_id, src_index,
		 dst_id, dst_host_irq, ia_id, vint, global_event,
		 vint_status_bit);

	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
				 dst_id, dst_host_irq, ia_id, vint,
				 global_event, vint_status_bit, s_host,
				 TI_SCI_MSG_SET_IRQ);
}
/**
 * ti_sci_free_irq() - Helper API to free the irq route between the
 *		       requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is being
 *			requested
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
			   u16 src_id, u16 src_index, u16 dst_id,
			   u16 dst_host_irq, u16 ia_id, u16 vint,
			   u16 global_event, u8 vint_status_bit, u8 s_host)
{
	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
		 __func__, valid_params, src_id, src_index,
		 dst_id, dst_host_irq, ia_id, vint, global_event,
		 vint_status_bit);

	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
				 dst_id, dst_host_irq, ia_id, vint,
				 global_event, vint_status_bit, s_host,
				 TI_SCI_MSG_FREE_IRQ);
}
/**
 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
 *			  source and destination.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
			      u16 src_index, u16 dst_id, u16 dst_host_irq)
{
	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;

	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
			      dst_host_irq, 0, 0, 0, 0, 0);
}
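
/*
 * Usage sketch (not part of this driver): setting up a direct host IRQ
 * route via the rm_irq_ops installed by ti_sci_setup_ops(). All IDs below
 * are hypothetical.
 *
 *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
 *	int ret;
 *
 *	ret = iops->set_irq(handle, 131, 4, 56, 196);
 *	if (ret)
 *		dev_err(dev, "IRQ route setup failed (%d)\n", ret);
 */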
/**
 * ti_sci_cmd_set_event_map() - Configure an event-based irq route between the
 *				requested source and Interrupt Aggregator.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
				    u16 src_id, u16 src_index, u16 ia_id,
				    u16 vint, u16 global_event,
				    u8 vint_status_bit)
{
	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
			   MSG_FLAG_GLB_EVNT_VALID |
			   MSG_FLAG_VINT_STS_BIT_VALID;

	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
			      ia_id, vint, global_event, vint_status_bit, 0);
}
/**
 * ti_sci_cmd_free_irq() - Free a host irq route between the requested
 *			   source and destination.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
			       u16 src_index, u16 dst_id, u16 dst_host_irq)
{
	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;

	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
			       dst_host_irq, 0, 0, 0, 0, 0);
}
/**
 * ti_sci_cmd_free_event_map() - Free an event map between the requested source
 *				 and Interrupt Aggregator.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
				     u16 src_id, u16 src_index, u16 ia_id,
				     u16 vint, u16 global_event,
				     u8 vint_status_bit)
{
	u32 valid_params = MSG_FLAG_IA_ID_VALID |
			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
			   MSG_FLAG_VINT_STS_BIT_VALID;

	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
			       ia_id, vint, global_event, vint_status_bit, 0);
}
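
/*
 * Usage sketch (not part of this driver): mapping and later unmapping an
 * event onto an Interrupt Aggregator vint through rm_irq_ops. The IDs,
 * vint, global event and status bit numbers below are hypothetical.
 *
 *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
 *	int ret;
 *
 *	ret = iops->set_event_map(handle, 128, 10, 209, 3, 0x4010, 5);
 *	if (ret)
 *		return ret;
 *	...
 *	iops->free_event_map(handle, 128, 10, 209, 3, 0x4010, 5);
 */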
/**
 * ti_sci_cmd_ring_config() - configure RA ring
 * @handle:	Pointer to TI SCI handle.
 * @valid_params: Bitfield defining validity of ring configuration
 *		parameters
 * @nav_id:	Device ID of Navigator Subsystem from which the ring is
 *		allocated
 * @index:	Ring index
 * @addr_lo:	The ring base address lo 32 bits
 * @addr_hi:	The ring base address hi 32 bits
 * @count:	Number of ring elements
 * @mode:	The mode of the ring
 * @size:	The ring element size.
 * @order_id:	Specifies the ring's bus order ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_ring_cfg_req for more info.
 */
static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
				  u32 valid_params, u16 nav_id, u16 index,
				  u32 addr_lo, u32 addr_hi, u32 count,
				  u8 mode, u8 size, u8 order_id)
{
	struct ti_sci_msg_rm_ring_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->nav_id = nav_id;
	req->index = index;
	req->addr_lo = addr_lo;
	req->addr_hi = addr_hi;
	req->count = count;
	req->mode = mode;
	req->size = size;
	req->order_id = order_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
	return ret;
}
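
/*
 * Usage sketch (not part of this driver): configuring a ring through the
 * rm_ring_ops installed by ti_sci_setup_ops(). The valid_params flag names
 * are assumed to come from the TI SCI protocol header; the nav_id, ring
 * index, DMA address and element count are hypothetical.
 *
 *	const struct ti_sci_rm_ringacc_ops *rops = &handle->ops.rm_ring_ops;
 *	u32 valid = TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID |
 *		    TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID |
 *		    TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID |
 *		    TI_SCI_MSG_VALUE_RM_RING_MODE_VALID |
 *		    TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID;
 *	int ret;
 *
 *	ret = rops->config(handle, valid, 235, 10, lower_32_bits(dma_addr),
 *			   upper_32_bits(dma_addr), 128, 0, 1, 0);
 */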
/**
 * ti_sci_cmd_ring_get_config() - get RA ring configuration
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem from which the ring is
 *		allocated
 * @index:	Ring index
 * @mode:	Returns mode of the ring
 * @addr_lo:	Returns ring's base address lo 32 bits
 * @addr_hi:	Returns ring's base address hi 32 bits
 * @count:	Returns number of ring elements
 * @size:	Returns ring element size
 * @order_id:	Returns ring's bus order ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
 */
static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
				      u32 nav_id, u32 index, u8 *mode,
				      u32 *addr_lo, u32 *addr_hi,
				      u32 *count, u8 *size, u8 *order_id)
{
	struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
	struct ti_sci_msg_rm_ring_get_cfg_req *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev,
			"RM_RA:Message get config failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->index = index;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (mode)
			*mode = resp->mode;
		if (addr_lo)
			*addr_lo = resp->addr_lo;
		if (addr_hi)
			*addr_hi = resp->addr_hi;
		if (count)
			*count = resp->count;
		if (size)
			*size = resp->size;
		if (order_id)
			*order_id = resp->order_id;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
	return ret;
}
/**
 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem which should be used for
 *		pairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread: Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
				   u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_pair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem which should be used for
 *		unpairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
				     u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_unpair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
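
/*
 * Usage sketch (not part of this driver): pairing and unpairing PSI-L
 * threads through the rm_psil_ops installed by ti_sci_setup_ops(). The
 * Navigator device ID and thread IDs are hypothetical; the high offset
 * bit conventionally carried by destination thread IDs is assumed here.
 *
 *	const struct ti_sci_rm_psil_ops *psil = &handle->ops.rm_psil_ops;
 *	int ret;
 *
 *	ret = psil->pair(handle, 235, 0x4a00, 0x8000 + 0x30);
 *	if (ret)
 *		return ret;
 *	...
 *	psil->unpair(handle, 235, 0x4a00, 0x8000 + 0x30);
 */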
/**
 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->tx_pause_on_err = params->tx_pause_on_err;
	req->tx_filt_einfo = params->tx_filt_einfo;
	req->tx_filt_pswords = params->tx_filt_pswords;
	req->tx_atype = params->tx_atype;
	req->tx_chan_type = params->tx_chan_type;
	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
	req->tx_fetch_size = params->tx_fetch_size;
	req->tx_credit_count = params->tx_credit_count;
	req->txcq_qnum = params->txcq_qnum;
	req->tx_priority = params->tx_priority;
	req->tx_qos = params->tx_qos;
	req->tx_orderid = params->tx_orderid;
	req->fdepth = params->fdepth;
	req->tx_sched_priority = params->tx_sched_priority;
	req->tx_burst_size = params->tx_burst_size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}
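
/*
 * Usage sketch (not part of this driver): filling a minimal
 * ti_sci_msg_rm_udmap_tx_ch_cfg and submitting it through rm_udmap_ops.
 * The valid_params flag names are assumed to come from the TI SCI protocol
 * header; all numeric values are hypothetical.
 *
 *	const struct ti_sci_rm_udmap_ops *udmap = &handle->ops.rm_udmap_ops;
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = { 0 };
 *	int ret;
 *
 *	cfg.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
 *			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
 *			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
 *	cfg.nav_id = 235;
 *	cfg.index = 5;
 *	cfg.tx_chan_type = 2;
 *	cfg.tx_fetch_size = 16;
 *	cfg.txcq_qnum = 4096;
 *
 *	ret = udmap->tx_ch_cfg(handle, &cfg);
 */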
/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->rx_fetch_size = params->rx_fetch_size;
	req->rxcq_qnum = params->rxcq_qnum;
	req->rx_priority = params->rx_priority;
	req->rx_qos = params->rx_qos;
	req->rx_orderid = params->rx_orderid;
	req->rx_sched_priority = params->rx_sched_priority;
	req->flowid_start = params->flowid_start;
	req->flowid_cnt = params->flowid_cnt;
	req->rx_pause_on_err = params->rx_pause_on_err;
	req->rx_atype = params->rx_atype;
	req->rx_chan_type = params->rx_chan_type;
	req->rx_ignore_short = params->rx_ignore_short;
	req->rx_ignore_long = params->rx_ignore_long;
	req->rx_burst_size = params->rx_burst_size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}
/**
 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
{
	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->flow_index = params->flow_index;
	req->rx_einfo_present = params->rx_einfo_present;
	req->rx_psinfo_present = params->rx_psinfo_present;
	req->rx_error_handling = params->rx_error_handling;
	req->rx_desc_type = params->rx_desc_type;
	req->rx_sop_offset = params->rx_sop_offset;
	req->rx_dest_qnum = params->rx_dest_qnum;
	req->rx_src_tag_hi = params->rx_src_tag_hi;
	req->rx_src_tag_lo = params->rx_src_tag_lo;
	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
	req->rx_ps_location = params->rx_ps_location;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
	return ret;
}
/**
 * ti_sci_cmd_proc_request() - Command to request a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_request *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_proc_release() - Command to release a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_release *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
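
/*
 * Usage sketch (not part of this driver): claiming and releasing a remote
 * processor through the proc_ops installed by ti_sci_setup_ops(). The
 * processor ID below is hypothetical.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *	int ret;
 *
 *	ret = pops->request(handle, 1);
 *	if (ret)
 *		return ret;
 *	...
 *	pops->release(handle, 1);
 */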
/**
 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
 *				control to a host in the processor's access
 *				control list.
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @host_id:	Host ID to get the control of the processor
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
				    u8 proc_id, u8 host_id)
{
	struct ti_sci_msg_req_proc_handover *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->host_id = host_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
 *				  configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor boot vector (processor entry point)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 bootvector,
				      u32 config_flags_set,
				      u32 config_flags_clear)
{
	struct ti_sci_msg_req_set_config *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
				TI_SCI_ADDR_HIGH_SHIFT;
	req->config_flags_set = config_flags_set;
	req->config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
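
/*
 * Usage sketch (not part of this driver): programming a boot vector via
 * proc_ops. The processor ID, entry address and flag values below are
 * hypothetical.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *	int ret;
 *
 *	ret = pops->set_config(handle, 1, 0x9d000000ULL, 0, 0);
 *	if (ret)
 *		dev_err(dev, "setting boot vector failed (%d)\n", ret);
 */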
/**
 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
 *				   control flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @control_flags_set:	Control flags to be set
 * @control_flags_clear: Control flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
				       u8 proc_id, u32 control_flags_set,
				       u32 control_flags_clear)
{
	struct ti_sci_msg_req_set_ctrl *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->control_flags_set = control_flags_set;
	req->control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Returns the processor boot vector
 * @cfg_flags:	Returns the processor specific configuration flags
 * @ctrl_flags:	Returns the processor specific control flags
 * @sts_flags:	Returns the processor specific status flags
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 *bv, u32 *cfg_flags,
				      u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_status *resp;
	struct ti_sci_msg_req_get_status *req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
		       TI_SCI_ADDR_HIGH_MASK);
		*cfg_flags = resp->config_flags;
		*ctrl_flags = resp->control_flags;
		*sts_flags = resp->status_flags;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
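
/*
 * Usage sketch (not part of this driver): reading back the processor boot
 * state via proc_ops. The processor ID below is hypothetical.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *	int ret;
 *
 *	ret = pops->get_status(handle, 1, &bv, &cfg, &ctrl, &sts);
 *	if (!ret)
 *		dev_dbg(dev, "bv 0x%llx sts 0x%x\n", bv, sts);
 */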
/*
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI instance
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;

	core_ops->reboot_device = ti_sci_cmd_core_reboot;

	dops->get_device = ti_sci_cmd_get_device;
	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
	dops->put_device = ti_sci_cmd_put_device;

	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
				ti_sci_cmd_get_resource_range_from_shost;

	iops->set_irq = ti_sci_cmd_set_irq;
	iops->set_event_map = ti_sci_cmd_set_event_map;
	iops->free_irq = ti_sci_cmd_free_irq;
	iops->free_event_map = ti_sci_cmd_free_event_map;

	rops->config = ti_sci_cmd_ring_config;
	rops->get_config = ti_sci_cmd_ring_get_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;

	pops->request = ti_sci_cmd_proc_request;
	pops->release = ti_sci_cmd_proc_release;
	pops->handover = ti_sci_cmd_proc_handover;
	pops->set_config = ti_sci_cmd_proc_set_config;
	pops->set_control = ti_sci_cmd_proc_set_control;
	pops->get_status = ti_sci_cmd_proc_get_status;
}
/**
 * ti_sci_get_handle() - Get the TI SCI handle for a device
 * @dev:	Pointer to device for which we want SCI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle.
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handler is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
{
	struct device_node *ti_sci_np;
	struct list_head *p;
	struct ti_sci_handle *handle = NULL;
	struct ti_sci_info *info;

	if (!dev) {
		pr_err("I need a device pointer\n");
		return ERR_PTR(-EINVAL);
	}
	ti_sci_np = of_get_parent(dev->of_node);
	if (!ti_sci_np) {
		dev_err(dev, "No OF information\n");
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_handle);

/**
 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
 * @handle:	Handle acquired by ti_sci_get_handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * such tracking is the responsibility of the caller of the TI SCI protocol
 * library. Each successful ti_sci_get_handle must be balanced with a
 * ti_sci_put_handle.
 *
 * Return: 0 if successfully released;
 * if an error pointer was passed, the error value is returned back;
 * if NULL was passed, -EINVAL is returned.
 */
int ti_sci_put_handle(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	mutex_lock(&ti_sci_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&ti_sci_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(ti_sci_put_handle);
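
/*
 * Example (a minimal illustrative sketch, not part of this driver): a
 * client managing the handle lifetime by hand. The caller is assumed to
 * be a child device of the TISCI node; everything besides the handle
 * handling is elided.
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = ti_sci_get_handle(dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	... use handle->ops.dev_ops, handle->ops.clk_ops, etc. ...
 *
 *	ti_sci_put_handle(handle);
 */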

static void devm_ti_sci_release(struct device *dev, void *res)
{
	const struct ti_sci_handle **ptr = res;
	const struct ti_sci_handle *handle = *ptr;
	int ret;

	ret = ti_sci_put_handle(handle);
	if (ret)
		dev_err(dev, "failed to put handle %d\n", ret);
}

/**
 * devm_ti_sci_get_handle() - Managed get handle
 * @dev:	device for which we want SCI handle
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework;
 * such tracking is the responsibility of the caller of the TI SCI
 * protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
	const struct ti_sci_handle **ptr;
	const struct ti_sci_handle *handle;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_handle(dev);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
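
/*
 * Example (illustrative sketch): a client probe using the managed
 * variant, so no explicit ti_sci_put_handle is needed; devres releases
 * the handle when the client device goes away. "priv" is a hypothetical
 * driver structure, not part of this file.
 *
 *	priv->tsh = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(priv->tsh))
 *		return PTR_ERR(priv->tsh);
 */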

/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
 * @np:		device node
 * @property:	property name containing phandle on TISCI node
 *
 * NOTE: The function does not track individual clients of the framework;
 * such tracking is the responsibility of the caller of the TI SCI protocol
 * library. Each successful ti_sci_get_by_phandle must be balanced with a
 * ti_sci_put_handle.
 *
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handle is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
						  const char *property)
{
	struct ti_sci_handle *handle = NULL;
	struct device_node *ti_sci_np;
	struct ti_sci_info *info;
	struct list_head *p;

	if (!np) {
		pr_err("I need a device node pointer\n");
		return ERR_PTR(-EINVAL);
	}

	ti_sci_np = of_parse_phandle(np, property, 0);
	if (!ti_sci_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
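
/*
 * Example (illustrative sketch): looking the handle up through a phandle
 * property on the client node. The property name "ti,sci" is a
 * placeholder; the actual name is the client binding's choice.
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = ti_sci_get_by_phandle(np, "ti,sci");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */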

/**
 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
 * @dev:	Device pointer requesting TISCI handle
 * @property:	property name containing phandle on TISCI node
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework;
 * such tracking is the responsibility of the caller of the TI SCI
 * protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
						       const char *property)
{
	const struct ti_sci_handle *handle;
	const struct ti_sci_handle **ptr;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
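
/*
 * Example (illustrative sketch): the managed phandle variant, typically
 * the most convenient form for client drivers. "priv" and the "ti,sci"
 * property name are placeholders.
 *
 *	priv->tsh = devm_ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(priv->tsh))
 *		return PTR_ERR(priv->tsh);
 */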

/**
 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
 * @res:	Pointer to the TISCI resource
 *
 * Return: resource number on success, else TI_SCI_RESOURCE_NULL.
 */
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
	unsigned long flags;
	u16 set, free_bit;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		free_bit = find_first_zero_bit(res->desc[set].res_map,
					       res->desc[set].num);
		if (free_bit != res->desc[set].num) {
			set_bit(free_bit, res->desc[set].res_map);
			raw_spin_unlock_irqrestore(&res->lock, flags);
			return res->desc[set].start + free_bit;
		}
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);

	return TI_SCI_RESOURCE_NULL;
}
EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);

/**
 * ti_sci_release_resource() - Release a resource from TISCI resource.
 * @res:	Pointer to the TISCI resource
 * @id:		Resource id to be released.
 */
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
	unsigned long flags;
	u16 set;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		if (res->desc[set].start <= id &&
		    (res->desc[set].num + res->desc[set].start) > id)
			clear_bit(id - res->desc[set].start,
				  res->desc[set].res_map);
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);
}
EXPORT_SYMBOL_GPL(ti_sci_release_resource);

/**
 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
 * @res:	Pointer to the TISCI resource
 *
 * Return: Total number of available resources.
 */
u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
{
	u32 set, count = 0;

	for (set = 0; set < res->sets; set++)
		count += res->desc[set].num;

	return count;
}
EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
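
/*
 * Example (illustrative sketch): allocating and releasing entries from a
 * ti_sci_resource. "res" is assumed to come from one of the
 * devm_ti_sci_get_*resource() helpers below; the -ENOSPC mapping is the
 * caller's choice.
 *
 *	u16 irq;
 *
 *	irq = ti_sci_get_free_resource(res);
 *	if (irq == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	...
 *	ti_sci_release_resource(res, irq);
 *
 * ti_sci_get_num_resources(res) reports the total capacity across all
 * sets, e.g. for sizing per-resource bookkeeping arrays.
 */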

/**
 * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @sub_types:	Array of TISCI resource subtypes assigned to the device
 * @sets:	Number of entries in @sub_types
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
static struct ti_sci_resource *
devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
			      struct device *dev, u32 dev_id, u32 *sub_types,
			      u32 sets)
{
	struct ti_sci_resource *res;
	bool valid_set = false;
	int i, ret;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->sets = sets;
	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
				 GFP_KERNEL);
	if (!res->desc)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < res->sets; i++) {
		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
							sub_types[i],
							&res->desc[i].start,
							&res->desc[i].num);
		if (ret) {
			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
				dev_id, sub_types[i]);
			res->desc[i].start = 0;
			res->desc[i].num = 0;
			continue;
		}

		dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
			dev_id, sub_types[i], res->desc[i].start,
			res->desc[i].num);

		valid_set = true;
		res->desc[i].res_map =
			devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
		if (!res->desc[i].res_map)
			return ERR_PTR(-ENOMEM);
	}
	raw_spin_lock_init(&res->lock);

	if (valid_set)
		return res;

	return ERR_PTR(-EINVAL);
}

/**
 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @of_prop:	property name by which the resources are represented
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
			    struct device *dev, u32 dev_id, char *of_prop)
{
	struct ti_sci_resource *res;
	u32 *sub_types;
	int sets;

	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
					       sizeof(u32));
	if (sets < 0) {
		dev_err(dev, "%s resource type ids not available\n", of_prop);
		return ERR_PTR(sets);
	}

	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
	if (!sub_types)
		return ERR_PTR(-ENOMEM);

	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
					    sets);

	kfree(sub_types);

	return res;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
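
/*
 * Example (illustrative sketch): fetching resource ranges described by a
 * DT property of u32 subtypes. "ti,sci-rm-range-example" is a
 * hypothetical property name used purely for illustration; "tsh" and
 * "dev_id" are assumed to be held by the client.
 *
 *	res = devm_ti_sci_get_of_resource(tsh, dev, dev_id,
 *					  "ti,sci-rm-range-example");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */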

/**
 * devm_ti_sci_get_resource() - Get a resource range assigned to the device
 * @handle:	TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device id to which the resource is assigned
 * @sub_type:	TISCI resource subtype representing the resource.
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
			 u32 dev_id, u32 sub_type)
{
	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
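
/*
 * Example (illustrative sketch): a single-subtype request, for when the
 * subtype is known at build time rather than described in DT. SUBTYPE is
 * a placeholder constant.
 *
 *	res = devm_ti_sci_get_resource(tsh, dev, dev_id, SUBTYPE);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */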

static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
				void *cmd)
{
	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
	const struct ti_sci_handle *handle = &info->handle;

	ti_sci_cmd_core_reboot(handle);

	/* Whether the call failed or passed, we should not still be here */
	return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 1000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msgs = 20,
	.max_msg_size = 60,
};

static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
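
/*
 * Example (illustrative sketch of a matching device-tree node; node name,
 * host id and mailbox phandles are placeholders, not a binding reference):
 *
 *	dmsc: system-controller {
 *		compatible = "ti,k2g-sci";
 *		ti,host-id = <2>;
 *		mbox-names = "rx", "tx";
 *		mboxes = <&msgmgr &rx_chan>, <&msgmgr &tx_chan>;
 *	};
 *
 * The driver requires "rx" and "tx" mailbox channels (requested by name
 * in probe below) and optionally honours "ti,host-id" and
 * "ti,system-reboot-controller".
 */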

static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;
	int reboot = 0;
	u32 h_id;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
	/* if the property is not present in DT, use a default from desc */
	if (ret < 0) {
		info->host_id = info->desc->default_host_id;
	} else {
		if (!h_id) {
			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
			info->host_id = info->desc->default_host_id;
		} else {
			info->host_id = h_id;
		}
	}

	reboot = of_property_read_bool(dev->of_node,
				       "ti,system-reboot-controller");
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;

	/*
	 * Pre-allocate messages.
	 * Never allocate more messages than can be tagged uniquely in
	 * hdr.seq: with a u8 sequence field that is at most 256. If the
	 * descriptor claims more, that is a data description bug, so
	 * fail loudly and force a fix.
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;

	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kcalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
		xfer->xfer_buf = devm_kzalloc(dev, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}

	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	ti_sci_setup_ops(info);

	if (reboot) {
		info->nb.notifier_call = tisci_reboot_handler;
		info->nb.priority = 128;

		ret = register_restart_handler(&info->nb);
		if (ret) {
			dev_err(dev, "reboot registration fail(%d)\n", ret);
			/* don't leak the mailbox channels on failure */
			goto out;
		}
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major, info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);

	return ret;
}

static int ti_sci_remove(struct platform_device *pdev)
{
	struct ti_sci_info *info;
	struct device *dev = &pdev->dev;
	int ret = 0;

	of_platform_depopulate(dev);

	info = platform_get_drvdata(pdev);

	if (info->nb.notifier_call)
		unregister_restart_handler(&info->nb);

	mutex_lock(&ti_sci_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&ti_sci_list_mutex);

	if (!ret) {
		ti_sci_debugfs_destroy(pdev, info);

		/* Safe to free channels since no more users */
		mbox_free_channel(info->chan_tx);
		mbox_free_channel(info->chan_rx);
	}

	return ret;
}

static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.remove = ti_sci_remove,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
	},
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");