// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
					     struct device_attribute *attr, \
					     char *buf) \
{ \
	struct spi_controller *ctlr = container_of(dev, \
					 struct spi_controller, dev); \
	return spi_statistics_##field##_show(&ctlr->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_controller_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
					 struct device_attribute *attr, \
					 char *buf) \
{ \
	struct spi_device *spi = to_spi_device(dev); \
	return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
	.attr = { .name = file, .mode = 0444 }, \
	.show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf) \
{ \
	unsigned long flags; \
	ssize_t len; \
	spin_lock_irqsave(&stat->lock, flags); \
	len = sprintf(buf, format_string, stat->field); \
	spin_unlock_irqrestore(&stat->lock, flags); \
	return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
				 "transfer_bytes_histo_" number, \
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret = 0;

	if (sdrv->remove)
		ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	sdrv->driver.probe = spi_drv_probe;
	sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
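
/*
 * Example (illustrative sketch, not part of this file): a minimal client
 * driver registered through the path above. The "example" names are
 * hypothetical; module_spi_driver() expands to module_init()/module_exit()
 * stubs that call spi_register_driver()/spi_unregister_driver(), which in
 * turn reach __spi_register_driver() with THIS_MODULE as @owner.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		return 0;	// bind succeeded
 *	}
 *
 *	static int example_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver	= { .name = "example" },
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *	module_spi_driver(example_driver);
 */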

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c, with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process.  Also used to
 * protect the struct idr object.
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		status = -ENODEV;
		goto done;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
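
/*
 * Example (illustrative sketch): the spi_alloc_device()/spi_add_device()
 * pairing the kernel-doc above describes. The controller pointer, chip
 * select, speed and modalias below are hypothetical values.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));
 *
 *	status = spi_add_device(spi);
 *	if (status < 0)
 *		spi_dev_put(spi);	// discard without adding
 */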

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
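
/*
 * Example (illustrative sketch): a hot-plugged adapter driver creating a
 * device out-of-band, the use case the kernel-doc above mentions. The chip
 * description is hypothetical.
 *
 *	static struct spi_board_info chip = {
 *		.modalias	= "example-sensor",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *	if (!dev)
 *		dev_err(&ctlr->dev, "cannot create example-sensor\n");
 */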

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish.  Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
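
/*
 * Example (illustrative sketch): board init code declaring hard-wired
 * devices, as the kernel-doc above suggests. Names and numbers are
 * hypothetical; the table may live in __initdata because it is copied.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-flash",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					       ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_init);
 */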

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool enable1 = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (!spi->controller->set_cs_timing) {
		if (enable1)
			spi_delay_exec(&spi->controller->cs_setup, NULL);
		else
			spi_delay_exec(&spi->controller->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing
				 * the GPIO polarity, so the SPISerialBus()
				 * resource defines it on a per-chip basis.  In
				 * order to avoid a chain of negations, the
				 * GPIO polarity is considered Active High.
				 * Even for the cases when _DSD() is involved
				 * (in the updated versions of ACPI) the GPIO
				 * CS polarity must be defined Active High to
				 * avoid ambiguity.  That's why we use enable,
				 * which takes SPI_CS_HIGH into account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
			} else {
				/*
				 * Invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (!spi->controller->set_cs_timing) {
		if (!enable1)
			spi_delay_exec(&spi->controller->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
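
/*
 * Example (illustrative sketch): how a controller driver might opt in to
 * the DMA mapping done by __spi_map_msg() above. The threshold below is
 * hypothetical; the core only maps transfers for which can_dma() returns
 * true.
 *
 *	static bool example_can_dma(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		return xfer->len > 64;	// PIO is cheaper for short transfers
 *	}
 *
 *	// set before spi_register_controller():
 *	ctlr->can_dma = example_can_dma;
 */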

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		ms = 8LL * 1000LL * xfer->len;
		do_div(ms, speed_hz);
		ms += ms + 200; /* some tolerance */

		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= 1000) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, 1000);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= 1000;
		break;
	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/* if there is no effective speed known, then approximate
		 * by underestimating with half the requested hz
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;
		delay *= DIV_ROUND_UP(1000000000, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
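
/*
 * Example (illustrative sketch): expressing a delay in clock cycles and
 * executing it against a transfer with the helpers above. The values are
 * hypothetical; the xfer argument is needed so SPI_DELAY_UNIT_SCK can be
 * converted using the transfer's (effective) clock rate.
 *
 *	struct spi_delay d = {
 *		.value	= 4,
 *		.unit	= SPI_DELAY_UNIT_SCK,	// 4 SCK cycles
 *	};
 *	int ret = spi_delay_exec(&d, xfer);
 *	if (ret)
 *		return ret;	// e.g. -EINVAL if no clock rate is known
 */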
  1046. static void _spi_transfer_cs_change_delay(struct spi_message *msg,
  1047. struct spi_transfer *xfer)
  1048. {
  1049. u32 delay = xfer->cs_change_delay.value;
  1050. u32 unit = xfer->cs_change_delay.unit;
  1051. int ret;
  1052. /* return early on "fast" mode - for everything but USECS */
  1053. if (!delay) {
  1054. if (unit == SPI_DELAY_UNIT_USECS)
  1055. _spi_transfer_delay_ns(10000);
  1056. return;
  1057. }
  1058. ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
  1059. if (ret) {
  1060. dev_err_once(&msg->spi->dev,
  1061. "Use of unsupported delay unit %i, using default of 10us\n",
  1062. unit);
  1063. _spi_transfer_delay_ns(10000);
  1064. }
  1065. }
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
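
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * whose transfer_one() returns a positive value to signal "in progress"
 * would later complete the transfer from its interrupt handler.  The
 * names foo_isr(), foo_xfer_done() and struct foo_priv are hypothetical.
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (foo_xfer_done(priv))
 *			spi_finalize_current_transfer(priv->ctlr);
 *		return IRQ_HANDLED;
 *	}
 */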
static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}
/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}
/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @tx buffer offset is greater than or
 *			    equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post, otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @tx exceeds
 *			     or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
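
/*
 * Example (illustrative sketch, not part of the original file): a PIO
 * driver could bracket each word it writes with the two helpers so the
 * core can narrow down the TX timestamp to the word of interest.
 * foo_write_word() and the priv layout are hypothetical.
 *
 *	for (i = 0; i < xfer->len / priv->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(priv->ctlr, xfer, i, false);
 *		foo_write_word(priv, i);
 *		spi_take_timestamp_post(priv->ctlr, xfer, i + 1, false);
 *	}
 */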
/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}
static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
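
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * implementing its own transfer_one_message() could peek at the queue to
 * decide whether to keep the hardware prepared between messages.
 * foo_power_down_bus() is a hypothetical helper.
 *
 *	struct spi_message *next = spi_get_next_queued_message(ctlr);
 *
 *	if (!next)
 *		foo_power_down_bus(ctlr);
 */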
/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/* In the prepare_messages callback the SPI bus has the opportunity
	 * to split a transfer into smaller chunks.
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
	 */
	spi_res_release(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	ctlr->fallback = false;
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}
/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something.  Is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
		spi->max_speed_hz = value;

	return 0;
}
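
/*
 * Example (illustrative, not part of the original file): a device tree
 * node that this parser would accept.  The compatible string and the
 * property values are hypothetical.
 *
 *	spidev@0 {
 *		compatible = "acme,foo-spi-dev";
 *		reg = <0>;
 *		spi-max-frequency = <10000000>;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;
 *	};
 */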
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;
	spi->dev.fwnode = of_fwnode_handle(nc);

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr: Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of the controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller *ctlr;
	u32 max_speed_hz;
	u32 mode;
	int irq;
	u8 bits_per_word;
	u8 chip_select;
};

static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status) ||
			    ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
				return -ENODEV;

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;
			lookup->bits_per_word = sb->data_bit_length;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	lookup.ctlr = ctlr;
	lookup.irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* found SPI in _CRS but it points to another controller */
		return AE_OK;

	if (!lookup.max_speed_hz &&
	    !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
	    ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	if (!lookup.max_speed_hz)
		return AE_OK;

	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz = lookup.max_speed_hz;
	spi->mode |= lookup.mode;
	spi->irq = lookup.irq;
	spi->bits_per_word = lookup.bits_per_word;
	spi->chip_select = lookup.chip_select;

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_controller *ctlr = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH	32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */
static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
static int match_true(struct device *dev, void *data)
{
	return 1;
}

static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	return sprintf(buf, "%s\n",
		       child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_child(&ctlr->dev, NULL, match_true);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strlcpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);
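
/*
 * Example (illustrative, not part of the original file): from userspace,
 * a slave protocol handler can be bound and unbound through this sysfs
 * attribute; per the store handler above, writing "(null)" only removes
 * the current device.  The handler name and bus number are assumptions.
 *
 *	# echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)" > /sys/class/spi_slave/spi0/slave
 */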
static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.owner		= THIS_MODULE,
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif
/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
					      unsigned int size, bool slave)
{
	struct spi_controller *ctlr;
	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

	if (!dev)
		return NULL;

	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
	if (!ctlr)
		return NULL;

	device_initialize(&ctlr->dev);
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->slave = slave;
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
		ctlr->dev.class = &spi_slave_class;
	else
		ctlr->dev.class = &spi_master_class;
	ctlr->dev.parent = dev;
	pm_suspend_ignore_children(&ctlr->dev, true);
	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

	return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
	spi_controller_put(*(struct spi_controller **)ctlr);
}

/**
 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
 * @dev: physical device of SPI controller
 * @size: how much zeroed driver-private data to allocate
 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
 * Context: can sleep
 *
 * Allocate an SPI controller and automatically release a reference on it
 * when @dev is unbound from its driver.  Drivers are thus relieved from
 * having to call spi_controller_put().
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
						   unsigned int size,
						   bool slave)
{
	struct spi_controller **ptr, *ctlr;

	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	ctlr = __spi_alloc_controller(dev, size, slave);
	if (ctlr) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
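
/*
 * Example (illustrative sketch, not part of the original file): typical
 * use from a controller driver's probe(), normally through the
 * devm_spi_alloc_master() wrapper.  struct foo_priv and foo_probe() are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_priv *priv;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		priv = spi_controller_get_devdata(ctlr);
 *		...
 *	}
 */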
#ifdef CONFIG_OF
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
			  GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
	return 0;
}
#endif
/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;
	unsigned long native_cs_mask = 0;
	unsigned int num_cs_gpios = 0;

	nb = gpiod_count(dev, "cs");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	/* No GPIOs at all is fine, else return the error */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
			num_cs_gpios++;
			continue;
		}

		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		native_cs_mask |= BIT(i);
	}

	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;

	if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
		dev_err(dev, "No unused native chip select available\n");
		return -EINVAL;
	}

	return 0;
}
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this
	 * is a valid use case.
	 * If ->mem_ops is NULL, we request that at least one of the
	 * ->transfer_xxx() methods be implemented.
	 */
	if (ctlr->mem_ops) {
		if (!ctlr->mem_ops->exec_op)
			return -EINVAL;
	} else if (!ctlr->transfer && !ctlr->transfer_one &&
		   !ctlr->transfer_one_message) {
		return -EINVAL;
	}

	return 0;
}
/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device *dev = ctlr->dev.parent;
	struct boardinfo *bi;
	int status;
	int id, first_dynamic;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (ctlr->bus_num >= 0) {
		/* devices with a fixed bus num must check-in with the num */
		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
			       ctlr->bus_num + 1, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id == -ENOSPC ? -EBUSY : id;
		ctlr->bus_num = id;
	} else if (ctlr->dev.of_node) {
		/* allocate dynamic bus number using Linux idr */
		id = of_alias_get_id(ctlr->dev.of_node, "spi");
		if (id >= 0) {
			ctlr->bus_num = id;
			mutex_lock(&board_lock);
			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
				       ctlr->bus_num + 1, GFP_KERNEL);
			mutex_unlock(&board_lock);
			if (WARN(id < 0, "couldn't get idr"))
				return id == -ENOSPC ? -EBUSY : id;
		}
	}
	if (ctlr->bus_num < 0) {
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		mutex_lock(&board_lock);
		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
			       0, GFP_KERNEL);
		mutex_unlock(&board_lock);
		if (WARN(id < 0, "couldn't get idr"))
			return id;
		ctlr->bus_num = id;
	}
	INIT_LIST_HEAD(&ctlr->queue);
	spin_lock_init(&ctlr->queue_lock);
	spin_lock_init(&ctlr->bus_lock_spinlock);
	mutex_init(&ctlr->bus_lock_mutex);
	mutex_init(&ctlr->io_mutex);
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

	if (!spi_controller_is_slave(ctlr)) {
		if (ctlr->use_gpio_descriptors) {
			status = spi_get_gpio_descs(ctlr);
			if (status)
				goto free_bus_id;
			/*
			 * A controller using GPIO descriptors always
			 * supports SPI_CS_HIGH if need be.
			 */
			ctlr->mode_bits |= SPI_CS_HIGH;
		} else {
			/* Legacy code path for GPIOs from DT */
			status = of_spi_get_gpio_numbers(ctlr);
			if (status)
				goto free_bus_id;
		}
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (!ctlr->num_chipselect) {
		status = -EINVAL;
		goto free_bus_id;
	}

	status = device_add(&ctlr->dev);
	if (status < 0)
		goto free_bus_id;
	dev_dbg(dev, "registered %s %s\n",
		spi_controller_is_slave(ctlr) ? "slave" : "master",
		dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, start the queue.  Note that we don't
	 * need the queueing logic if the driver is only supporting high-level
	 * memory operations.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			goto free_bus_id;
		}
	}
	/* add statistics */
	spin_lock_init(&ctlr->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
	return status;

free_bus_id:
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
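
/*
 * Example (illustrative sketch, not part of the original file): the tail
 * of a hypothetical probe() wiring up the mandatory hooks before
 * registering.  foo_transfer_one() and the values shown are assumptions.
 *
 *	ctlr->bus_num = -1;			// dynamic bus number
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */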
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register an SPI controller as with spi_register_controller() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
{
	return *(struct spi_controller **)res == ctlr;
}

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}
/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* Prevent addition of new devices, unregister existing ones */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_lock(&spi_add_lock);

	device_for_each_child(&ctlr->dev, NULL, __unregister);

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_master_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	device_del(&ctlr->dev);

	/* Release the last reference on the controller if its driver
	 * has not yet been converted to devm_spi_alloc_master/slave().
	 */
	if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
			 devm_spi_match_controller, ctlr))
		put_device(&ctlr->dev);

	/* free bus id */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);

	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret;

	/* Basically no-ops for non-queued controllers */
	if (!ctlr->queued)
		return 0;

	ret = spi_stop_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret;

	if (!ctlr->queued)
		return 0;

	ret = spi_start_queue(ctlr);
	if (ret)
		dev_err(&ctlr->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);
	return ctlr->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_controller (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct device *dev;
	struct spi_controller *ctlr = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_controller_match);
	if (dev)
		ctlr = container_of(dev, struct spi_controller, dev);
	/* reference got in class_find_device */
	return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
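
/*
 * Example (illustrative sketch, not part of the original file): looking
 * up a controller by bus number; the caller owns the reference that
 * class_find_device() took and must drop it when done.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		...
 *		put_device(&ctlr->dev);		// drop the reference
 *	}
 */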
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);
/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
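
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * a scratch buffer whose lifetime is tied to a message.  foo_res_release()
 * and len are hypothetical; the memory itself is freed later by
 * spi_res_release(), so the callback only needs to undo side effects.
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		// undo side effects here; kfree() is done by the core
 *	}
 *
 *	void *buf = spi_res_alloc(msg->spi, foo_res_release,
 *				  len, GFP_KERNEL);
 *	if (buf)
 *		spi_res_add(msg, buf);
 */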
/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg: the spi_message we work upon
 * @xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with the alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp: gfp flags
 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
        struct spi_message *msg,
        struct spi_transfer *xfer_first,
        size_t remove,
        size_t insert,
        spi_replaced_release_t release,
        size_t extradatasize,
        gfp_t gfp)
{
        struct spi_replaced_transfers *rxfer;
        struct spi_transfer *xfer;
        size_t i;

        /* allocate the structure using spi_res */
        rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
                              struct_size(rxfer, inserted_transfers, insert)
                              + extradatasize,
                              gfp);
        if (!rxfer)
                return ERR_PTR(-ENOMEM);

        /* the release code to invoke before running the generic release */
        rxfer->release = release;

        /* assign extradata */
        if (extradatasize)
                rxfer->extradata =
                        &rxfer->inserted_transfers[insert];

        /* init the replaced_transfers list */
        INIT_LIST_HEAD(&rxfer->replaced_transfers);

        /* assign the list_entry after which we should reinsert
         * the @replaced_transfers - it may be spi_message.messages!
         */
        rxfer->replaced_after = xfer_first->transfer_list.prev;

        /* remove the requested number of transfers */
        for (i = 0; i < remove; i++) {
                /* if the entry after replaced_after is msg->transfers,
                 * then we have been requested to remove more transfers
                 * than are in the list
                 */
                if (rxfer->replaced_after->next == &msg->transfers) {
                        dev_err(&msg->spi->dev,
                                "requested to remove more spi_transfers than are available\n");
                        /* insert replaced transfers back into the message */
                        list_splice(&rxfer->replaced_transfers,
                                    rxfer->replaced_after);

                        /* free the spi_replaced_transfers structure */
                        spi_res_free(rxfer);

                        /* and return with an error */
                        return ERR_PTR(-EINVAL);
                }

                /* remove the entry after replaced_after from the list of
                 * transfers and add it to the list of replaced_transfers
                 */
                list_move_tail(rxfer->replaced_after->next,
                               &rxfer->replaced_transfers);
        }

        /* create copies of the given xfer with identical settings,
         * based on the first transfer to be removed
         */
        for (i = 0; i < insert; i++) {
                /* we need to run in reverse order */
                xfer = &rxfer->inserted_transfers[insert - 1 - i];

                /* copy all spi_transfer data */
                memcpy(xfer, xfer_first, sizeof(*xfer));

                /* add to list */
                list_add(&xfer->transfer_list, rxfer->replaced_after);

                /* clear cs_change and delay for all but the last */
                if (i) {
                        xfer->cs_change = false;
                        xfer->delay_usecs = 0;
                        xfer->delay.value = 0;
                }
        }

        /* set up inserted */
        rxfer->inserted = insert;

        /* and register it with spi_res/spi_message */
        spi_res_add(msg, rxfer);

        return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
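
/*
 * Example (illustrative sketch): replacing one transfer with two copies of
 * itself; the caller then fixes up the lengths and buffer pointers of the
 * inserted transfers, exactly as __spi_split_transfer_maxsize() below does.
 * msg and xfer stand for an existing message and one of its transfers.
 *
 *	struct spi_replaced_transfers *srt;
 *
 *	srt = spi_replace_transfers(msg, xfer, 1, 2, NULL, 0, GFP_KERNEL);
 *	if (IS_ERR(srt))
 *		return PTR_ERR(srt);
 *	// srt->inserted_transfers[] start out as copies of *xfer and are
 *	// already linked into msg; the original is restored on release
 */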

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
                                        struct spi_message *msg,
                                        struct spi_transfer **xferp,
                                        size_t maxsize,
                                        gfp_t gfp)
{
        struct spi_transfer *xfer = *xferp, *xfers;
        struct spi_replaced_transfers *srt;
        size_t offset;
        size_t count, i;

        /* calculate how many we have to replace */
        count = DIV_ROUND_UP(xfer->len, maxsize);

        /* create replacement */
        srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
        if (IS_ERR(srt))
                return PTR_ERR(srt);
        xfers = srt->inserted_transfers;

        /* now handle each of those newly inserted spi_transfers;
         * note that the replacement spi_transfers are all preset
         * to the same values as *xferp, so tx_buf, rx_buf and len
         * are all identical (as well as most others),
         * so we just have to fix up len and the pointers.
         *
         * this also includes support for the deprecated
         * spi_message.is_dma_mapped interface
         */

        /* the first transfer just needs the length modified, so we
         * run it outside the loop
         */
        xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

        /* all the others need rx_buf/tx_buf also set */
        for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
                /* update rx_buf, tx_buf and dma */
                if (xfers[i].rx_buf)
                        xfers[i].rx_buf += offset;
                if (xfers[i].rx_dma)
                        xfers[i].rx_dma += offset;
                if (xfers[i].tx_buf)
                        xfers[i].tx_buf += offset;
                if (xfers[i].tx_dma)
                        xfers[i].tx_dma += offset;

                /* update length */
                xfers[i].len = min(maxsize, xfers[i].len - offset);
        }

        /* we set up xferp to the last entry we have inserted,
         * so that we skip those already split transfers
         */
        *xferp = &xfers[count - 1];

        /* increment statistics counters */
        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
                                       transfers_split_maxsize);
        SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
                                       transfers_split_maxsize);

        return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum length of a single transfer
 * @gfp: GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
                                struct spi_message *msg,
                                size_t maxsize,
                                gfp_t gfp)
{
        struct spi_transfer *xfer;
        int ret;

        /* iterate over the transfer_list,
         * but note that xfer is advanced to the last transfer inserted
         * to avoid checking sizes again unnecessarily (also xfer may
         * potentially belong to a different list by the time the
         * replacement has happened)
         */
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (xfer->len > maxsize) {
                        ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
                                                           maxsize, gfp);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
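
/*
 * Example (illustrative sketch): a controller driver whose DMA engine is
 * limited to 65535 bytes per transfer could split oversized transfers from
 * its prepare_message() callback. foo_prepare_message() is a hypothetical
 * name, not part of this file.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 65535,
 *						   GFP_KERNEL);
 *	}
 */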

/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
                                        u8 bits_per_word)
{
        if (ctlr->bits_per_word_mask) {
                /* Only 32 bits fit in the mask */
                if (bits_per_word > 32)
                        return -EINVAL;
                if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
                        return -EINVAL;
        }

        return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default. They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the SPI device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
        unsigned bad_bits, ugly_bits;
        int status;

        /* check mode to prevent DUAL and QUAD from being set at the
         * same time
         */
        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
            ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
                dev_err(&spi->dev,
                        "setup: can not select dual and quad at the same time\n");
                return -EINVAL;
        }

        /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
        if ((spi->mode & SPI_3WIRE) && (spi->mode &
                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
                 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
                return -EINVAL;

        /* help drivers fail *cleanly* when they need options
         * that aren't supported with their current controller;
         * SPI_CS_WORD has a fallback software implementation,
         * so it is ignored here.
         */
        bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);

        /* nothing prevents us from working with an active-high CS in case
         * it is driven by a GPIO.
         */
        if (gpio_is_valid(spi->cs_gpio))
                bad_bits &= ~SPI_CS_HIGH;

        ugly_bits = bad_bits &
                    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
                     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
        if (ugly_bits) {
                dev_warn(&spi->dev,
                         "setup: ignoring unsupported mode bits %x\n",
                         ugly_bits);
                spi->mode &= ~ugly_bits;
                bad_bits &= ~ugly_bits;
        }
        if (bad_bits) {
                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                        bad_bits);
                return -EINVAL;
        }

        if (!spi->bits_per_word)
                spi->bits_per_word = 8;

        status = __spi_validate_bits_per_word(spi->controller,
                                              spi->bits_per_word);
        if (status)
                return status;

        if (!spi->max_speed_hz)
                spi->max_speed_hz = spi->controller->max_speed_hz;

        mutex_lock(&spi->controller->io_mutex);

        if (spi->controller->setup) {
                status = spi->controller->setup(spi);
                if (status) {
                        mutex_unlock(&spi->controller->io_mutex);
                        dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
                                status);
                        return status;
                }
        }

        if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
                status = pm_runtime_get_sync(spi->controller->dev.parent);
                if (status < 0) {
                        mutex_unlock(&spi->controller->io_mutex);
                        pm_runtime_put_noidle(spi->controller->dev.parent);
                        dev_err(&spi->controller->dev, "Failed to power device: %d\n",
                                status);
                        return status;
                }

                /*
                 * We do not want to return positive value from pm_runtime_get,
                 * there are many instances of devices calling spi_setup() and
                 * checking for a non-zero return value instead of a negative
                 * return value.
                 */
                status = 0;

                spi_set_cs(spi, false, true);
                pm_runtime_mark_last_busy(spi->controller->dev.parent);
                pm_runtime_put_autosuspend(spi->controller->dev.parent);
        } else {
                spi_set_cs(spi, false, true);
        }

        mutex_unlock(&spi->controller->io_mutex);

        if (spi->rt && !spi->controller->rt) {
                spi->controller->rt = true;
                spi_set_thread_rt(spi->controller);
        }

        dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
                (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
                (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
                (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
                (spi->mode & SPI_3WIRE) ? "3wire, " : "",
                (spi->mode & SPI_LOOP) ? "loopback, " : "",
                spi->bits_per_word, spi->max_speed_hz,
                status);

        return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
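
/*
 * Example (illustrative sketch): a protocol driver adjusting device settings
 * from its probe() before issuing any transfers. foo_probe() and the chosen
 * values are hypothetical, not part of this file.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}
 */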

/**
 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
 * @spi: the device that requires specific CS timing configuration
 * @setup: CS setup time specified via @spi_delay
 * @hold: CS hold time specified via @spi_delay
 * @inactive: CS inactive delay between transfers specified via @spi_delay
 *
 * Return: zero on success, else a negative error code.
 */
int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
                      struct spi_delay *hold, struct spi_delay *inactive)
{
        size_t len;

        if (spi->controller->set_cs_timing)
                return spi->controller->set_cs_timing(spi, setup, hold,
                                                      inactive);

        if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
            (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
            (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
                dev_err(&spi->dev,
                        "Clock-cycle delays for CS not supported in SW mode\n");
                return -ENOTSUPP;
        }

        len = sizeof(struct spi_delay);

        /* copy delays to controller */
        if (setup)
                memcpy(&spi->controller->cs_setup, setup, len);
        else
                memset(&spi->controller->cs_setup, 0, len);

        if (hold)
                memcpy(&spi->controller->cs_hold, hold, len);
        else
                memset(&spi->controller->cs_hold, 0, len);

        if (inactive)
                memcpy(&spi->controller->cs_inactive, inactive, len);
        else
                memset(&spi->controller->cs_inactive, 0, len);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
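
/*
 * Example (illustrative sketch): requesting 10 us of CS setup and hold time;
 * the values are hypothetical. With no set_cs_timing() controller hook, the
 * core stores the delays and applies them in software.
 *
 *	struct spi_delay setup = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *	struct spi_delay hold = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *
 *	ret = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */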

static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
                                       struct spi_device *spi)
{
        int delay1, delay2;

        delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
        if (delay1 < 0)
                return delay1;

        delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
        if (delay2 < 0)
                return delay2;

        if (delay1 < delay2)
                memcpy(&xfer->word_delay, &spi->word_delay,
                       sizeof(xfer->word_delay));

        return 0;
}

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
        struct spi_controller *ctlr = spi->controller;
        struct spi_transfer *xfer;
        int w_size;

        if (list_empty(&message->transfers))
                return -EINVAL;

        /* If an SPI controller does not support toggling the CS line on each
         * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
         * for the CS line, we can emulate the CS-per-word hardware function by
         * splitting transfers into one-word transfers and ensuring that
         * cs_change is set for each transfer.
         */
        if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
                                          spi->cs_gpiod ||
                                          gpio_is_valid(spi->cs_gpio))) {
                size_t maxsize;
                int ret;

                maxsize = (spi->bits_per_word + 7) / 8;

                /* spi_split_transfers_maxsize() requires message->spi */
                message->spi = spi;

                ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
                                                  GFP_KERNEL);
                if (ret)
                        return ret;

                list_for_each_entry(xfer, &message->transfers, transfer_list) {
                        /* don't change cs_change on the last entry in the list */
                        if (list_is_last(&xfer->transfer_list, &message->transfers))
                                break;
                        xfer->cs_change = 1;
                }
        }

        /* Half-duplex links include original MicroWire, and ones with
         * only one data pin like SPI_3WIRE (switches direction) or where
         * either MOSI or MISO is missing. They can also be caused by
         * software limitations.
         */
        if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
            (spi->mode & SPI_3WIRE)) {
                unsigned flags = ctlr->flags;

                list_for_each_entry(xfer, &message->transfers, transfer_list) {
                        if (xfer->rx_buf && xfer->tx_buf)
                                return -EINVAL;
                        if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
                                return -EINVAL;
                        if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
                                return -EINVAL;
                }
        }

        /*
         * Set transfer bits_per_word and max speed as spi device default if
         * it is not set for this transfer.
         * Set transfer tx_nbits and rx_nbits as single transfer default
         * (SPI_NBITS_SINGLE) if it is not set for this transfer.
         * Ensure transfer word_delay is at least as long as that required by
         * the device itself.
         */
        message->frame_length = 0;
        list_for_each_entry(xfer, &message->transfers, transfer_list) {
                xfer->effective_speed_hz = 0;
                message->frame_length += xfer->len;
                if (!xfer->bits_per_word)
                        xfer->bits_per_word = spi->bits_per_word;

                if (!xfer->speed_hz)
                        xfer->speed_hz = spi->max_speed_hz;

                if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
                        xfer->speed_hz = ctlr->max_speed_hz;

                if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
                        return -EINVAL;

                /*
                 * SPI transfer length should be multiple of SPI word size
                 * where SPI word size should be power-of-two multiple
                 */
                if (xfer->bits_per_word <= 8)
                        w_size = 1;
                else if (xfer->bits_per_word <= 16)
                        w_size = 2;
                else
                        w_size = 4;

                /* No partial transfers accepted */
                if (xfer->len % w_size)
                        return -EINVAL;

                if (xfer->speed_hz && ctlr->min_speed_hz &&
                    xfer->speed_hz < ctlr->min_speed_hz)
                        return -EINVAL;

                if (xfer->tx_buf && !xfer->tx_nbits)
                        xfer->tx_nbits = SPI_NBITS_SINGLE;
                if (xfer->rx_buf && !xfer->rx_nbits)
                        xfer->rx_nbits = SPI_NBITS_SINGLE;
                /* check transfer tx/rx_nbits:
                 * 1. check the value matches one of single, dual and quad
                 * 2. check tx/rx_nbits match the mode in spi_device
                 */
                if (xfer->tx_buf) {
                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
                            xfer->tx_nbits != SPI_NBITS_DUAL &&
                            xfer->tx_nbits != SPI_NBITS_QUAD)
                                return -EINVAL;
                        if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
                            !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
                                return -EINVAL;
                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
                            !(spi->mode & SPI_TX_QUAD))
                                return -EINVAL;
                }
                /* check transfer rx_nbits */
                if (xfer->rx_buf) {
                        if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
                            xfer->rx_nbits != SPI_NBITS_DUAL &&
                            xfer->rx_nbits != SPI_NBITS_QUAD)
                                return -EINVAL;
                        if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
                            !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
                                return -EINVAL;
                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
                            !(spi->mode & SPI_RX_QUAD))
                                return -EINVAL;
                }

                if (_spi_xfer_word_delay_update(xfer, spi))
                        return -EINVAL;
        }

        message->status = -EINPROGRESS;

        return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
        struct spi_controller *ctlr = spi->controller;
        struct spi_transfer *xfer;

        /*
         * Some controllers do not support doing regular SPI transfers. Return
         * ENOTSUPP when this is the case.
         */
        if (!ctlr->transfer)
                return -ENOTSUPP;

        message->spi = spi;

        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

        trace_spi_message_submit(message);

        if (!ctlr->ptp_sts_supported) {
                list_for_each_entry(xfer, &message->transfers, transfer_list) {
                        xfer->ptp_sts_word_pre = 0;
                        ptp_read_system_prets(xfer->ptp_sts);
                }
        }

        return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
        struct spi_controller *ctlr = spi->controller;
        int ret;
        unsigned long flags;

        ret = __spi_validate(spi, message);
        if (ret != 0)
                return ret;

        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

        if (ctlr->bus_lock_flag)
                ret = -EBUSY;
        else
                ret = __spi_async(spi, message);

        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
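
/*
 * Example (illustrative sketch): submitting a message from atomic context
 * and finishing the work in the completion callback. foo_complete() and the
 * foo_priv container are hypothetical, not part of this file.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		if (priv->msg.status)
 *			dev_err(&priv->spi->dev, "transfer failed\n");
 *	}
 *
 *	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
 *	priv->msg.complete = foo_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(priv->spi, &priv->msg);
 */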

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
        struct spi_controller *ctlr = spi->controller;
        int ret;
        unsigned long flags;

        ret = __spi_validate(spi, message);
        if (ret != 0)
                return ret;

        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

        ret = __spi_async(spi, message);

        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
        complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int status;
        struct spi_controller *ctlr = spi->controller;
        unsigned long flags;

        status = __spi_validate(spi, message);
        if (status != 0)
                return status;

        message->complete = spi_complete;
        message->context = &done;
        message->spi = spi;

        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

        /* If we're not using the legacy transfer method then we will
         * try to transfer in the calling context, so this is a special
         * case. This code would be less tricky if we could remove the
         * support for driver implemented message queues.
         */
        if (ctlr->transfer == spi_queued_transfer) {
                spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

                trace_spi_message_submit(message);

                status = __spi_queued_transfer(spi, message, false);

                spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
        } else {
                status = spi_async_locked(spi, message);
        }

        if (status == 0) {
                /* Push out the messages in the calling context if we
                 * can.
                 */
                if (ctlr->transfer == spi_queued_transfer) {
                        SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
                                                       spi_sync_immediate);
                        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
                                                       spi_sync_immediate);
                        __spi_pump_messages(ctlr, false);
                }

                wait_for_completion(&done);
                status = message->status;
        }
        message->context = NULL;

        return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
        int ret;

        mutex_lock(&spi->controller->bus_lock_mutex);
        ret = __spi_sync(spi, message);
        mutex_unlock(&spi->controller->bus_lock_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
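
/*
 * Example (illustrative sketch): a synchronous command/response exchange
 * using two transfers in one message; cmd and resp are hypothetical buffers
 * owned by the caller.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd, .len = sizeof(cmd) },
 *		{ .rx_buf = resp, .len = sizeof(resp) },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, 2);
 *	ret = spi_sync(spi, &msg);
 */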

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
        return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
        unsigned long flags;

        mutex_lock(&ctlr->bus_lock_mutex);

        spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
        ctlr->bus_lock_flag = 1;
        spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

        /* mutex remains locked until spi_bus_unlock is called */

        return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
        ctlr->bus_lock_flag = 0;

        mutex_unlock(&ctlr->bus_lock_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
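
/*
 * Example (illustrative sketch): keeping two back-to-back messages atomic on
 * the bus, e.g. a read-modify-write sequence that no other device may
 * interleave with. read_msg and write_msg are hypothetical messages already
 * built by the caller.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &read_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &write_msg);
 *	spi_bus_unlock(spi->controller);
 */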

/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)

static u8       *buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
                        const void *txbuf, unsigned n_tx,
                        void *rxbuf, unsigned n_rx)
{
        static DEFINE_MUTEX(lock);

        int status;
        struct spi_message message;
        struct spi_transfer x[2];
        u8 *local_buf;

        /* Use the preallocated DMA-safe buffer if we can. We can't avoid
         * copying here (it's a pure convenience thing), but we can
         * keep heap costs out of the hot path unless someone else is
         * using the pre-allocated buffer or the transfer is too large.
         */
        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
                                    GFP_KERNEL | GFP_DMA);
                if (!local_buf)
                        return -ENOMEM;
        } else {
                local_buf = buf;
        }

        spi_message_init(&message);
        memset(x, 0, sizeof(x));
        if (n_tx) {
                x[0].len = n_tx;
                spi_message_add_tail(&x[0], &message);
        }
        if (n_rx) {
                x[1].len = n_rx;
                spi_message_add_tail(&x[1], &message);
        }

        memcpy(local_buf, txbuf, n_tx);
        x[0].tx_buf = local_buf;
        x[1].rx_buf = local_buf + n_tx;

        /* do the i/o */
        status = spi_sync(spi, &message);
        if (status == 0)
                memcpy(rxbuf, x[1].rx_buf, n_rx);

        if (x[0].tx_buf == buf)
                mutex_unlock(&lock);
        else
                kfree(local_buf);

        return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
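
/*
 * Example (illustrative sketch): reading a single register of a device that
 * expects a one-byte register address; FOO_REG_ID is a hypothetical constant.
 *
 *	u8 reg = FOO_REG_ID;
 *	u8 val;
 *
 *	ret = spi_write_then_read(spi, &reg, 1, &val, 1);
 */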

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with the returned spi_device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
        struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

        return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* the SPI controllers are not on the spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
        struct device *dev;

        dev = class_find_device_by_of_node(&spi_master_class, node);
        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
                dev = class_find_device_by_of_node(&spi_slave_class, node);
        if (!dev)
                return NULL;

        /* reference got in class_find_device */
        return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
                         void *arg)
{
        struct of_reconfig_data *rd = arg;
        struct spi_controller *ctlr;
        struct spi_device *spi;

        switch (of_reconfig_get_state_change(action, arg)) {
        case OF_RECONFIG_CHANGE_ADD:
                ctlr = of_find_spi_controller_by_node(rd->dn->parent);
                if (ctlr == NULL)
                        return NOTIFY_OK;       /* not for us */

                if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
                        put_device(&ctlr->dev);
                        return NOTIFY_OK;
                }

                spi = of_register_spi_device(ctlr, rd->dn);
                put_device(&ctlr->dev);

                if (IS_ERR(spi)) {
                        pr_err("%s: failed to create for '%pOF'\n",
                               __func__, rd->dn);
                        of_node_clear_flag(rd->dn, OF_POPULATED);
                        return notifier_from_errno(PTR_ERR(spi));
                }
                break;

        case OF_RECONFIG_CHANGE_REMOVE:
                /* already depopulated? */
                if (!of_node_check_flag(rd->dn, OF_POPULATED))
                        return NOTIFY_OK;

                /* find our device by node */
                spi = of_find_spi_device_by_node(rd->dn);
                if (spi == NULL)
                        return NOTIFY_OK;       /* no? not meant for us */

                /* unregister takes one ref away */
                spi_unregister_device(spi);

                /* and put the reference of the find */
                put_device(&spi->dev);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
        .notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
        return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
        struct device *dev;

        dev = class_find_device(&spi_master_class, NULL, adev,
                                spi_acpi_controller_match);
        if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
                dev = class_find_device(&spi_slave_class, NULL, adev,
                                        spi_acpi_controller_match);
        if (!dev)
                return NULL;

        return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
        struct device *dev;

        dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
        return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
                           void *arg)
{
        struct acpi_device *adev = arg;
        struct spi_controller *ctlr;
        struct spi_device *spi;

        switch (value) {
        case ACPI_RECONFIG_DEVICE_ADD:
                ctlr = acpi_spi_find_controller_by_adev(adev->parent);
                if (!ctlr)
                        break;

                acpi_register_spi_device(ctlr, adev);
                put_device(&ctlr->dev);
                break;
        case ACPI_RECONFIG_DEVICE_REMOVE:
                if (!acpi_device_enumerated(adev))
                        break;

                spi = acpi_spi_find_device_by_adev(adev);
                if (!spi)
                        break;

                spi_unregister_device(spi);
                put_device(&spi->dev);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
        .notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
        int status;

        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
        if (!buf) {
                status = -ENOMEM;
                goto err0;
        }

        status = bus_register(&spi_bus_type);
        if (status < 0)
                goto err1;

        status = class_register(&spi_master_class);
        if (status < 0)
                goto err2;

        if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
                status = class_register(&spi_slave_class);
                if (status < 0)
                        goto err3;
        }

        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
        if (IS_ENABLED(CONFIG_ACPI))
                WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

        return 0;

err3:
        class_unregister(&spi_master_class);
err2:
        bus_unregister(&spi_bus_type);
err1:
        kfree(buf);
        buf = NULL;
err0:
        return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);