regmap.c

// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
        return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
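
/*
 * Example (hypothetical device name): to trace early register I/O for,
 * say, the I2C device at address 0x30 on bus 1, replace the #undef
 * above with:
 *
 *        #define LOG_DEVICE "1-0030"
 *
 * regmap_should_log() then matches dev_name() for that device and the
 * basic read/write paths log each access.
 */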

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
                                 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
                          unsigned int nranges)
{
        const struct regmap_range *r;
        int i;

        for (i = 0, r = ranges; i < nranges; i++, r++)
                if (regmap_reg_in_range(reg, r))
                        return true;
        return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
                              const struct regmap_access_table *table)
{
        /* Check "no ranges" first */
        if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
                return false;

        /* In case zero "yes ranges" are supplied, any reg is OK */
        if (!table->n_yes_ranges)
                return true;

        return regmap_reg_in_ranges(reg, table->yes_ranges,
                                    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
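
/*
 * Example (hypothetical register layout): a driver can describe which
 * registers are accessible with a table instead of a callback.  Here
 * registers 0x00-0x0f are readable except for the "no" range 0x08-0x0b:
 *
 *        static const struct regmap_range my_rd_yes[] = {
 *                regmap_reg_range(0x00, 0x0f),
 *        };
 *        static const struct regmap_range my_rd_no[] = {
 *                regmap_reg_range(0x08, 0x0b),
 *        };
 *        static const struct regmap_access_table my_rd_table = {
 *                .yes_ranges = my_rd_yes,
 *                .n_yes_ranges = ARRAY_SIZE(my_rd_yes),
 *                .no_ranges = my_rd_no,
 *                .n_no_ranges = ARRAY_SIZE(my_rd_no),
 *        };
 *
 * Passed as config->rd_table, regmap_check_range_table() then rejects
 * 0x08-0x0b and anything above 0x0f.
 */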

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
        if (map->max_register && reg > map->max_register)
                return false;

        if (map->writeable_reg)
                return map->writeable_reg(map->dev, reg);

        if (map->wr_table)
                return regmap_check_range_table(map, reg, map->wr_table);

        return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
        int ret;
        unsigned int val;

        if (map->cache_type == REGCACHE_NONE)
                return false;

        if (!map->cache_ops)
                return false;

        if (map->max_register && reg > map->max_register)
                return false;

        map->lock(map->lock_arg);
        ret = regcache_read(map, reg, &val);
        map->unlock(map->lock_arg);
        if (ret)
                return false;

        return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
        if (!map->reg_read)
                return false;

        if (map->max_register && reg > map->max_register)
                return false;

        if (map->format.format_write)
                return false;

        if (map->readable_reg)
                return map->readable_reg(map->dev, reg);

        if (map->rd_table)
                return regmap_check_range_table(map, reg, map->rd_table);

        return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
        if (!map->format.format_write && !regmap_readable(map, reg))
                return false;

        if (map->volatile_reg)
                return map->volatile_reg(map->dev, reg);

        if (map->volatile_table)
                return regmap_check_range_table(map, reg, map->volatile_table);

        if (map->cache_ops)
                return false;
        else
                return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
        if (!regmap_readable(map, reg))
                return false;

        if (map->precious_reg)
                return map->precious_reg(map->dev, reg);

        if (map->precious_table)
                return regmap_check_range_table(map, reg, map->precious_table);

        return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
        if (map->writeable_noinc_reg)
                return map->writeable_noinc_reg(map->dev, reg);

        if (map->wr_noinc_table)
                return regmap_check_range_table(map, reg, map->wr_noinc_table);

        return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
        if (map->readable_noinc_reg)
                return map->readable_noinc_reg(map->dev, reg);

        if (map->rd_noinc_table)
                return regmap_check_range_table(map, reg, map->rd_noinc_table);

        return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
                                  size_t num)
{
        unsigned int i;

        for (i = 0; i < num; i++)
                if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
                        return false;

        return true;
}

static void regmap_format_12_20_write(struct regmap *map,
                                      unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        out[0] = reg >> 4;
        out[1] = (reg << 4) | (val >> 16);
        out[2] = val >> 8;
        out[3] = val;
}

static void regmap_format_2_6_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        *out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
                                     unsigned int reg, unsigned int val)
{
        __be16 *out = map->work_buf;

        *out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
        __be16 *out = map->work_buf;

        *out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
                                      unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        out[2] = val;
        out[1] = (val >> 8) | (reg << 6);
        out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
        u8 *b = buf;

        b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        u16 v = val << shift;

        memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
        u8 *b = buf;

        val <<= shift;

        b[0] = val >> 16;
        b[1] = val >> 8;
        b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        u32 v = val << shift;

        memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
        put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        u64 v = (u64) val << shift;

        memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
        const u8 *b = buf;

        return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
        return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
        return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
        u16 v = get_unaligned_be16(buf);

        memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
        u16 v = get_unaligned_le16(buf);

        memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
        u16 v;

        memcpy(&v, buf, sizeof(v));
        return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
        const u8 *b = buf;
        unsigned int ret = b[2];

        ret |= ((unsigned int)b[1]) << 8;
        ret |= ((unsigned int)b[0]) << 16;

        return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
        return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
        return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
        u32 v = get_unaligned_be32(buf);

        memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
        u32 v = get_unaligned_le32(buf);

        memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
        u32 v;

        memcpy(&v, buf, sizeof(v));
        return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
        return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
        return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
        u64 v = get_unaligned_be64(buf);

        memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
        u64 v = get_unaligned_le64(buf);

        memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
        u64 v;

        memcpy(&v, buf, sizeof(v));
        return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
        struct regmap *map = __map;

        hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
        struct regmap *map = __map;

        hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
        struct regmap *map = __map;

        hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
                                    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
        struct regmap *map = __map;

        hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
        struct regmap *map = __map;

        hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
        struct regmap *map = __map;

        hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{
}

static void regmap_lock_mutex(void *__map)
{
        struct regmap *map = __map;

        mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
        struct regmap *map = __map;

        mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
        struct regmap *map = __map;
        unsigned long flags;

        spin_lock_irqsave(&map->spinlock, flags);
        map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
        struct regmap *map = __map;

        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
        /*
         * We don't actually have anything to do here; the goal here
         * is not to manage the regmap but to provide a simple way to
         * get the regmap back given a struct device.
         */
}

static bool _regmap_range_add(struct regmap *map,
                              struct regmap_range_node *data)
{
        struct rb_root *root = &map->range_tree;
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        while (*new) {
                struct regmap_range_node *this =
                        rb_entry(*new, struct regmap_range_node, node);

                parent = *new;
                if (data->range_max < this->range_min)
                        new = &((*new)->rb_left);
                else if (data->range_min > this->range_max)
                        new = &((*new)->rb_right);
                else
                        return false;
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);

        return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
                                                      unsigned int reg)
{
        struct rb_node *node = map->range_tree.rb_node;

        while (node) {
                struct regmap_range_node *this =
                        rb_entry(node, struct regmap_range_node, node);

                if (reg < this->range_min)
                        node = node->rb_left;
                else if (reg > this->range_max)
                        node = node->rb_right;
                else
                        return this;
        }

        return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regmap_range_node *range_node;

        next = rb_first(&map->range_tree);
        while (next) {
                range_node = rb_entry(next, struct regmap_range_node, node);
                next = rb_next(&range_node->node);
                rb_erase(&range_node->node, &map->range_tree);
                kfree(range_node);
        }

        kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
        if (config->name) {
                const char *name = kstrdup_const(config->name, GFP_KERNEL);

                if (!name)
                        return -ENOMEM;

                kfree_const(map->name);
                map->name = name;
        }

        return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
                      const struct regmap_config *config)
{
        struct regmap **m;
        int ret;

        map->dev = dev;

        ret = regmap_set_name(map, config);
        if (ret)
                return ret;

        regmap_debugfs_exit(map);
        regmap_debugfs_init(map);

        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
        if (!m) {
                regmap_debugfs_exit(map);
                return -ENOMEM;
        }
        *m = map;
        devres_add(dev, m);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
                                                const struct regmap_config *config)
{
        enum regmap_endian endian;

        /* Retrieve the endianness specification from the regmap config */
        endian = config->reg_format_endian;

        /* If the regmap config specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Retrieve the endianness specification from the bus config */
        if (bus && bus->reg_format_endian_default)
                endian = bus->reg_format_endian_default;

        /* If the bus specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
                                         const struct regmap_bus *bus,
                                         const struct regmap_config *config)
{
        struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
        enum regmap_endian endian;

        /* Retrieve the endianness specification from the regmap config */
        endian = config->val_format_endian;

        /* If the regmap config specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* If the firmware node exists, try to get the endianness from it */
        if (fwnode_property_read_bool(fwnode, "big-endian"))
                endian = REGMAP_ENDIAN_BIG;
        else if (fwnode_property_read_bool(fwnode, "little-endian"))
                endian = REGMAP_ENDIAN_LITTLE;
        else if (fwnode_property_read_bool(fwnode, "native-endian"))
                endian = REGMAP_ENDIAN_NATIVE;

        /* If the endianness was specified in fwnode, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Retrieve the endianness specification from the bus config */
        if (bus && bus->val_format_endian_default)
                endian = bus->val_format_endian_default;

        /* If the bus specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
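
/*
 * Example (hypothetical config): the precedence above means a driver can
 * force a value endianness explicitly, bypassing both the firmware node
 * properties and the bus default:
 *
 *        static const struct regmap_config my_config = {
 *                .reg_bits = 8,
 *                .val_bits = 16,
 *                .val_format_endian = REGMAP_ENDIAN_LITTLE,
 *        };
 *
 * Leaving .val_format_endian as REGMAP_ENDIAN_DEFAULT instead lets a
 * devicetree "little-endian"/"big-endian" property or the bus default
 * decide, with REGMAP_ENDIAN_BIG as the final fallback.
 */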

struct regmap *__regmap_init(struct device *dev,
                             const struct regmap_bus *bus,
                             void *bus_context,
                             const struct regmap_config *config,
                             struct lock_class_key *lock_key,
                             const char *lock_name)
{
        struct regmap *map;
        int ret = -EINVAL;
        enum regmap_endian reg_endian, val_endian;
        int i, j;

        if (!config)
                goto err;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        ret = regmap_set_name(map, config);
        if (ret)
                goto err_map;

        ret = -EINVAL; /* Later error paths rely on this */

        if (config->disable_locking) {
                map->lock = map->unlock = regmap_lock_unlock_none;
                map->can_sleep = config->can_sleep;
                regmap_debugfs_disable(map);
        } else if (config->lock && config->unlock) {
                map->lock = config->lock;
                map->unlock = config->unlock;
                map->lock_arg = config->lock_arg;
                map->can_sleep = config->can_sleep;
        } else if (config->use_hwlock) {
                map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
                if (!map->hwlock) {
                        ret = -ENXIO;
                        goto err_name;
                }

                switch (config->hwlock_mode) {
                case HWLOCK_IRQSTATE:
                        map->lock = regmap_lock_hwlock_irqsave;
                        map->unlock = regmap_unlock_hwlock_irqrestore;
                        break;
                case HWLOCK_IRQ:
                        map->lock = regmap_lock_hwlock_irq;
                        map->unlock = regmap_unlock_hwlock_irq;
                        break;
                default:
                        map->lock = regmap_lock_hwlock;
                        map->unlock = regmap_unlock_hwlock;
                        break;
                }

                map->lock_arg = map;
        } else {
                if ((bus && bus->fast_io) ||
                    config->fast_io) {
                        spin_lock_init(&map->spinlock);
                        map->lock = regmap_lock_spinlock;
                        map->unlock = regmap_unlock_spinlock;
                        lockdep_set_class_and_name(&map->spinlock,
                                                   lock_key, lock_name);
                } else {
                        mutex_init(&map->mutex);
                        map->lock = regmap_lock_mutex;
                        map->unlock = regmap_unlock_mutex;
                        map->can_sleep = true;
                        lockdep_set_class_and_name(&map->mutex,
                                                   lock_key, lock_name);
                }
                map->lock_arg = map;
        }

        /*
         * When we write in fast-paths with regmap_bulk_write() don't allocate
         * scratch buffers with sleeping allocations.
         */
        if ((bus && bus->fast_io) || config->fast_io)
                map->alloc_flags = GFP_ATOMIC;
        else
                map->alloc_flags = GFP_KERNEL;

        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
        map->format.pad_bytes = config->pad_bits / 8;
        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
        map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
                        config->val_bits + config->pad_bits, 8);
        map->reg_shift = config->pad_bits % 8;
        if (config->reg_stride)
                map->reg_stride = config->reg_stride;
        else
                map->reg_stride = 1;
        if (is_power_of_2(map->reg_stride))
                map->reg_stride_order = ilog2(map->reg_stride);
        else
                map->reg_stride_order = -1;
        map->use_single_read = config->use_single_read || !bus || !bus->read;
        map->use_single_write = config->use_single_write || !bus || !bus->write;
        map->can_multi_write = config->can_multi_write && bus && bus->write;
        if (bus) {
                map->max_raw_read = bus->max_raw_read;
                map->max_raw_write = bus->max_raw_write;
        }
        map->dev = dev;
        map->bus = bus;
        map->bus_context = bus_context;
        map->max_register = config->max_register;
        map->wr_table = config->wr_table;
        map->rd_table = config->rd_table;
        map->volatile_table = config->volatile_table;
        map->precious_table = config->precious_table;
        map->wr_noinc_table = config->wr_noinc_table;
        map->rd_noinc_table = config->rd_noinc_table;
        map->writeable_reg = config->writeable_reg;
        map->readable_reg = config->readable_reg;
        map->volatile_reg = config->volatile_reg;
        map->precious_reg = config->precious_reg;
        map->writeable_noinc_reg = config->writeable_noinc_reg;
        map->readable_noinc_reg = config->readable_noinc_reg;
        map->cache_type = config->cache_type;

        spin_lock_init(&map->async_lock);
        INIT_LIST_HEAD(&map->async_list);
        INIT_LIST_HEAD(&map->async_free);
        init_waitqueue_head(&map->async_waitq);

        if (config->read_flag_mask ||
            config->write_flag_mask ||
            config->zero_flag_mask) {
                map->read_flag_mask = config->read_flag_mask;
                map->write_flag_mask = config->write_flag_mask;
        } else if (bus) {
                map->read_flag_mask = bus->read_flag_mask;
        }

        if (!bus) {
                map->reg_read = config->reg_read;
                map->reg_write = config->reg_write;

                map->defer_caching = false;
                goto skip_format_initialization;
        } else if (!bus->read || !bus->write) {
                map->reg_read = _regmap_bus_reg_read;
                map->reg_write = _regmap_bus_reg_write;
                map->reg_update_bits = bus->reg_update_bits;

                map->defer_caching = false;
                goto skip_format_initialization;
        } else {
                map->reg_read = _regmap_bus_read;
                map->reg_update_bits = bus->reg_update_bits;
        }

        reg_endian = regmap_get_reg_endian(bus, config);
        val_endian = regmap_get_val_endian(dev, bus, config);

        switch (config->reg_bits + map->reg_shift) {
        case 2:
                switch (config->val_bits) {
                case 6:
                        map->format.format_write = regmap_format_2_6_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 4:
                switch (config->val_bits) {
                case 12:
                        map->format.format_write = regmap_format_4_12_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 7:
                switch (config->val_bits) {
                case 9:
                        map->format.format_write = regmap_format_7_9_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 10:
                switch (config->val_bits) {
                case 14:
                        map->format.format_write = regmap_format_10_14_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 12:
                switch (config->val_bits) {
                case 20:
                        map->format.format_write = regmap_format_12_20_write;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 8:
                map->format.format_reg = regmap_format_8;
                break;

        case 16:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_16_be;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_reg = regmap_format_16_le;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_16_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

        case 24:
                if (reg_endian != REGMAP_ENDIAN_BIG)
                        goto err_hwlock;
                map->format.format_reg = regmap_format_24;
                break;

        case 32:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_32_be;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_reg = regmap_format_32_le;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_32_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;

#ifdef CONFIG_64BIT
        case 64:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_64_be;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_reg = regmap_format_64_le;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_64_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
#endif

        default:
                goto err_hwlock;
        }

        if (val_endian == REGMAP_ENDIAN_NATIVE)
                map->format.parse_inplace = regmap_parse_inplace_noop;

        switch (config->val_bits) {
        case 8:
                map->format.format_val = regmap_format_8;
                map->format.parse_val = regmap_parse_8;
                map->format.parse_inplace = regmap_parse_inplace_noop;
                break;
        case 16:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_16_be;
                        map->format.parse_val = regmap_parse_16_be;
                        map->format.parse_inplace = regmap_parse_16_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_16_le;
                        map->format.parse_val = regmap_parse_16_le;
                        map->format.parse_inplace = regmap_parse_16_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_16_native;
                        map->format.parse_val = regmap_parse_16_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
        case 24:
                if (val_endian != REGMAP_ENDIAN_BIG)
                        goto err_hwlock;
                map->format.format_val = regmap_format_24;
                map->format.parse_val = regmap_parse_24;
                break;
        case 32:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_32_be;
                        map->format.parse_val = regmap_parse_32_be;
                        map->format.parse_inplace = regmap_parse_32_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_32_le;
                        map->format.parse_val = regmap_parse_32_le;
                        map->format.parse_inplace = regmap_parse_32_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_32_native;
                        map->format.parse_val = regmap_parse_32_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
#ifdef CONFIG_64BIT
        case 64:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_64_be;
                        map->format.parse_val = regmap_parse_64_be;
                        map->format.parse_inplace = regmap_parse_64_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_64_le;
                        map->format.parse_val = regmap_parse_64_le;
                        map->format.parse_inplace = regmap_parse_64_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_64_native;
                        map->format.parse_val = regmap_parse_64_native;
                        break;
                default:
                        goto err_hwlock;
                }
                break;
#endif
        }

        if (map->format.format_write) {
                if ((reg_endian != REGMAP_ENDIAN_BIG) ||
                    (val_endian != REGMAP_ENDIAN_BIG))
                        goto err_hwlock;
                map->use_single_write = true;
        }

        if (!map->format.format_write &&
            !(map->format.format_reg && map->format.format_val))
                goto err_hwlock;

        map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
                goto err_hwlock;
        }

        if (map->format.format_write) {
                map->defer_caching = false;
                map->reg_write = _regmap_bus_formatted_write;
        } else if (map->format.format_val) {
                map->defer_caching = true;
                map->reg_write = _regmap_bus_raw_write;
        }

skip_format_initialization:

        map->range_tree = RB_ROOT;
        for (i = 0; i < config->num_ranges; i++) {
                const struct regmap_range_cfg *range_cfg = &config->ranges[i];
                struct regmap_range_node *new;

                /* Sanity check */
                if (range_cfg->range_max < range_cfg->range_min) {
                        dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
                                range_cfg->range_max, range_cfg->range_min);
                        goto err_range;
                }

                if (range_cfg->range_max > map->max_register) {
                        dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
                                range_cfg->range_max, map->max_register);
                        goto err_range;
                }

                if (range_cfg->selector_reg > map->max_register) {
                        dev_err(map->dev,
                                "Invalid range %d: selector out of map\n", i);
                        goto err_range;
                }

                if (range_cfg->window_len == 0) {
                        dev_err(map->dev, "Invalid range %d: window_len 0\n",
                                i);
                        goto err_range;
                }

                /* Make sure that this register range has no selector
                   or data window within its boundary */
                for (j = 0; j < config->num_ranges; j++) {
                        unsigned sel_reg = config->ranges[j].selector_reg;
                        unsigned win_min = config->ranges[j].window_start;
                        unsigned win_max = win_min +
                                           config->ranges[j].window_len - 1;

                        /* Allow data window inside its own virtual range */
                        if (j == i)
                                continue;

                        if (range_cfg->range_min <= sel_reg &&
                            sel_reg <= range_cfg->range_max) {
                                dev_err(map->dev,
                                        "Range %d: selector for %d in window\n",
                                        i, j);
                                goto err_range;
                        }

                        if (!(win_max < range_cfg->range_min ||
                              win_min > range_cfg->range_max)) {
                                dev_err(map->dev,
                                        "Range %d: window for %d in window\n",
                                        i, j);
                                goto err_range;
                        }
                }

                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL) {
                        ret = -ENOMEM;
                        goto err_range;
                }

                new->map = map;
                new->name = range_cfg->name;
                new->range_min = range_cfg->range_min;
                new->range_max = range_cfg->range_max;
                new->selector_reg = range_cfg->selector_reg;
                new->selector_mask = range_cfg->selector_mask;
                new->selector_shift = range_cfg->selector_shift;
                new->window_start = range_cfg->window_start;
                new->window_len = range_cfg->window_len;

                if (!_regmap_range_add(map, new)) {
                        dev_err(map->dev, "Failed to add range %d\n", i);
                        kfree(new);
                        goto err_range;
                }

                if (map->selector_work_buf == NULL) {
                        map->selector_work_buf =
                                kzalloc(map->format.buf_size, GFP_KERNEL);
                        if (map->selector_work_buf == NULL) {
                                ret = -ENOMEM;
                                goto err_range;
                        }
                }
        }

        ret = regcache_init(map, config);
        if (ret != 0)
                goto err_range;

        if (dev) {
                ret = regmap_attach_dev(dev, map, config);
                if (ret != 0)
                        goto err_regcache;
        } else {
                regmap_debugfs_init(map);
        }

        return map;

err_regcache:
        regcache_exit(map);
err_range:
        regmap_range_exit(map);
        kfree(map->work_buf);
err_hwlock:
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
err_name:
        kfree_const(map->name);
err_map:
        kfree(map);
err:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
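
/*
 * Example (hypothetical layout): drivers normally reach __regmap_init()
 * through a bus-specific wrapper such as regmap_init_i2c().  A config
 * with one indirectly-addressed register range might look like:
 *
 *        static const struct regmap_range_cfg my_ranges[] = {
 *                {
 *                        .range_min = 0x100,
 *                        .range_max = 0x4ff,
 *                        .selector_reg = 0xfe,
 *                        .selector_mask = 0xff,
 *                        .selector_shift = 0,
 *                        .window_start = 0x80,
 *                        .window_len = 0x80,
 *                },
 *        };
 *        static const struct regmap_config my_config = {
 *                .reg_bits = 8,
 *                .val_bits = 8,
 *                .max_register = 0x4ff,
 *                .ranges = my_ranges,
 *                .num_ranges = ARRAY_SIZE(my_ranges),
 *        };
 *
 * Accesses to the virtual range 0x100-0x4ff are then routed through the
 * 0x80-0xff data window, with the page number written to the selector
 * register 0xfe by _regmap_select_page() below.
 */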

static void devm_regmap_release(struct device *dev, void *res)
{
        regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
                                  const struct regmap_bus *bus,
                                  void *bus_context,
                                  const struct regmap_config *config,
                                  struct lock_class_key *lock_key,
                                  const char *lock_name)
{
        struct regmap **ptr, *regmap;

        ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        regmap = __regmap_init(dev, bus, bus_context, config,
                               lock_key, lock_name);
        if (!IS_ERR(regmap)) {
                *ptr = regmap;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
                              struct regmap *regmap, struct reg_field reg_field)
{
        rm_field->regmap = regmap;
        rm_field->reg = reg_field.reg;
        rm_field->shift = reg_field.lsb;
        rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
        rm_field->id_size = reg_field.id_size;
        rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
                struct regmap *regmap, struct reg_field reg_field)
{
        struct regmap_field *rm_field = devm_kzalloc(dev,
                        sizeof(*rm_field), GFP_KERNEL);
        if (!rm_field)
                return ERR_PTR(-ENOMEM);

        regmap_field_init(rm_field, regmap, reg_field);

        return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
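
/*
 * Example (hypothetical field): REG_FIELD() builds the struct reg_field
 * describing bits [3:0] of register 0x10; the resulting regmap_field is
 * freed automatically through devres:
 *
 *        static const struct reg_field my_field = REG_FIELD(0x10, 0, 3);
 *
 *        struct regmap_field *f;
 *
 *        f = devm_regmap_field_alloc(dev, map, my_field);
 *        if (IS_ERR(f))
 *                return PTR_ERR(f);
 */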

/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register fields.
 *
 * @regmap: regmap bank in which the register fields are located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
                            struct regmap_field **rm_field,
                            struct reg_field *reg_field,
                            int num_fields)
{
        struct regmap_field *rf;
        int i;

        rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
        if (!rf)
                return -ENOMEM;

        for (i = 0; i < num_fields; i++) {
                regmap_field_init(&rf[i], regmap, reg_field[i]);
                rm_field[i] = &rf[i];
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
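
/*
 * Example (hypothetical fields): all the fields share one backing
 * allocation, so a single regmap_field_bulk_free() releases them all:
 *
 *        static struct reg_field my_fields[] = {
 *                REG_FIELD(0x00, 0, 3),
 *                REG_FIELD(0x00, 4, 7),
 *        };
 *
 *        struct regmap_field *f[ARRAY_SIZE(my_fields)];
 *        int ret;
 *
 *        ret = regmap_field_bulk_alloc(map, f, my_fields,
 *                                      ARRAY_SIZE(my_fields));
 *        if (ret)
 *                return ret;
 */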

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which the register fields are located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
                                 struct regmap *regmap,
                                 struct regmap_field **rm_field,
                                 struct reg_field *reg_field,
                                 int num_fields)
{
        struct regmap_field *rf;
        int i;

        rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
        if (!rf)
                return -ENOMEM;

        for (i = 0; i < num_fields; i++) {
                regmap_field_init(&rf[i], regmap, reg_field[i]);
                rm_field[i] = &rf[i];
        }

        return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
        kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free register fields allocated using
 * devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
                                 struct regmap_field *field)
{
        devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
                            struct regmap_field *field)
{
        devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
                                        struct reg_field reg_field)
{
        struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

        if (!rm_field)
                return ERR_PTR(-ENOMEM);

        regmap_field_init(rm_field, regmap, reg_field);

        return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free a register field allocated using
 * regmap_field_alloc().
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
        kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
        int ret;

        regcache_exit(map);
        regmap_debugfs_exit(map);

        map->max_register = config->max_register;
        map->writeable_reg = config->writeable_reg;
        map->readable_reg = config->readable_reg;
        map->volatile_reg = config->volatile_reg;
        map->precious_reg = config->precious_reg;
        map->writeable_noinc_reg = config->writeable_noinc_reg;
        map->readable_noinc_reg = config->readable_noinc_reg;
        map->cache_type = config->cache_type;

        ret = regmap_set_name(map, config);
        if (ret)
                return ret;

        regmap_debugfs_init(map);

        map->cache_bypass = false;
        map->cache_only = false;

        return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
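
/*
 * Example (hypothetical flow): if probing discovers a chip revision with
 * a larger register file, a driver can update its config and rebuild the
 * cache from scratch:
 *
 *        my_config.max_register = 0x7f;
 *        ret = regmap_reinit_cache(map, &my_config);
 *        if (ret)
 *                return ret;
 *
 * Per the note above, the caller must ensure no other regmap calls race
 * with this.
 */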

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
        struct regmap_async *async;

        regcache_exit(map);
        regmap_debugfs_exit(map);
        regmap_range_exit(map);
        if (map->bus && map->bus->free_context)
                map->bus->free_context(map->bus_context);
        kfree(map->work_buf);
        while (!list_empty(&map->async_free)) {
                async = list_first_entry_or_null(&map->async_free,
                                                 struct regmap_async,
                                                 list);
                list_del(&async->list);
                kfree(async->work_buf);
                kfree(async);
        }
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
        if (map->lock == regmap_lock_mutex)
                mutex_destroy(&map->mutex);
        kfree_const(map->name);
        kfree(map->patch);
        kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
        struct regmap **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }

        /* If the user didn't specify a name, match any */
        if (data)
                return !strcmp((*r)->name, data);
        else
                return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device; if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * so generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
        struct regmap **r = devres_find(dev, dev_get_regmap_release,
                                        dev_get_regmap_match, (void *)name);

        if (!r)
                return NULL;
        return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
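
/*
 * Example (hypothetical MFD child): a function driver sitting below a
 * multi-function device commonly borrows the parent's register map:
 *
 *        struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *        if (!map)
 *                return -ENODEV;
 *
 * This works because regmap_attach_dev() registered the map as a devres
 * resource on the owning device.
 */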
  1293. /**
  1294. * regmap_get_device() - Obtain the device from a regmap
  1295. *
  1296. * @map: Register map to operate on.
  1297. *
  1298. * Returns the underlying device that the regmap has been created for.
  1299. */
  1300. struct device *regmap_get_device(struct regmap *map)
  1301. {
  1302. return map->dev;
  1303. }
  1304. EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have the selector register inside the data
	 * window. In that case the selector register is located on every
	 * page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
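
/*
 * Illustrative sketch (not part of this file): the window/selector logic
 * above is driven by struct regmap_range_cfg. A hypothetical device that
 * maps four pages of 0x100 registers through a window at 0x20, selected by
 * bits [1:0] of register 0x1f, might describe that as:
 *
 *	static const struct regmap_range_cfg example_ranges[] = {
 *		{
 *			.range_min      = 0x100,
 *			.range_max      = 0x4ff,
 *			.selector_reg   = 0x1f,
 *			.selector_mask  = 0x03,
 *			.selector_shift = 0,
 *			.window_start   = 0x20,
 *			.window_len     = 0x100,
 *		},
 *	};
 *
 * hooked up via the config's .ranges and .num_ranges fields. All addresses
 * here are hypothetical.
 */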

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}
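
/*
 * Illustrative sketch (not part of this file): the flag mask ORed into the
 * register bytes above usually comes from the bus or the regmap_config.
 * For instance, many SPI codecs signal a read by setting the top bit of
 * the register address; a driver for such a hypothetical part would set:
 *
 *	static const struct regmap_config example_config = {
 *		.reg_bits       = 8,
 *		.val_bits       = 8,
 *		.read_flag_mask = 0x80,
 *	};
 */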

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;

		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + regmap_get_offset(map, i), ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
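
/*
 * Illustrative sketch (not part of this file): typical caller usage, with a
 * hypothetical register address. Reads follow the same pattern through
 * regmap_read() further below.
 *
 *	ret = regmap_write(map, 0x10, 0xff);
 *	if (ret)
 *		dev_err(dev, "failed to write register: %d\n", ret);
 */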

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
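
/*
 * Illustrative sketch (not part of this file): streaming a firmware image
 * into a device. "FW_BASE_REG" and the firmware handling are hypothetical;
 * the data must already be laid out in the device's wire format.
 *
 *	ret = regmap_raw_write(map, FW_BASE_REG, fw->data, fw->size);
 *	if (ret)
 *		dev_err(dev, "firmware download failed: %d\n", ret);
 */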

/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 *                        register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
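
/*
 * Illustrative sketch (not part of this file): a driver flagging a
 * hypothetical TX FIFO register 0x40 so regmap_noinc_write() accepts it.
 * The FIFO register must also be marked volatile.
 *
 *	static bool example_writeable_noinc(struct device *dev,
 *					    unsigned int reg)
 *	{
 *		return reg == 0x40;
 *	}
 *
 * wired up via the config's .writeable_noinc_reg callback, then:
 *
 *	ret = regmap_noinc_write(map, 0x40, buf, len);
 */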

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
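
/*
 * Illustrative sketch (not part of this file): allocating a field for a
 * hypothetical enable bit (bit 3 of register 0x30) and setting it through
 * the regmap_field_write() wrapper, which is built on the function above.
 *
 *	static const struct reg_field enable_field = REG_FIELD(0x30, 3, 3);
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(dev, map, enable_field);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	ret = regmap_field_write(f, 1);
 */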

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
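
/*
 * Illustrative sketch (not part of this file): writing four consecutive
 * 16-bit coefficient registers in one call. The base address 0x50 and the
 * data are hypothetical; values are given in native CPU endianness and
 * formatted for the bus internally.
 *
 *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(map, 0x50, coeffs, ARRAY_SIZE(coeffs));
 */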

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;

	/*
	 * The set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {
			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;

			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;

			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);

			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
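
/*
 * Illustrative sketch (not part of this file): a power-up sequence written
 * as one reg_sequence array, including a settling delay after the first
 * write. All register addresses and values are hypothetical.
 *
 *	static const struct reg_sequence example_powerup[] = {
 *		{ .reg = 0x01, .def = 0x01, .delay_us = 100 },
 *		{ .reg = 0x02, .def = 0x3c },
 *		{ .reg = 0x03, .def = 0x80 },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, example_powerup,
 *				     ARRAY_SIZE(example_powerup));
 */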

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of registers are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
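
/*
 * Illustrative sketch (not part of this file): reading a hypothetical
 * status register and acting on one of its bits.
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(map, 0x11, &status);
 *	if (ret)
 *		return ret;
 *	if (status & BIT(0))
 *		dev_dbg(dev, "device ready\n");
 */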

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *                       register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
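
/*
 * Illustrative sketch (not part of this file): draining a hypothetical RX
 * FIFO register 0x41, which must be marked both volatile and
 * readable-noinc in the regmap_config callbacks.
 *
 *	u8 fifo_data[64];
 *
 *	ret = regmap_noinc_read(map, 0x41, fifo_data, sizeof(fifo_data));
 */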

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
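
/*
 * Illustrative sketch (not part of this file): callers normally use the
 * regmap_update_bits() wrapper from <linux/regmap.h>, which passes
 * change = NULL, async = false and force = false to the function above.
 * The register and bit positions here are hypothetical.
 *
 *	// set bits [3:2] of register 0x10 to 0b10 without touching the rest
 *	ret = regmap_update_bits(map, 0x10, GENMASK(3, 2), 0x2 << 2);
 */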

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
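
/*
 * Illustrative sketch (not part of this file): queueing several raw writes
 * asynchronously and collecting the overall status afterwards. Buffers must
 * stay valid until regmap_async_complete() returns; the "blocks" array and
 * its fields are hypothetical.
 *
 *	for (i = 0; i < n_blocks; i++)
 *		regmap_raw_write_async(map, blocks[i].reg,
 *				       blocks[i].data, blocks[i].len);
 *
 *	ret = regmap_async_complete(map);
 */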

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
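
/*
 * Illustrative sketch (not part of this file): applying a vendor errata
 * patch at probe time. The registers and values are hypothetical.
 *
 *	static const struct reg_sequence example_errata[] = {
 *		{ 0x7a, 0x01 },
 *		{ 0x7b, 0xc0 },
 *	};
 *
 *	ret = regmap_register_patch(map, example_errata,
 *				    ARRAY_SIZE(example_errata));
 */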

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);