core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static const guid_t *to_nfit_bus_uuid(int family)
{
	if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
			"only secondary bus families can be translated\n"))
		return NULL;

	/*
	 * The index of bus UUIDs starts immediately following the last
	 * NVDIMM/leaf family.
	 */
	return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

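/*
 * Translate the 16-bit command status and 16-bit extended status that the
 * platform returns for bus-scope commands (ARS cap/start/status and
 * clear-error) into Linux error codes.
 */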
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

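/*
 * Status translation for DIMM-scope label commands. ACPI_LABELS_LOCKED is
 * the status value the named label methods (_LSI/_LSR/_LSW) use to report
 * a locked label area; the _DSM-based commands report the locked state in
 * the extended status word instead.
 */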
#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}

 err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
 err:
	ACPI_FREE(integer);
	return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

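/*
 * Select the _DSM revision id for a family/function pair. The table marks
 * the Intel NVDIMM_INTEL_GET_MODES..NVDIMM_INTEL_FW_ACTIVATE_ARM function
 * range as revision 2; every other valid pair falls back to revision 1.
 */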
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES ...
				NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > NVDIMM_CMD_MAX)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}

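/*
 * Resolve the DSM function number for a Linux command. For ND_CMD_CALL
 * the caller supplies the family and function in the nd_cmd_pkg envelope
 * (after the reserved fields are validated); otherwise the command number
 * doubles as the function number, which is only valid for bus commands
 * and the NVDIMM_FAMILY_INTEL DIMM case.
 */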
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg, int *family)
{
	if (call_pkg) {
		int i;

		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
		*family = call_pkg->nd_family;
		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}

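/*
 * acpi_nfit_ctl() is the single entry point for bus and DIMM commands. It
 * marshals @buf into an ACPI package, prefers the named label methods
 * (_LSI/_LSR/_LSW) over _DSM where advertised, unpacks the result back
 * into @buf, and reports the translated firmware status via @cmd_rc.
 */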
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;
	int family = 0;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
	if (func < 0)
		return func;

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
			family = call_pkg->nd_family;
			if (family > NVDIMM_BUS_FAMILY_MAX ||
			    !test_bit(family, &nd_desc->bus_family_mask))
				return -EINVAL;
			family = array_index_nospec(family,
					NVDIMM_BUS_FAMILY_MAX + 1);
			dsm_mask = acpi_desc->family_dsm_mask[family];
			guid = to_nfit_bus_uuid(family);
		} else {
			dsm_mask = acpi_desc->bus_dsm_mask;
			guid = to_nfit_uuid(NFIT_DEV_BUS);
		}
		desc = nd_cmd_bus_desc(cmd);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command. For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL &&
	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
		dimm_name, cmd, family, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

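/*
 * nfit_get_smbios_id() - given an NFIT device handle, return the SMBIOS
 * physical id of the corresponding memory device and fill @flags from its
 * memdev entry; returns -ENODEV if no registered NFIT instance knows the
 * handle.
 */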
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;
	u16 physical_id;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				*flags = memdev->flags;
				physical_id = memdev->physical_id;
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				return physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

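/*
 * Interleave and flush-hint tables are variable length. These sizeof_*
 * helpers validate the header length and compute the real table size from
 * the element count, accounting for the one-element array embedded in the
 * struct definition.
 */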
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

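/*
 * Dispatch one NFIT sub-table to its type-specific handler. Returns the
 * address of the next sub-table, NULL at end-of-table (or on a zero
 * length table), or ERR_PTR(-ENOMEM) when a handler rejects the table or
 * fails to allocate.
 */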
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

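/*
 * Find the SPA-BDW range backing this DIMM's block-data-window: the
 * BDW-type SPA whose memdev entry matches both the DIMM's device handle
 * and its DCR index.
 */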
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

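/*
 * Build (or extend) an nfit_mem object for every memdev that maps the
 * given SPA range; when @spa is NULL, sweep instead for unmapped DIMMs,
 * i.e. memdev instances with a zero range_index.
 */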
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */

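/*
 * Example usage, assuming the first registered bus appears as ndbus0:
 *
 *	echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 */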
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	nfit_device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	nfit_device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */

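/*
 * Example usage, assuming the first registered bus appears as ndbus0:
 *
 *	cat /sys/bus/nd/devices/ndbus0/nfit/scrub	# e.g. "2+" while busy
 *	echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub	# request a new ARS
 */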
  1150. static ssize_t scrub_show(struct device *dev,
  1151. struct device_attribute *attr, char *buf)
  1152. {
  1153. struct nvdimm_bus_descriptor *nd_desc;
  1154. struct acpi_nfit_desc *acpi_desc;
  1155. ssize_t rc = -ENXIO;
  1156. bool busy;
  1157. nfit_device_lock(dev);
  1158. nd_desc = dev_get_drvdata(dev);
  1159. if (!nd_desc) {
  1160. nfit_device_unlock(dev);
  1161. return rc;
  1162. }
  1163. acpi_desc = to_acpi_desc(nd_desc);
  1164. mutex_lock(&acpi_desc->init_mutex);
  1165. busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
  1166. && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
  1167. rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
  1168. /* Allow an admin to poll the busy state at a higher rate */
  1169. if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
  1170. &acpi_desc->scrub_flags)) {
  1171. acpi_desc->scrub_tmo = 1;
  1172. mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
  1173. }
  1174. mutex_unlock(&acpi_desc->init_mutex);
  1175. nfit_device_unlock(dev);
  1176. return rc;
  1177. }
  1178. static ssize_t scrub_store(struct device *dev,
  1179. struct device_attribute *attr, const char *buf, size_t size)
  1180. {
  1181. struct nvdimm_bus_descriptor *nd_desc;
  1182. ssize_t rc;
  1183. long val;
  1184. rc = kstrtol(buf, 0, &val);
  1185. if (rc)
  1186. return rc;
  1187. if (val != 1)
  1188. return -EINVAL;
  1189. nfit_device_lock(dev);
  1190. nd_desc = dev_get_drvdata(dev);
  1191. if (nd_desc) {
  1192. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  1193. rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
  1194. }
  1195. nfit_device_unlock(dev);
  1196. if (rc)
  1197. return rc;
  1198. return size;
  1199. }
  1200. static DEVICE_ATTR_RW(scrub);
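
/*
 * ARS (Address Range Scrub) is only usable when the platform implements
 * the full cap/start/status command triplet, so advertise it as a unit.
 */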
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr)
		return ars_supported(nvdimm_bus) ? a->mode : 0;

	if (a == &dev_attr_firmware_activate_noidle.attr)
		return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;

	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	&dev_attr_firmware_activate_noidle.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	u16 flags = __to_nfit_memdev(nfit_mem)->flags;

	if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
		flags |= ACPI_NFIT_MEM_FLUSH_FAILED;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%s\n", nfit_mem->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t dirty_shutdown_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	&dev_attr_dirty_shutdown.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;

	if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
			&& a == &dev_attr_dirty_shutdown.attr)
		return 0;

	return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
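
/* Find the nvdimm previously registered for an NFIT device handle. */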
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	nfit_device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	nfit_device_unlock(dev->parent);
}

static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
{
	acpi_handle handle;
	acpi_status status;

	status = acpi_get_handle(adev->handle, method, &handle);
	return ACPI_SUCCESS(status);
}
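
/*
 * Latch the dirty-shutdown state and count at init time via the Intel
 * ND_INTEL_SMART DSM. Declared __weak so that test infrastructure
 * (e.g. the nfit_test harness) can override it with a mock.
 */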
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
	struct device *dev = &nfit_mem->adev->dev;
	struct nd_intel_smart smart = { 0 };
	union acpi_object in_buf = {
		.buffer.type = ACPI_TYPE_BUFFER,
		.buffer.length = 0,
	};
	union acpi_object in_obj = {
		.package.type = ACPI_TYPE_PACKAGE,
		.package.count = 1,
		.package.elements = &in_buf,
	};
	const u8 func = ND_INTEL_SMART;
	const guid_t *guid = to_nfit_uuid(nfit_mem->family);
	u8 revid = nfit_dsm_revid(nfit_mem->family, func);
	struct acpi_device *adev = nfit_mem->adev;
	acpi_handle handle = adev->handle;
	union acpi_object *out_obj;

	if ((nfit_mem->dsm_mask & (1 << func)) == 0)
		return;

	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
			|| out_obj->buffer.length < sizeof(smart)) {
		dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
				dev_name(dev));
		ACPI_FREE(out_obj);
		return;
	}
	memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
	ACPI_FREE(out_obj);

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
		if (smart.shutdown_state)
			set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
	}

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
		set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
		nfit_mem->dirty_shutdown = smart.shutdown_count;
	}
}

static void populate_shutdown_status(struct nfit_mem *nfit_mem)
{
	/*
	 * For DIMMs that provide a dynamic facility to retrieve a
	 * dirty-shutdown status and/or a dirty-shutdown count, cache
	 * these values in nfit_mem.
	 */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		nfit_intel_shutdown_status(nfit_mem);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask, label_mask;
	const guid_t *guid;
	int i;
	int family = -1;
	struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		sprintf(nfit_mem->id, "%04x-%08x",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));

	adev = to_acpi_dev(acpi_desc);
	if (!adev) {
		/* unit test case */
		populate_shutdown_status(nfit_mem);
		return 0;
	}

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}
	/*
	 * Record nfit_mem for the notification path to track back to
	 * the nfit sysfs attributes for this dimm device object.
	 */
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);

	/*
	 * There are 4 "legacy" NVDIMM command sets
	 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
	 * an EFI working group was established to constrain this
	 * proliferation. The nfit driver probes for the supported command
	 * set by GUID. Note, if you're a platform developer looking to add
	 * a new command set to this probe, consider using an existing set,
	 * or otherwise seek approval to publish the command set at
	 * http://www.uefi.org/RFIC_LIST.
	 *
	 * Note, that checking for function0 (bit0) tells us if any commands
	 * are reachable through this GUID.
	 */
	clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
			set_bit(i, &nd_desc->dimm_family_mask);
			if (family < 0 || i == default_dsm_family)
				family = i;
		}

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = NVDIMM_INTEL_CMDMASK;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
		dsm_mask = 0x1f;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	/*
	 * Function 0 is the command interrogation function, don't
	 * export it to potential userspace use, and enable it to be
	 * used as an error value in acpi_nfit_ctl().
	 */
	dsm_mask &= ~1UL;

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid,
					nfit_dsm_revid(nfit_mem->family, i),
					1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	/*
	 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
	 * due to their better semantics handling locked capacity.
	 */
	label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
		| 1 << ND_CMD_SET_CONFIG_DATA;
	if (family == NVDIMM_FAMILY_INTEL
			&& (dsm_mask & label_mask) == label_mask)
		/* skip _LS{I,R,W} enabling */;
	else {
		if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
				&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
			dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
			set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
		}

		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
			dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
			set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
		}

		/*
		 * Quirk read-only label configurations to preserve
		 * access to label-less namespaces by default.
		 */
		if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& !force_labels) {
			dev_dbg(dev, "%s: No _LSW, disable labels\n",
					dev_name(&adev_dimm->dev));
			clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
		} else
			dev_dbg(dev, "%s: Force enable labels\n",
					dev_name(&adev_dimm->dev));
	}

	populate_shutdown_status(nfit_mem);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm) {
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
			dev_set_drvdata(&adev_dimm->dev, NULL);
		}
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
{
	switch (family) {
	case NVDIMM_FAMILY_INTEL:
		return intel_security_ops;
	default:
		return NULL;
	}
}
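
/*
 * Firmware activation ops are only plumbed for Intel-family DIMMs, and
 * only when the complete firmware-activate DSM set is advertised.
 */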
static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
		struct nfit_mem *nfit_mem)
{
	unsigned long mask;
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	if (!nd_desc->fw_ops)
		return NULL;

	if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
		return NULL;

	mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
	if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
		return NULL;

	return intel_fw_ops;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
			set_bit(NDD_ALIASING, &flags);
			set_bit(NDD_LABELING, &flags);
		}

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
			/*
			 * These commands have a 1:1 correspondence
			 * between DSM payload and libnvdimm ioctl
			 * payload format.
			 */
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}

		/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
		if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
			set_bit(NDD_NOBLK, &flags);

		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		}
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq, &nfit_mem->id[0],
				acpi_nfit_get_security_ops(nfit_mem->family),
				acpi_nfit_get_fw_ops(nfit_mem));
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail" : "",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		if (!nvdimm)
			continue;

		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
 */
enum nfit_aux_cmds {
	NFIT_CMD_TRANSLATE_SPA = 5,
	NFIT_CMD_ARS_INJECT_SET = 7,
	NFIT_CMD_ARS_INJECT_CLEAR = 8,
	NFIT_CMD_ARS_INJECT_GET = 9,
};
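
/*
 * Probe the bus-scope DSMs: the standard ARS commands, the auxiliary
 * commands above, and the NVDIMM_BUS_FAMILY_INTEL firmware-activate
 * set, which also selects the bus-level firmware ops.
 */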
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	unsigned long dsm_mask, *mask;
	struct acpi_device *adev;
	int i;

	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
	set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);

	/* enable nfit_test to inject bus command emulation */
	if (acpi_desc->bus_cmd_force_en) {
		nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
		mask = &nd_desc->bus_family_mask;
		if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
			set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
			nd_desc->fw_ops = intel_bus_fw_ops;
		}
	}

	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);

	dsm_mask =
		(1 << ND_CMD_ARS_CAP) |
		(1 << ND_CMD_ARS_START) |
		(1 << ND_CMD_ARS_STATUS) |
		(1 << ND_CMD_CLEAR_ERROR) |
		(1 << NFIT_CMD_TRANSLATE_SPA) |
		(1 << NFIT_CMD_ARS_INJECT_SET) |
		(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
		(1 << NFIT_CMD_ARS_INJECT_GET);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &acpi_desc->bus_dsm_mask);

	/* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
	dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
	guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, mask);

	if (*mask == dsm_mask) {
		set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
		nd_desc->fw_ops = intel_bus_fw_ops;
	}
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;
		u32 serial_number;
		u16 vendor_id;
		u16 manufacturing_date;
		u8 manufacturing_location;
		u8 reserved[31];
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static size_t sizeof_nfit_set_info2(int num_mappings)
{
	return sizeof(struct nfit_set_info2)
		+ num_mappings * sizeof(struct nfit_set_info_map2);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

static int cmp_map2(const void *m0, const void *m1)
{
	const struct nfit_set_info_map2 *map0 = m0;
	const struct nfit_set_info_map2 *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	import_guid(&nd_set->type_guid, spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
				    == dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}
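
/*
 * Translate a linear offset within a BLK aperture to its interleaved
 * system-physical offset: decompose 'offset' into a line number plus a
 * sub-line remainder, map the line number to an entry in the interleave
 * description table (accumulating a whole-table skip for each complete
 * pass), then recombine the pieces.
 */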
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}
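
/*
 * Program a block-control-window command register. Per the BCW_*
 * encoding below: DPA in cache-line units in bits 0..47, transfer
 * length in bits 48..55, and the read/write command in bit 56.
 */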
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48) - 1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region, NULL);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
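
/*
 * Perform one lane's worth of I/O through the block data window,
 * chunking at interleave-line boundaries when the aperture is
 * interleaved, and then consulting the status register for errors.
 */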
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region, NULL);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
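
/* Cache the interleave geometry, if any, for an mmio window. */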
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
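
/*
 * Retrieve DIMM flags that affect BLK I/O; if the command is not
 * implemented, conservatively assume both the latch and read-flush
 * quirks apply.
 */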
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "missing%s%s%s\n",
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map bdw\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init bdw interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map dcr\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init dcr interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s failed get DIMM flags\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
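
/* Query the platform's ARS capabilities for the given SPA range. */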
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
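
/*
 * Kick off an ARS for the range. ARS_REQ_SHORT asks the platform to
 * return data from a previous scrub (ND_ARS_RETURN_PREV_DATA) rather
 * than start a new one.
 */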
static int ars_start(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (req_type == ARS_REQ_SHORT)
		ars_start.flags = ND_ARS_RETURN_PREV_DATA;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	return 0;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	ars_start = (struct nd_cmd_ars_start) {
		.address = ars_status->restart_address,
		.length = ars_status->restart_length,
		.type = ars_status->type,
	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->max_ars, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
	 */
	if (acpi_desc->scrub_spa != nfit_spa)
		return;

	if ((ars_status->address >= spa->address && ars_status->address
				< spa->address + spa->length)
			|| (ars_status->address < spa->address)) {
		/*
		 * Assume that if a scrub starts at an offset from the
		 * start of nfit_spa that we are in the continuation
		 * case.
		 *
		 * Otherwise, if the scrub covers the spa range, mark
		 * any pending request complete.
		 */
		if (ars_status->address + ars_status->length
				>= spa->address + spa->length)
			/* complete */;
		else
			return;
	} else
		return;

	acpi_desc->scrub_spa = NULL;
	if (nd_region) {
		dev = nd_region_dev(nd_region);
		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
	} else
		dev = acpi_desc->dev;
	dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}

static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;

	/*
	 * Ignore potentially stale results that are only refreshed
	 * after a start-ARS event.
	 */
	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
				ars_status->num_records);
		return 0;
	}

	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}
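
/*
 * Make sure an NFIT-described PMEM range shows up in the iomem
 * resource tree when the platform memory map did not already register
 * it as persistent memory.
 */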
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
			break;
		}

		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;
		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = 1;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}

static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}

static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}
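
/*
 * Build an nd_region for this SPA range: resolve its NUMA and target
 * nodes, translate persistence-domain capabilities, collect the memdev
 * mappings, and register a pmem, volatile, or virtual region as
 * appropriate.
 */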
  2555. static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
  2556. struct nfit_spa *nfit_spa)
  2557. {
  2558. static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
  2559. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  2560. struct nd_blk_region_desc ndbr_desc;
  2561. struct nd_region_desc *ndr_desc;
  2562. struct nfit_memdev *nfit_memdev;
  2563. struct nvdimm_bus *nvdimm_bus;
  2564. struct resource res;
  2565. int count = 0, rc;
  2566. if (nfit_spa->nd_region)
  2567. return 0;
  2568. if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
  2569. dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
  2570. return 0;
  2571. }
  2572. memset(&res, 0, sizeof(res));
  2573. memset(&mappings, 0, sizeof(mappings));
  2574. memset(&ndbr_desc, 0, sizeof(ndbr_desc));
  2575. res.start = spa->address;
  2576. res.end = res.start + spa->length - 1;
  2577. ndr_desc = &ndbr_desc.ndr_desc;
  2578. ndr_desc->res = &res;
  2579. ndr_desc->provider_data = nfit_spa;
  2580. ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
  2581. if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
  2582. ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
  2583. ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
  2584. } else {
  2585. ndr_desc->numa_node = NUMA_NO_NODE;
  2586. ndr_desc->target_node = NUMA_NO_NODE;
  2587. }
  2588. /* Fallback to address based numa information if node lookup failed */
  2589. if (ndr_desc->numa_node == NUMA_NO_NODE) {
  2590. ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
  2591. dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
  2592. NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
  2593. }
  2594. if (ndr_desc->target_node == NUMA_NO_NODE) {
  2595. ndr_desc->target_node = phys_to_target_node(spa->address);
  2596. dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
  2597. NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
  2598. }

	/*
	 * Persistence domain bits are hierarchical, if
	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
	 */
	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		/* range index 0 == unmapped in SPA or invalid-SPA */
		if (memdev->range_index == 0 || spa->range_index == 0)
			continue;
		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
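
/*
 * (Re)use one max_ars-sized buffer for ARS status results; an
 * existing buffer is zeroed rather than reallocated.
 */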
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status) {
		memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
		return 0;
	}

	ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	return 0;
}

static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
{
	int rc;

	if (ars_status_alloc(acpi_desc))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc))
		dev_err(acpi_desc->dev, "Failed to process ARS records\n");

	return rc;
}
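
/*
 * Run the initial short ARS pass (and queue a long pass unless
 * no_init_ars is set) before registering the region; failures to
 * start ARS degrade to plain region registration.
 */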
static int ars_register(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int rc;

	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
		return acpi_nfit_register_region(acpi_desc, nfit_spa);

	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
	if (!no_init_ars)
		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);

	switch (acpi_nfit_query_poison(acpi_desc)) {
	case 0:
	case -ENOSPC:
	case -EAGAIN:
		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
		/* shouldn't happen, try again later */
		if (rc == -EBUSY)
			break;
		if (rc) {
			set_bit(ARS_FAILED, &nfit_spa->ars_state);
			break;
		}
		clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
		rc = acpi_nfit_query_poison(acpi_desc);
		if (rc)
			break;
		acpi_desc->scrub_spa = nfit_spa;
		ars_complete(acpi_desc, nfit_spa);
		/*
		 * If ars_complete() says we didn't complete the
		 * short scrub, we'll try again with a long
		 * request.
		 */
		acpi_desc->scrub_spa = NULL;
		break;
	case -EBUSY:
	case -ENOMEM:
		/*
		 * BIOS was using ARS, wait for it to complete (or
		 * resources to become available) and then perform our
		 * own scrubs.
		 */
		break;
	default:
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
		break;
	}

	return acpi_nfit_register_region(acpi_desc, nfit_spa);
}

static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;
		ars_complete(acpi_desc, nfit_spa);
	}
}
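
/*
 * Core of the scrub state machine. Returns the number of seconds to
 * wait before the next pass (0 == idle); caller holds init_mutex.
 */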
static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
		int query_rc)
{
	unsigned int tmo = acpi_desc->scrub_tmo;
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	lockdep_assert_held(&acpi_desc->init_mutex);

	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
		return 0;

	if (query_rc == -EBUSY) {
		dev_dbg(dev, "ARS: ARS busy\n");
		return min(30U * 60U, tmo * 2);
	}
	if (query_rc == -ENOSPC) {
		dev_dbg(dev, "ARS: ARS continue\n");
		ars_continue(acpi_desc);
		return 1;
	}
	if (query_rc && query_rc != -EAGAIN) {
		unsigned long long addr, end;

		addr = acpi_desc->ars_status->address;
		end = addr + acpi_desc->ars_status->length;
		dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
				query_rc);
	}

	ars_complete_all(acpi_desc);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		enum nfit_ars_state req_type;
		int rc;

		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		/* prefer short ARS requests first */
		if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
			req_type = ARS_REQ_SHORT;
		else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
			req_type = ARS_REQ_LONG;
		else
			continue;
		rc = ars_start(acpi_desc, nfit_spa, req_type);

		dev = nd_region_dev(nfit_spa->nd_region);
		dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
				nfit_spa->spa->range_index,
				req_type == ARS_REQ_SHORT ? "short" : "long",
				rc);
		/*
		 * Hmm, we raced someone else starting ARS? Try again in
		 * a bit.
		 */
		if (rc == -EBUSY)
			return 1;
		if (rc == 0) {
			dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
					"scrub start while range %d active\n",
					acpi_desc->scrub_spa->spa->range_index);
			clear_bit(req_type, &nfit_spa->ars_state);
			acpi_desc->scrub_spa = nfit_spa;
			/*
			 * Consider this spa last for future scrub
			 * requests
			 */
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return 1;
		}

		dev_err(dev, "ARS: range %d ARS failed (%d)\n",
				nfit_spa->spa->range_index, rc);
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
	}
	return 0;
}
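
/*
 * Queue the next scrub pass @tmo seconds out and mark the scrub
 * machinery busy.
 */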
static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	/* note this should only be set from within the workqueue */
	if (tmo)
		acpi_desc->scrub_tmo = tmo;
	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
}

static void sched_ars(struct acpi_nfit_desc *acpi_desc)
{
	__sched_ars(acpi_desc, 0);
}

static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
}
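
/* Delayed-work entry point for the ARS state machine */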
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo;
	int query_rc;

	acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
	mutex_lock(&acpi_desc->init_mutex);
	query_rc = acpi_nfit_query_poison(acpi_desc);
	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
	if (tmo)
		__sched_ars(acpi_desc, tmo);
	else
		notify_ars_done(acpi_desc);
	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
}
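
/*
 * Probe ARS capabilities for a range; the range stays marked
 * ARS_FAILED unless the platform reports a scrub type that matches
 * the spa type.
 */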
static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int type = nfit_spa_type(nfit_spa->spa);
	struct nd_cmd_ars_cap ars_cap;
	int rc;

	set_bit(ARS_FAILED, &nfit_spa->ars_state);
	memset(&ars_cap, 0, sizeof(ars_cap));
	rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
	if (rc < 0)
		return;
	/* check that the supported scrub types match the spa type */
	if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
				& ND_ARS_VOLATILE) == 0)
		return;
	if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
				& ND_ARS_PERSISTENT) == 0)
		return;

	nfit_spa->max_ars = ars_cap.max_ars_out;
	nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
	acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
	clear_bit(ARS_FAILED, &nfit_spa->ars_state);
}
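
/*
 * Walk all SPA ranges: PM/VOLATILE ranges get ARS capability setup
 * and an initial scrub, other known range types register directly.
 */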
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc, do_sched_ars = 0;

	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			acpi_nfit_init_ars(acpi_desc, nfit_spa);
			break;
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			/* register regions and kick off initial ARS run */
			rc = ars_register(acpi_desc, nfit_spa);
			if (rc)
				return rc;

			/*
			 * Kick off background ARS if at least one
			 * region successfully registered ARS
			 */
			if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
				do_sched_ars++;
			break;
		case NFIT_SPA_BDW:
			/* nothing to register */
			break;
		case NFIT_SPA_DCR:
		case NFIT_SPA_VDISK:
		case NFIT_SPA_VCD:
		case NFIT_SPA_PDISK:
		case NFIT_SPA_PCD:
			/* register known regions that don't support ARS */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		default:
			/* don't register unknown regions */
			break;
		}
	}

	if (do_sched_ars)
		sched_ars(acpi_desc);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}

static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}
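
/*
 * Parse an NFIT (or _FIT) buffer and register the resulting bus,
 * dimms and regions. Safe to call again with an updated table: prior
 * entries are staged on a 'prev' list and may only be re-validated,
 * never deleted.
 */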
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
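
/*
 * Flush in-flight add/notify processing and initial registration so
 * that a probe observes a fully initialized bus.
 */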
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	nfit_device_lock(dev);
	nfit_device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race. It
	 * just needs guarantees that any ARS it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->dwork.work))
		return -EBUSY;

	return 0;
}

/*
 * Prevent security and firmware activate commands from being issued via
 * ioctl.
 */
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
	struct nd_cmd_pkg *call_pkg = buf;
	unsigned int func;

	if (nvdimm && cmd == ND_CMD_CALL &&
			call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
		func = call_pkg->nd_command;
		if (func > NVDIMM_CMD_MAX ||
		    (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
			return -EOPNOTSUPP;
	}

	/* block all non-nfit bus commands */
	if (!nvdimm && cmd == ND_CMD_CALL &&
			call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
		return -EOPNOTSUPP;

	return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}
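
/*
 * Mark all eligible PM/VOLATILE ranges for a new @req_type scrub and
 * schedule the work. Returns 0 if scheduled, -EBUSY if requests were
 * already pending, -ENOTTY if no range is eligible.
 */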
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
		enum nfit_ars_state req_type)
{
	struct device *dev = acpi_desc->dev;
	int scheduled = 0, busy = 0;
	struct nfit_spa *nfit_spa;

	mutex_lock(&acpi_desc->init_mutex);
	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int type = nfit_spa_type(nfit_spa->spa);

		if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
			continue;
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		if (test_and_set_bit(req_type, &nfit_spa->ars_state))
			busy++;
		else
			scheduled++;
	}
	if (scheduled) {
		sched_ars(acpi_desc);
		dev_dbg(dev, "ars_scan triggered\n");
	}
	mutex_unlock(&acpi_desc->init_mutex);

	if (scheduled)
		return 0;
	if (busy)
		return -EBUSY;
	return -ENOTTY;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	acpi_desc->scrub_tmo = 1;
	INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}
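
/*
 * Teardown ordering: drop off the list that nfit_handle_mce() walks,
 * cancel the scrub work, then drain the workqueue so nothing runs
 * past this point.
 */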
void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	cancel_delayed_work_sync(&acpi_desc->dwork);
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	nfit_device_lock(bus_dev);
	nfit_device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
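
/*
 * Bind to the ACPI0012 root device: prefer the _FIT method when it is
 * present, otherwise parse the static NFIT table contents.
 */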
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* The NVDIMM root device allows OS to trigger enumeration of
		 * NVDIMMs through NFIT at boot time and re-enumeration at
		 * root level via the _FIT method during runtime.
		 * It is ok to return 0 here: an nvdimm may be hotplugged
		 * later, at which point evaluating the _FIT method returns
		 * data in the format of a series of NFIT Structures.
		 */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
				(int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));
	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}

static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "no driver found for dev\n");
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}

static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);

	if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	else
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}

void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	nfit_device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	nfit_device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
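
/*
 * The BUILD_BUG_ON() checks below pin the ACPICA structure sizes this
 * driver assumes when it walks NFIT sub-tables.
 */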
static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
	guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");