core.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * NVM Express device driver
  4. * Copyright (c) 2011-2014, Intel Corporation.
  5. */
  6. #include <linux/blkdev.h>
  7. #include <linux/blk-mq.h>
  8. #include <linux/compat.h>
  9. #include <linux/delay.h>
  10. #include <linux/errno.h>
  11. #include <linux/hdreg.h>
  12. #include <linux/kernel.h>
  13. #include <linux/module.h>
  14. #include <linux/backing-dev.h>
  15. #include <linux/slab.h>
  16. #include <linux/types.h>
  17. #include <linux/pr.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/nvme_ioctl.h>
  20. #include <linux/pm_qos.h>
  21. #include <asm/unaligned.h>
  22. #include "nvme.h"
  23. #include "fabrics.h"
  24. #define CREATE_TRACE_POINTS
  25. #include "trace.h"
  26. #define NVME_MINORS (1U << MINORBITS)
  27. unsigned int admin_timeout = 60;
  28. module_param(admin_timeout, uint, 0644);
  29. MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
  30. EXPORT_SYMBOL_GPL(admin_timeout);
  31. unsigned int nvme_io_timeout = 30;
  32. module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
  33. MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
  34. EXPORT_SYMBOL_GPL(nvme_io_timeout);
  35. static unsigned char shutdown_timeout = 5;
  36. module_param(shutdown_timeout, byte, 0644);
  37. MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
  38. static u8 nvme_max_retries = 5;
  39. module_param_named(max_retries, nvme_max_retries, byte, 0644);
  40. MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
  41. static unsigned long default_ps_max_latency_us = 100000;
  42. module_param(default_ps_max_latency_us, ulong, 0644);
  43. MODULE_PARM_DESC(default_ps_max_latency_us,
  44. "max power saving latency for new devices; use PM QOS to change per device");
  45. static bool force_apst;
  46. module_param(force_apst, bool, 0644);
  47. MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
  48. static bool streams;
  49. module_param(streams, bool, 0644);
  50. MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
  51. /*
  52. * nvme_wq - hosts nvme related works that are not reset or delete
  53. * nvme_reset_wq - hosts nvme reset works
  54. * nvme_delete_wq - hosts nvme delete works
  55. *
  56. * nvme_wq will host works such as scan, aen handling, fw activation,
  57. * keep-alive, periodic reconnects etc. nvme_reset_wq
  58. * runs reset works which also flush works hosted on nvme_wq for
  59. * serialization purposes. nvme_delete_wq hosts controller deletion
  60. * works, which flush reset works for serialization.
  61. */
  62. struct workqueue_struct *nvme_wq;
  63. EXPORT_SYMBOL_GPL(nvme_wq);
  64. struct workqueue_struct *nvme_reset_wq;
  65. EXPORT_SYMBOL_GPL(nvme_reset_wq);
  66. struct workqueue_struct *nvme_delete_wq;
  67. EXPORT_SYMBOL_GPL(nvme_delete_wq);
  68. static LIST_HEAD(nvme_subsystems);
  69. static DEFINE_MUTEX(nvme_subsystems_lock);
  70. static DEFINE_IDA(nvme_instance_ida);
  71. static dev_t nvme_chr_devt;
  72. static struct class *nvme_class;
  73. static struct class *nvme_subsys_class;
  74. static void nvme_put_subsystem(struct nvme_subsystem *subsys);
  75. static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
  76. unsigned nsid);
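/*
 * Push a gendisk capacity change down to the whole-disk block_device so the
 * size seen through the block layer stays in sync with the namespace.
 */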
  77. static void nvme_update_bdev_size(struct gendisk *disk)
  78. {
  79. struct block_device *bdev = bdget_disk(disk, 0);
  80. if (bdev) {
  81. bd_set_nr_sectors(bdev, get_capacity(disk));
  82. bdput(bdev);
  83. }
  84. }
  85. /*
  86. * Prepare a queue for teardown.
  87. *
  88. * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
  89. * the capacity to 0 after that to avoid blocking dispatchers that may be
  90. * holding bd_mutex. This will end buffered writers dirtying pages that can't
  91. * be synced.
  92. */
  93. static void nvme_set_queue_dying(struct nvme_ns *ns)
  94. {
  95. if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
  96. return;
  97. blk_set_queue_dying(ns->queue);
  98. blk_mq_unquiesce_queue(ns->queue);
  99. set_capacity(ns->disk, 0);
  100. nvme_update_bdev_size(ns->disk);
  101. }
  102. static void nvme_queue_scan(struct nvme_ctrl *ctrl)
  103. {
  104. /*
  105. * Only queue new scan work when admin and IO queues are both alive
  106. */
  107. if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
  108. queue_work(nvme_wq, &ctrl->scan_work);
  109. }
  110. /*
  111. * Use this function to proceed with scheduling reset_work for a controller
  112. * that had previously been set to the resetting state. This is intended for
  113. * code paths that can't be interrupted by other reset attempts. A hot removal
  114. * may prevent this from succeeding.
  115. */
  116. int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
  117. {
  118. if (ctrl->state != NVME_CTRL_RESETTING)
  119. return -EBUSY;
  120. if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
  121. return -EBUSY;
  122. return 0;
  123. }
  124. EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
  125. int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
  126. {
  127. if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
  128. return -EBUSY;
  129. if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
  130. return -EBUSY;
  131. return 0;
  132. }
  133. EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
  134. int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
  135. {
  136. int ret;
  137. ret = nvme_reset_ctrl(ctrl);
  138. if (!ret) {
  139. flush_work(&ctrl->reset_work);
  140. if (ctrl->state != NVME_CTRL_LIVE)
  141. ret = -ENETRESET;
  142. }
  143. return ret;
  144. }
  145. EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
  146. static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
  147. {
  148. dev_info(ctrl->device,
  149. "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
  150. flush_work(&ctrl->reset_work);
  151. nvme_stop_ctrl(ctrl);
  152. nvme_remove_namespaces(ctrl);
  153. ctrl->ops->delete_ctrl(ctrl);
  154. nvme_uninit_ctrl(ctrl);
  155. }
  156. static void nvme_delete_ctrl_work(struct work_struct *work)
  157. {
  158. struct nvme_ctrl *ctrl =
  159. container_of(work, struct nvme_ctrl, delete_work);
  160. nvme_do_delete_ctrl(ctrl);
  161. }
  162. int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
  163. {
  164. if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
  165. return -EBUSY;
  166. if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
  167. return -EBUSY;
  168. return 0;
  169. }
  170. EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
  171. static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
  172. {
  173. /*
  174. * Keep a reference until nvme_do_delete_ctrl() completes,
  175. * since ->delete_ctrl can free the controller.
  176. */
  177. nvme_get_ctrl(ctrl);
  178. if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
  179. nvme_do_delete_ctrl(ctrl);
  180. nvme_put_ctrl(ctrl);
  181. }
  182. static blk_status_t nvme_error_status(u16 status)
  183. {
  184. switch (status & 0x7ff) {
  185. case NVME_SC_SUCCESS:
  186. return BLK_STS_OK;
  187. case NVME_SC_CAP_EXCEEDED:
  188. return BLK_STS_NOSPC;
  189. case NVME_SC_LBA_RANGE:
  190. case NVME_SC_CMD_INTERRUPTED:
  191. case NVME_SC_NS_NOT_READY:
  192. return BLK_STS_TARGET;
  193. case NVME_SC_BAD_ATTRIBUTES:
  194. case NVME_SC_ONCS_NOT_SUPPORTED:
  195. case NVME_SC_INVALID_OPCODE:
  196. case NVME_SC_INVALID_FIELD:
  197. case NVME_SC_INVALID_NS:
  198. return BLK_STS_NOTSUPP;
  199. case NVME_SC_WRITE_FAULT:
  200. case NVME_SC_READ_ERROR:
  201. case NVME_SC_UNWRITTEN_BLOCK:
  202. case NVME_SC_ACCESS_DENIED:
  203. case NVME_SC_READ_ONLY:
  204. case NVME_SC_COMPARE_FAILED:
  205. return BLK_STS_MEDIUM;
  206. case NVME_SC_GUARD_CHECK:
  207. case NVME_SC_APPTAG_CHECK:
  208. case NVME_SC_REFTAG_CHECK:
  209. case NVME_SC_INVALID_PI:
  210. return BLK_STS_PROTECTION;
  211. case NVME_SC_RESERVATION_CONFLICT:
  212. return BLK_STS_NEXUS;
  213. case NVME_SC_HOST_PATH_ERROR:
  214. return BLK_STS_TRANSPORT;
  215. case NVME_SC_ZONE_TOO_MANY_ACTIVE:
  216. return BLK_STS_ZONE_ACTIVE_RESOURCE;
  217. case NVME_SC_ZONE_TOO_MANY_OPEN:
  218. return BLK_STS_ZONE_OPEN_RESOURCE;
  219. default:
  220. return BLK_STS_IOERR;
  221. }
  222. }
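/*
 * Requeue a failed command, honouring the Command Retry Delay the controller
 * asked for: the CRD field selects one of the CRDT values reported at
 * identify time (100 ms units), converted here to a millisecond delay for
 * the requeue list.
 */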
  223. static void nvme_retry_req(struct request *req)
  224. {
  225. struct nvme_ns *ns = req->q->queuedata;
  226. unsigned long delay = 0;
  227. u16 crd;
  228. /* The mask and shift result must be <= 3 */
  229. crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
  230. if (ns && crd)
  231. delay = ns->ctrl->crdt[crd - 1] * 100;
  232. nvme_req(req)->retries++;
  233. blk_mq_requeue_request(req, false);
  234. blk_mq_delay_kick_requeue_list(req->q, delay);
  235. }
  236. enum nvme_disposition {
  237. COMPLETE,
  238. RETRY,
  239. FAILOVER,
  240. };
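/*
 * Classify a completed request: finish it as-is (success, DNR, retry budget
 * exhausted, or a dying non-multipath queue), fail it over to another path
 * for multipath path errors, or requeue it for a retry.
 */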
  241. static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
  242. {
  243. if (likely(nvme_req(req)->status == 0))
  244. return COMPLETE;
  245. if (blk_noretry_request(req) ||
  246. (nvme_req(req)->status & NVME_SC_DNR) ||
  247. nvme_req(req)->retries >= nvme_max_retries)
  248. return COMPLETE;
  249. if (req->cmd_flags & REQ_NVME_MPATH) {
  250. if (nvme_is_path_error(nvme_req(req)->status) ||
  251. blk_queue_dying(req->q))
  252. return FAILOVER;
  253. } else {
  254. if (blk_queue_dying(req->q))
  255. return COMPLETE;
  256. }
  257. return RETRY;
  258. }
  259. static inline void nvme_end_req(struct request *req)
  260. {
  261. blk_status_t status = nvme_error_status(nvme_req(req)->status);
  262. if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
  263. req_op(req) == REQ_OP_ZONE_APPEND)
  264. req->__sector = nvme_lba_to_sect(req->q->queuedata,
  265. le64_to_cpu(nvme_req(req)->result.u64));
  266. nvme_trace_bio_complete(req, status);
  267. blk_mq_end_request(req, status);
  268. }
  269. void nvme_complete_rq(struct request *req)
  270. {
  271. trace_nvme_complete_rq(req);
  272. nvme_cleanup_cmd(req);
  273. if (nvme_req(req)->ctrl->kas)
  274. nvme_req(req)->ctrl->comp_seen = true;
  275. switch (nvme_decide_disposition(req)) {
  276. case COMPLETE:
  277. nvme_end_req(req);
  278. return;
  279. case RETRY:
  280. nvme_retry_req(req);
  281. return;
  282. case FAILOVER:
  283. nvme_failover_req(req);
  284. return;
  285. }
  286. }
  287. EXPORT_SYMBOL_GPL(nvme_complete_rq);
  288. bool nvme_cancel_request(struct request *req, void *data, bool reserved)
  289. {
  290. dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
  291. "Cancelling I/O %d", req->tag);
  292. /* don't abort a request that has already completed */
  293. if (blk_mq_request_completed(req))
  294. return true;
  295. nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
  296. nvme_req(req)->flags |= NVME_REQ_CANCELLED;
  297. blk_mq_complete_request(req);
  298. return true;
  299. }
  300. EXPORT_SYMBOL_GPL(nvme_cancel_request);
  301. void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
  302. {
  303. if (ctrl->tagset) {
  304. blk_mq_tagset_busy_iter(ctrl->tagset,
  305. nvme_cancel_request, ctrl);
  306. blk_mq_tagset_wait_completed_request(ctrl->tagset);
  307. }
  308. }
  309. EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
  310. void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
  311. {
  312. if (ctrl->admin_tagset) {
  313. blk_mq_tagset_busy_iter(ctrl->admin_tagset,
  314. nvme_cancel_request, ctrl);
  315. blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
  316. }
  317. }
  318. EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
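/*
 * Controller state machine: only the transitions enumerated below are
 * allowed. Returns true if the new state was applied; waiters on state_wq
 * are woken, and requeue lists are kicked once the controller goes live.
 */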
  319. bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
  320. enum nvme_ctrl_state new_state)
  321. {
  322. enum nvme_ctrl_state old_state;
  323. unsigned long flags;
  324. bool changed = false;
  325. spin_lock_irqsave(&ctrl->lock, flags);
  326. old_state = ctrl->state;
  327. switch (new_state) {
  328. case NVME_CTRL_LIVE:
  329. switch (old_state) {
  330. case NVME_CTRL_NEW:
  331. case NVME_CTRL_RESETTING:
  332. case NVME_CTRL_CONNECTING:
  333. changed = true;
  334. fallthrough;
  335. default:
  336. break;
  337. }
  338. break;
  339. case NVME_CTRL_RESETTING:
  340. switch (old_state) {
  341. case NVME_CTRL_NEW:
  342. case NVME_CTRL_LIVE:
  343. changed = true;
  344. fallthrough;
  345. default:
  346. break;
  347. }
  348. break;
  349. case NVME_CTRL_CONNECTING:
  350. switch (old_state) {
  351. case NVME_CTRL_NEW:
  352. case NVME_CTRL_RESETTING:
  353. changed = true;
  354. fallthrough;
  355. default:
  356. break;
  357. }
  358. break;
  359. case NVME_CTRL_DELETING:
  360. switch (old_state) {
  361. case NVME_CTRL_LIVE:
  362. case NVME_CTRL_RESETTING:
  363. case NVME_CTRL_CONNECTING:
  364. changed = true;
  365. fallthrough;
  366. default:
  367. break;
  368. }
  369. break;
  370. case NVME_CTRL_DELETING_NOIO:
  371. switch (old_state) {
  372. case NVME_CTRL_DELETING:
  373. case NVME_CTRL_DEAD:
  374. changed = true;
  375. fallthrough;
  376. default:
  377. break;
  378. }
  379. break;
  380. case NVME_CTRL_DEAD:
  381. switch (old_state) {
  382. case NVME_CTRL_DELETING:
  383. changed = true;
  384. fallthrough;
  385. default:
  386. break;
  387. }
  388. break;
  389. default:
  390. break;
  391. }
  392. if (changed) {
  393. ctrl->state = new_state;
  394. wake_up_all(&ctrl->state_wq);
  395. }
  396. spin_unlock_irqrestore(&ctrl->lock, flags);
  397. if (changed && ctrl->state == NVME_CTRL_LIVE)
  398. nvme_kick_requeue_lists(ctrl);
  399. return changed;
  400. }
  401. EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
  402. /*
  403. * Returns true for sink states that can't ever transition back to live.
  404. */
  405. static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
  406. {
  407. switch (ctrl->state) {
  408. case NVME_CTRL_NEW:
  409. case NVME_CTRL_LIVE:
  410. case NVME_CTRL_RESETTING:
  411. case NVME_CTRL_CONNECTING:
  412. return false;
  413. case NVME_CTRL_DELETING:
  414. case NVME_CTRL_DELETING_NOIO:
  415. case NVME_CTRL_DEAD:
  416. return true;
  417. default:
  418. WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
  419. return true;
  420. }
  421. }
  422. /*
  423. * Waits for the controller state to be resetting, or returns false if it is
  424. * not possible to ever transition to that state.
  425. */
  426. bool nvme_wait_reset(struct nvme_ctrl *ctrl)
  427. {
  428. wait_event(ctrl->state_wq,
  429. nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
  430. nvme_state_terminal(ctrl));
  431. return ctrl->state == NVME_CTRL_RESETTING;
  432. }
  433. EXPORT_SYMBOL_GPL(nvme_wait_reset);
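/*
 * Final put of a namespace head: tear down the multipath disk, return the
 * instance number to the subsystem IDA, and drop the subsystem reference.
 */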
  434. static void nvme_free_ns_head(struct kref *ref)
  435. {
  436. struct nvme_ns_head *head =
  437. container_of(ref, struct nvme_ns_head, ref);
  438. nvme_mpath_remove_disk(head);
  439. ida_simple_remove(&head->subsys->ns_ida, head->instance);
  440. cleanup_srcu_struct(&head->srcu);
  441. nvme_put_subsystem(head->subsys);
  442. kfree(head);
  443. }
  444. static void nvme_put_ns_head(struct nvme_ns_head *head)
  445. {
  446. kref_put(&head->ref, nvme_free_ns_head);
  447. }
  448. static void nvme_free_ns(struct kref *kref)
  449. {
  450. struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
  451. if (ns->ndev)
  452. nvme_nvm_unregister(ns);
  453. put_disk(ns->disk);
  454. nvme_put_ns_head(ns->head);
  455. nvme_put_ctrl(ns->ctrl);
  456. kfree(ns);
  457. }
  458. void nvme_put_ns(struct nvme_ns *ns)
  459. {
  460. kref_put(&ns->kref, nvme_free_ns);
  461. }
  462. EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
  463. static inline void nvme_clear_nvme_request(struct request *req)
  464. {
  465. if (!(req->rq_flags & RQF_DONTPREP)) {
  466. nvme_req(req)->retries = 0;
  467. nvme_req(req)->flags = 0;
  468. req->rq_flags |= RQF_DONTPREP;
  469. }
  470. }
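/*
 * Allocate a passthrough request. NVME_QID_ANY lets blk-mq pick any hardware
 * context; an explicit queue id is mapped to the matching I/O hctx (qid 1
 * uses hctx 0, and so on).
 */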
  471. struct request *nvme_alloc_request(struct request_queue *q,
  472. struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
  473. {
  474. unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
  475. struct request *req;
  476. if (qid == NVME_QID_ANY) {
  477. req = blk_mq_alloc_request(q, op, flags);
  478. } else {
  479. req = blk_mq_alloc_request_hctx(q, op, flags,
  480. qid ? qid - 1 : 0);
  481. }
  482. if (IS_ERR(req))
  483. return req;
  484. req->cmd_flags |= REQ_FAILFAST_DRIVER;
  485. nvme_clear_nvme_request(req);
  486. nvme_req(req)->cmd = cmd;
  487. return req;
  488. }
  489. EXPORT_SYMBOL_GPL(nvme_alloc_request);
  490. static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
  491. {
  492. struct nvme_command c;
  493. memset(&c, 0, sizeof(c));
  494. c.directive.opcode = nvme_admin_directive_send;
  495. c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
  496. c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
  497. c.directive.dtype = NVME_DIR_IDENTIFY;
  498. c.directive.tdtype = NVME_DIR_STREAMS;
  499. c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
  500. return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
  501. }
  502. static int nvme_disable_streams(struct nvme_ctrl *ctrl)
  503. {
  504. return nvme_toggle_streams(ctrl, false);
  505. }
  506. static int nvme_enable_streams(struct nvme_ctrl *ctrl)
  507. {
  508. return nvme_toggle_streams(ctrl, true);
  509. }
  510. static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
  511. struct streams_directive_params *s, u32 nsid)
  512. {
  513. struct nvme_command c;
  514. memset(&c, 0, sizeof(c));
  515. memset(s, 0, sizeof(*s));
  516. c.directive.opcode = nvme_admin_directive_recv;
  517. c.directive.nsid = cpu_to_le32(nsid);
  518. c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
  519. c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
  520. c.directive.dtype = NVME_DIR_STREAMS;
  521. return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
  522. }
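/*
 * Enable Streams directives when both the controller (OACS) and the
 * 'streams' module parameter allow it, and size nr_streams to what the
 * block layer's write hints can consume (BLK_MAX_WRITE_HINTS - 1).
 */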
  523. static int nvme_configure_directives(struct nvme_ctrl *ctrl)
  524. {
  525. struct streams_directive_params s;
  526. int ret;
  527. if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
  528. return 0;
  529. if (!streams)
  530. return 0;
  531. ret = nvme_enable_streams(ctrl);
  532. if (ret)
  533. return ret;
  534. ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
  535. if (ret)
  536. goto out_disable_stream;
  537. ctrl->nssa = le16_to_cpu(s.nssa);
  538. if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
  539. dev_info(ctrl->device, "too few streams (%u) available\n",
  540. ctrl->nssa);
  541. goto out_disable_stream;
  542. }
  543. ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
  544. dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
  545. return 0;
  546. out_disable_stream:
  547. nvme_disable_streams(ctrl);
  548. return ret;
  549. }
  550. /*
  551. * Check if 'req' has a write hint associated with it. If it does, assign
  552. * a valid namespace stream to the write.
  553. */
  554. static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
  555. struct request *req, u16 *control,
  556. u32 *dsmgmt)
  557. {
  558. enum rw_hint streamid = req->write_hint;
  559. if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
  560. streamid = 0;
  561. else {
  562. streamid--;
  563. if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
  564. return;
  565. *control |= NVME_RW_DTYPE_STREAMS;
  566. *dsmgmt |= streamid << 16;
  567. }
  568. if (streamid < ARRAY_SIZE(req->q->write_hints))
  569. req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
  570. }
  571. static void nvme_setup_passthrough(struct request *req,
  572. struct nvme_command *cmd)
  573. {
  574. memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
  575. /* passthru commands should let the driver set the SGL flags */
  576. cmd->common.flags &= ~NVME_CMD_SGL_ALL;
  577. }
  578. static inline void nvme_setup_flush(struct nvme_ns *ns,
  579. struct nvme_command *cmnd)
  580. {
  581. cmnd->common.opcode = nvme_cmd_flush;
  582. cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
  583. }
  584. static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
  585. struct nvme_command *cmnd)
  586. {
  587. unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
  588. struct nvme_dsm_range *range;
  589. struct bio *bio;
  590. /*
  591. * Some devices do not consider the DSM 'Number of Ranges' field when
  592. * determining how much data to DMA. Always allocate memory for the maximum
  593. * number of segments to prevent the device from reading beyond the end of the buffer.
  594. */
  595. static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
  596. range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
  597. if (!range) {
  598. /*
  599. * If we fail to allocate our range, fall back to the controller
  600. * discard page. If that's also busy, it's safe to return
  601. * busy, as we know we can make progress once that's freed.
  602. */
  603. if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
  604. return BLK_STS_RESOURCE;
  605. range = page_address(ns->ctrl->discard_page);
  606. }
  607. __rq_for_each_bio(bio, req) {
  608. u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
  609. u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
  610. if (n < segments) {
  611. range[n].cattr = cpu_to_le32(0);
  612. range[n].nlb = cpu_to_le32(nlb);
  613. range[n].slba = cpu_to_le64(slba);
  614. }
  615. n++;
  616. }
  617. if (WARN_ON_ONCE(n != segments)) {
  618. if (virt_to_page(range) == ns->ctrl->discard_page)
  619. clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
  620. else
  621. kfree(range);
  622. return BLK_STS_IOERR;
  623. }
  624. cmnd->dsm.opcode = nvme_cmd_dsm;
  625. cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
  626. cmnd->dsm.nr = cpu_to_le32(segments - 1);
  627. cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
  628. req->special_vec.bv_page = virt_to_page(range);
  629. req->special_vec.bv_offset = offset_in_page(range);
  630. req->special_vec.bv_len = alloc_size;
  631. req->rq_flags |= RQF_SPECIAL_PAYLOAD;
  632. return BLK_STS_OK;
  633. }
  634. static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
  635. struct request *req, struct nvme_command *cmnd)
  636. {
  637. if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
  638. return nvme_setup_discard(ns, req, cmnd);
  639. cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
  640. cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
  641. cmnd->write_zeroes.slba =
  642. cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
  643. cmnd->write_zeroes.length =
  644. cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
  645. if (nvme_ns_has_pi(ns))
  646. cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
  647. else
  648. cmnd->write_zeroes.control = 0;
  649. return BLK_STS_OK;
  650. }
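/*
 * Translate a block layer read/write/zone-append request into an NVMe rw
 * command: zero-based LBA count, FUA/limited-retry/prefetch hints, an
 * optional stream directive for writes, and protection information checks
 * according to the namespace PI type.
 */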
  651. static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
  652. struct request *req, struct nvme_command *cmnd,
  653. enum nvme_opcode op)
  654. {
  655. struct nvme_ctrl *ctrl = ns->ctrl;
  656. u16 control = 0;
  657. u32 dsmgmt = 0;
  658. if (req->cmd_flags & REQ_FUA)
  659. control |= NVME_RW_FUA;
  660. if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
  661. control |= NVME_RW_LR;
  662. if (req->cmd_flags & REQ_RAHEAD)
  663. dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
  664. cmnd->rw.opcode = op;
  665. cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
  666. cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
  667. cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
  668. if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
  669. nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
  670. if (ns->ms) {
  671. /*
  672. * If formatted with metadata, the block layer always provides a
  673. * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
  674. * we enable the PRACT bit for protection information or set the
  675. * namespace capacity to zero to prevent any I/O.
  676. */
  677. if (!blk_integrity_rq(req)) {
  678. if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
  679. return BLK_STS_NOTSUPP;
  680. control |= NVME_RW_PRINFO_PRACT;
  681. }
  682. switch (ns->pi_type) {
  683. case NVME_NS_DPS_PI_TYPE3:
  684. control |= NVME_RW_PRINFO_PRCHK_GUARD;
  685. break;
  686. case NVME_NS_DPS_PI_TYPE1:
  687. case NVME_NS_DPS_PI_TYPE2:
  688. control |= NVME_RW_PRINFO_PRCHK_GUARD |
  689. NVME_RW_PRINFO_PRCHK_REF;
  690. if (op == nvme_cmd_zone_append)
  691. control |= NVME_RW_APPEND_PIREMAP;
  692. cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
  693. break;
  694. }
  695. }
  696. cmnd->rw.control = cpu_to_le16(control);
  697. cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
  698. return 0;
  699. }
  700. void nvme_cleanup_cmd(struct request *req)
  701. {
  702. if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
  703. struct nvme_ns *ns = req->rq_disk->private_data;
  704. struct page *page = req->special_vec.bv_page;
  705. if (page == ns->ctrl->discard_page)
  706. clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
  707. else
  708. kfree(page_address(page) + req->special_vec.bv_offset);
  709. }
  710. }
  711. EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
  712. blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
  713. struct nvme_command *cmd)
  714. {
  715. struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
  716. blk_status_t ret = BLK_STS_OK;
  717. nvme_clear_nvme_request(req);
  718. memset(cmd, 0, sizeof(*cmd));
  719. switch (req_op(req)) {
  720. case REQ_OP_DRV_IN:
  721. case REQ_OP_DRV_OUT:
  722. nvme_setup_passthrough(req, cmd);
  723. break;
  724. case REQ_OP_FLUSH:
  725. nvme_setup_flush(ns, cmd);
  726. break;
  727. case REQ_OP_ZONE_RESET_ALL:
  728. case REQ_OP_ZONE_RESET:
  729. ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
  730. break;
  731. case REQ_OP_ZONE_OPEN:
  732. ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
  733. break;
  734. case REQ_OP_ZONE_CLOSE:
  735. ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
  736. break;
  737. case REQ_OP_ZONE_FINISH:
  738. ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
  739. break;
  740. case REQ_OP_WRITE_ZEROES:
  741. ret = nvme_setup_write_zeroes(ns, req, cmd);
  742. break;
  743. case REQ_OP_DISCARD:
  744. ret = nvme_setup_discard(ns, req, cmd);
  745. break;
  746. case REQ_OP_READ:
  747. ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
  748. break;
  749. case REQ_OP_WRITE:
  750. ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
  751. break;
  752. case REQ_OP_ZONE_APPEND:
  753. ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
  754. break;
  755. default:
  756. WARN_ON_ONCE(1);
  757. return BLK_STS_IOERR;
  758. }
  759. if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
  760. nvme_req(req)->genctr++;
  761. cmd->common.command_id = nvme_cid(req);
  762. trace_nvme_setup_cmd(req, cmd);
  763. return ret;
  764. }
  765. EXPORT_SYMBOL_GPL(nvme_setup_cmd);
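/*
 * Completion callback and polled execution helper for synchronous
 * passthrough commands: the submitter spins in blk_poll() until the
 * request's completion fires instead of sleeping on an interrupt.
 */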
  766. static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
  767. {
  768. struct completion *waiting = rq->end_io_data;
  769. rq->end_io_data = NULL;
  770. complete(waiting);
  771. }
  772. static void nvme_execute_rq_polled(struct request_queue *q,
  773. struct gendisk *bd_disk, struct request *rq, int at_head)
  774. {
  775. DECLARE_COMPLETION_ONSTACK(wait);
  776. WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
  777. rq->cmd_flags |= REQ_HIPRI;
  778. rq->end_io_data = &wait;
  779. blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
  780. while (!completion_done(&wait)) {
  781. blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
  782. cond_resched();
  783. }
  784. }
  785. /*
  786. * Returns 0 on success. If the result is negative, it's a Linux error code;
  787. * if the result is positive, it's an NVM Express status code
  788. */
  789. int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
  790. union nvme_result *result, void *buffer, unsigned bufflen,
  791. unsigned timeout, int qid, int at_head,
  792. blk_mq_req_flags_t flags, bool poll)
  793. {
  794. struct request *req;
  795. int ret;
  796. req = nvme_alloc_request(q, cmd, flags, qid);
  797. if (IS_ERR(req))
  798. return PTR_ERR(req);
  799. req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
  800. if (buffer && bufflen) {
  801. ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
  802. if (ret)
  803. goto out;
  804. }
  805. if (poll)
  806. nvme_execute_rq_polled(req->q, NULL, req, at_head);
  807. else
  808. blk_execute_rq(req->q, NULL, req, at_head);
  809. if (result)
  810. *result = nvme_req(req)->result;
  811. if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
  812. ret = -EINTR;
  813. else
  814. ret = nvme_req(req)->status;
  815. out:
  816. blk_mq_free_request(req);
  817. return ret;
  818. }
  819. EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
  820. int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
  821. void *buffer, unsigned bufflen)
  822. {
  823. return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
  824. NVME_QID_ANY, 0, 0, false);
  825. }
  826. EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
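/*
 * Illustrative sketch (not part of the original source): callers build an
 * nvme_command on the stack and hand it to the synchronous helper, as
 * nvme_identify_ctrl() below does for an Identify Controller command:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */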
  827. static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
  828. unsigned len, u32 seed, bool write)
  829. {
  830. struct bio_integrity_payload *bip;
  831. int ret = -ENOMEM;
  832. void *buf;
  833. buf = kmalloc(len, GFP_KERNEL);
  834. if (!buf)
  835. goto out;
  836. ret = -EFAULT;
  837. if (write && copy_from_user(buf, ubuf, len))
  838. goto out_free_meta;
  839. bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
  840. if (IS_ERR(bip)) {
  841. ret = PTR_ERR(bip);
  842. goto out_free_meta;
  843. }
  844. bip->bip_iter.bi_size = len;
  845. bip->bip_iter.bi_sector = seed;
  846. ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
  847. offset_in_page(buf));
  848. if (ret == len)
  849. return buf;
  850. ret = -ENOMEM;
  851. out_free_meta:
  852. kfree(buf);
  853. out:
  854. return ERR_PTR(ret);
  855. }
  856. static u32 nvme_known_admin_effects(u8 opcode)
  857. {
  858. switch (opcode) {
  859. case nvme_admin_format_nvm:
  860. return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
  861. NVME_CMD_EFFECTS_CSE_MASK;
  862. case nvme_admin_sanitize_nvm:
  863. return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
  864. default:
  865. break;
  866. }
  867. return 0;
  868. }
  869. u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
  870. {
  871. u32 effects = 0;
  872. if (ns) {
  873. if (ns->head->effects)
  874. effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
  875. if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
  876. dev_warn(ctrl->device,
  877. "IO command:%02x has unhandled effects:%08x\n",
  878. opcode, effects);
  879. return 0;
  880. }
  881. if (ctrl->effects)
  882. effects = le32_to_cpu(ctrl->effects->acs[opcode]);
  883. effects |= nvme_known_admin_effects(opcode);
  884. return effects;
  885. }
  886. EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
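/*
 * Quiesce I/O around passthrough commands whose effects declare Command Set
 * Exclusion (CSE): freeze the multipath heads and all controller queues
 * before issuing; nvme_passthru_end() unfreezes and rescans afterwards.
 */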
  887. static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
  888. u8 opcode)
  889. {
  890. u32 effects = nvme_command_effects(ctrl, ns, opcode);
  891. /*
  892. * For simplicity, IO to all namespaces is quiesced even if the command
  893. * effects say only one namespace is affected.
  894. */
  895. if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
  896. mutex_lock(&ctrl->scan_lock);
  897. mutex_lock(&ctrl->subsys->lock);
  898. nvme_mpath_start_freeze(ctrl->subsys);
  899. nvme_mpath_wait_freeze(ctrl->subsys);
  900. nvme_start_freeze(ctrl);
  901. nvme_wait_freeze(ctrl);
  902. }
  903. return effects;
  904. }
  905. static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
  906. {
  907. if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
  908. nvme_unfreeze(ctrl);
  909. nvme_mpath_unfreeze(ctrl->subsys);
  910. mutex_unlock(&ctrl->subsys->lock);
  911. nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
  912. mutex_unlock(&ctrl->scan_lock);
  913. }
  914. if (effects & NVME_CMD_EFFECTS_CCC)
  915. nvme_init_identify(ctrl);
  916. if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
  917. nvme_queue_scan(ctrl);
  918. flush_work(&ctrl->scan_work);
  919. }
  920. }
  921. void nvme_execute_passthru_rq(struct request *rq)
  922. {
  923. struct nvme_command *cmd = nvme_req(rq)->cmd;
  924. struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
  925. struct nvme_ns *ns = rq->q->queuedata;
  926. struct gendisk *disk = ns ? ns->disk : NULL;
  927. u32 effects;
  928. effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
  929. blk_execute_rq(rq->q, disk, rq, 0);
  930. nvme_passthru_end(ctrl, effects);
  931. }
  932. EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
  933. static int nvme_submit_user_cmd(struct request_queue *q,
  934. struct nvme_command *cmd, void __user *ubuffer,
  935. unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
  936. u32 meta_seed, u64 *result, unsigned timeout)
  937. {
  938. bool write = nvme_is_write(cmd);
  939. struct nvme_ns *ns = q->queuedata;
  940. struct gendisk *disk = ns ? ns->disk : NULL;
  941. struct request *req;
  942. struct bio *bio = NULL;
  943. void *meta = NULL;
  944. int ret;
  945. req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
  946. if (IS_ERR(req))
  947. return PTR_ERR(req);
  948. req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
  949. nvme_req(req)->flags |= NVME_REQ_USERCMD;
  950. if (ubuffer && bufflen) {
  951. ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
  952. GFP_KERNEL);
  953. if (ret)
  954. goto out;
  955. bio = req->bio;
  956. bio->bi_disk = disk;
  957. if (disk && meta_buffer && meta_len) {
  958. meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
  959. meta_seed, write);
  960. if (IS_ERR(meta)) {
  961. ret = PTR_ERR(meta);
  962. goto out_unmap;
  963. }
  964. req->cmd_flags |= REQ_INTEGRITY;
  965. }
  966. }
  967. nvme_execute_passthru_rq(req);
  968. if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
  969. ret = -EINTR;
  970. else
  971. ret = nvme_req(req)->status;
  972. if (result)
  973. *result = le64_to_cpu(nvme_req(req)->result.u64);
  974. if (meta && !ret && !write) {
  975. if (copy_to_user(meta_buffer, meta, meta_len))
  976. ret = -EFAULT;
  977. }
  978. kfree(meta);
  979. out_unmap:
  980. if (bio)
  981. blk_rq_unmap_user(bio);
  982. out:
  983. blk_mq_free_request(req);
  984. return ret;
  985. }
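/*
 * Keep-alive machinery: a Keep Alive command is (re)armed every KATO seconds
 * while the controller is live or connecting. With TBKAS, observed command
 * completions (comp_seen) stand in for an explicit Keep Alive.
 */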
  986. static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
  987. {
  988. struct nvme_ctrl *ctrl = rq->end_io_data;
  989. unsigned long flags;
  990. bool startka = false;
  991. blk_mq_free_request(rq);
  992. if (status) {
  993. dev_err(ctrl->device,
  994. "failed nvme_keep_alive_end_io error=%d\n",
  995. status);
  996. return;
  997. }
  998. ctrl->comp_seen = false;
  999. spin_lock_irqsave(&ctrl->lock, flags);
  1000. if (ctrl->state == NVME_CTRL_LIVE ||
  1001. ctrl->state == NVME_CTRL_CONNECTING)
  1002. startka = true;
  1003. spin_unlock_irqrestore(&ctrl->lock, flags);
  1004. if (startka)
  1005. queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
  1006. }
  1007. static int nvme_keep_alive(struct nvme_ctrl *ctrl)
  1008. {
  1009. struct request *rq;
  1010. rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
  1011. NVME_QID_ANY);
  1012. if (IS_ERR(rq))
  1013. return PTR_ERR(rq);
  1014. rq->timeout = ctrl->kato * HZ;
  1015. rq->end_io_data = ctrl;
  1016. blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
  1017. return 0;
  1018. }
  1019. static void nvme_keep_alive_work(struct work_struct *work)
  1020. {
  1021. struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
  1022. struct nvme_ctrl, ka_work);
  1023. bool comp_seen = ctrl->comp_seen;
  1024. if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
  1025. dev_dbg(ctrl->device,
  1026. "reschedule traffic based keep-alive timer\n");
  1027. ctrl->comp_seen = false;
  1028. queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
  1029. return;
  1030. }
  1031. if (nvme_keep_alive(ctrl)) {
  1032. /* allocation failure, reset the controller */
  1033. dev_err(ctrl->device, "keep-alive failed\n");
  1034. nvme_reset_ctrl(ctrl);
  1035. return;
  1036. }
  1037. }
  1038. static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
  1039. {
  1040. if (unlikely(ctrl->kato == 0))
  1041. return;
  1042. queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
  1043. }
  1044. void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
  1045. {
  1046. if (unlikely(ctrl->kato == 0))
  1047. return;
  1048. cancel_delayed_work_sync(&ctrl->ka_work);
  1049. }
  1050. EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
  1051. /*
  1052. * In NVMe 1.0 the CNS field was just a binary controller or namespace
  1053. * flag, so sending any newer CNS opcode has a good chance of not working.
  1054. * Qemu unfortunately had that bug while reporting 1.1 version compliance
  1055. * (but not for any later version).
  1056. */
  1057. static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
  1058. {
  1059. if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
  1060. return ctrl->vs < NVME_VS(1, 2, 0);
  1061. return ctrl->vs < NVME_VS(1, 1, 0);
  1062. }
  1063. static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
  1064. {
  1065. struct nvme_command c = { };
  1066. int error;
  1067. /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
  1068. c.identify.opcode = nvme_admin_identify;
  1069. c.identify.cns = NVME_ID_CNS_CTRL;
  1070. *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
  1071. if (!*id)
  1072. return -ENOMEM;
  1073. error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
  1074. sizeof(struct nvme_id_ctrl));
  1075. if (error)
  1076. kfree(*id);
  1077. return error;
  1078. }
  1079. static bool nvme_multi_css(struct nvme_ctrl *ctrl)
  1080. {
  1081. return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
  1082. }
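/*
 * Parse a single Namespace Identification Descriptor and copy the EUI-64,
 * NGUID, UUID, or CSI value into 'ids'. Returns the payload length consumed,
 * or -1 on a bogus length; NVME_QUIRK_BOGUS_NID skips copying identifiers
 * that the device is known to report incorrectly.
 */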
  1083. static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
  1084. struct nvme_ns_id_desc *cur, bool *csi_seen)
  1085. {
  1086. const char *warn_str = "ctrl returned bogus length:";
  1087. void *data = cur;
  1088. switch (cur->nidt) {
  1089. case NVME_NIDT_EUI64:
  1090. if (cur->nidl != NVME_NIDT_EUI64_LEN) {
  1091. dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
  1092. warn_str, cur->nidl);
  1093. return -1;
  1094. }
  1095. if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
  1096. return NVME_NIDT_EUI64_LEN;
  1097. memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
  1098. return NVME_NIDT_EUI64_LEN;
  1099. case NVME_NIDT_NGUID:
  1100. if (cur->nidl != NVME_NIDT_NGUID_LEN) {
  1101. dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
  1102. warn_str, cur->nidl);
  1103. return -1;
  1104. }
  1105. if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
  1106. return NVME_NIDT_NGUID_LEN;
  1107. memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
  1108. return NVME_NIDT_NGUID_LEN;
  1109. case NVME_NIDT_UUID:
  1110. if (cur->nidl != NVME_NIDT_UUID_LEN) {
  1111. dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
  1112. warn_str, cur->nidl);
  1113. return -1;
  1114. }
  1115. if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
  1116. return NVME_NIDT_UUID_LEN;
  1117. uuid_copy(&ids->uuid, data + sizeof(*cur));
  1118. return NVME_NIDT_UUID_LEN;
  1119. case NVME_NIDT_CSI:
  1120. if (cur->nidl != NVME_NIDT_CSI_LEN) {
  1121. dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
  1122. warn_str, cur->nidl);
  1123. return -1;
  1124. }
  1125. memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
  1126. *csi_seen = true;
  1127. return NVME_NIDT_CSI_LEN;
  1128. default:
  1129. /* Skip unknown types */
  1130. return cur->nidl;
  1131. }
  1132. }
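/*
* Each entry in the Namespace Identification Descriptor list is a 4-byte
* header (nidt, nidl, reserved) followed by nidl bytes of payload.
* nvme_process_ns_desc() returns the payload length, and the caller below
* adds sizeof(*cur) to step to the next descriptor.
*/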
  1133. static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
  1134. struct nvme_ns_ids *ids)
  1135. {
  1136. struct nvme_command c = { };
  1137. bool csi_seen = false;
  1138. int status, pos, len;
  1139. void *data;
  1140. if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
  1141. return 0;
  1142. if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
  1143. return 0;
  1144. c.identify.opcode = nvme_admin_identify;
  1145. c.identify.nsid = cpu_to_le32(nsid);
  1146. c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
  1147. data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
  1148. if (!data)
  1149. return -ENOMEM;
  1150. status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
  1151. NVME_IDENTIFY_DATA_SIZE);
  1152. if (status) {
  1153. dev_warn(ctrl->device,
  1154. "Identify Descriptors failed (%d)\n", status);
  1155. goto free_data;
  1156. }
  1157. for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
  1158. struct nvme_ns_id_desc *cur = data + pos;
  1159. if (cur->nidl == 0)
  1160. break;
  1161. len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
  1162. if (len < 0)
  1163. break;
  1164. len += sizeof(*cur);
  1165. }
  1166. if (nvme_multi_css(ctrl) && !csi_seen) {
  1167. dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
  1168. nsid);
  1169. status = -EINVAL;
  1170. }
  1171. free_data:
  1172. kfree(data);
  1173. return status;
  1174. }
  1175. static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
  1176. struct nvme_ns_ids *ids, struct nvme_id_ns **id)
  1177. {
  1178. struct nvme_command c = { };
  1179. int error;
  1180. /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
  1181. c.identify.opcode = nvme_admin_identify;
  1182. c.identify.nsid = cpu_to_le32(nsid);
  1183. c.identify.cns = NVME_ID_CNS_NS;
  1184. *id = kmalloc(sizeof(**id), GFP_KERNEL);
  1185. if (!*id)
  1186. return -ENOMEM;
  1187. error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
  1188. if (error) {
  1189. dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
  1190. goto out_free_id;
  1191. }
  1192. error = NVME_SC_INVALID_NS | NVME_SC_DNR;
  1193. if ((*id)->ncap == 0) /* namespace not allocated or attached */
  1194. goto out_free_id;
  1195. if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
  1196. dev_info(ctrl->device,
  1197. "Ignoring bogus Namespace Identifiers\n");
  1198. } else {
  1199. if (ctrl->vs >= NVME_VS(1, 1, 0) &&
  1200. !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
  1201. memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
  1202. if (ctrl->vs >= NVME_VS(1, 2, 0) &&
  1203. !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
  1204. memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
  1205. }
  1206. return 0;
  1207. out_free_id:
  1208. kfree(*id);
  1209. return error;
  1210. }
  1211. static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
  1212. unsigned int dword11, void *buffer, size_t buflen, u32 *result)
  1213. {
  1214. union nvme_result res = { 0 };
  1215. struct nvme_command c;
  1216. int ret;
  1217. memset(&c, 0, sizeof(c));
  1218. c.features.opcode = op;
  1219. c.features.fid = cpu_to_le32(fid);
  1220. c.features.dword11 = cpu_to_le32(dword11);
  1221. ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
  1222. buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
  1223. if (ret >= 0 && result)
  1224. *result = le32_to_cpu(res.u32);
  1225. return ret;
  1226. }
  1227. int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
  1228. unsigned int dword11, void *buffer, size_t buflen,
  1229. u32 *result)
  1230. {
  1231. return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
  1232. buflen, result);
  1233. }
  1234. EXPORT_SYMBOL_GPL(nvme_set_features);
  1235. int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
  1236. unsigned int dword11, void *buffer, size_t buflen,
  1237. u32 *result)
  1238. {
  1239. return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
  1240. buflen, result);
  1241. }
  1242. EXPORT_SYMBOL_GPL(nvme_get_features);
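/*
* Worked example for the Number of Queues feature handled below, with
* purely illustrative values: a request for *count = 8 I/O queues is
* encoded zero-based into both halves of dword11, i.e. q_count =
* 0x00070007. The controller returns the number of submission and
* completion queues it actually allocated (also zero-based) in the two
* 16-bit halves of the result; the smaller of the two, plus one, is the
* usable queue count.
*/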
  1243. int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
  1244. {
  1245. u32 q_count = (*count - 1) | ((*count - 1) << 16);
  1246. u32 result;
  1247. int status, nr_io_queues;
  1248. status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
  1249. &result);
  1250. if (status < 0)
  1251. return status;
  1252. /*
  1253. * Degraded controllers might return an error when setting the queue
  1254. * count. We still want to be able to bring them online and offer
1255. * access to the admin queue, as that might be the only way to fix them up.
  1256. */
  1257. if (status > 0) {
  1258. dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
  1259. *count = 0;
  1260. } else {
  1261. nr_io_queues = min(result & 0xffff, result >> 16) + 1;
  1262. *count = min(*count, nr_io_queues);
  1263. }
  1264. return 0;
  1265. }
  1266. EXPORT_SYMBOL_GPL(nvme_set_queue_count);
  1267. #define NVME_AEN_SUPPORTED \
  1268. (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
  1269. NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
  1270. static void nvme_enable_aen(struct nvme_ctrl *ctrl)
  1271. {
  1272. u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
  1273. int status;
  1274. if (!supported_aens)
  1275. return;
  1276. status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
  1277. NULL, 0, &result);
  1278. if (status)
  1279. dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
  1280. supported_aens);
  1281. queue_work(nvme_wq, &ctrl->async_event_work);
  1282. }
  1283. /*
  1284. * Convert integer values from ioctl structures to user pointers, silently
1285. * ignoring the upper bits in the compat case to match the behaviour of 32-bit
  1286. * kernels.
  1287. */
  1288. static void __user *nvme_to_user_ptr(uintptr_t ptrval)
  1289. {
  1290. if (in_compat_syscall())
  1291. ptrval = (compat_uptr_t)ptrval;
  1292. return (void __user *)ptrval;
  1293. }
  1294. static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
  1295. {
  1296. struct nvme_user_io io;
  1297. struct nvme_command c;
  1298. unsigned length, meta_len;
  1299. void __user *metadata;
  1300. if (copy_from_user(&io, uio, sizeof(io)))
  1301. return -EFAULT;
  1302. if (io.flags)
  1303. return -EINVAL;
  1304. switch (io.opcode) {
  1305. case nvme_cmd_write:
  1306. case nvme_cmd_read:
  1307. case nvme_cmd_compare:
  1308. break;
  1309. default:
  1310. return -EINVAL;
  1311. }
  1312. length = (io.nblocks + 1) << ns->lba_shift;
  1313. if ((io.control & NVME_RW_PRINFO_PRACT) &&
  1314. ns->ms == sizeof(struct t10_pi_tuple)) {
  1315. /*
  1316. * Protection information is stripped/inserted by the
  1317. * controller.
  1318. */
  1319. if (nvme_to_user_ptr(io.metadata))
  1320. return -EINVAL;
  1321. meta_len = 0;
  1322. metadata = NULL;
  1323. } else {
  1324. meta_len = (io.nblocks + 1) * ns->ms;
  1325. metadata = nvme_to_user_ptr(io.metadata);
  1326. }
  1327. if (ns->features & NVME_NS_EXT_LBAS) {
  1328. length += meta_len;
  1329. meta_len = 0;
  1330. } else if (meta_len) {
  1331. if ((io.metadata & 3) || !io.metadata)
  1332. return -EINVAL;
  1333. }
  1334. memset(&c, 0, sizeof(c));
  1335. c.rw.opcode = io.opcode;
  1336. c.rw.flags = io.flags;
  1337. c.rw.nsid = cpu_to_le32(ns->head->ns_id);
  1338. c.rw.slba = cpu_to_le64(io.slba);
  1339. c.rw.length = cpu_to_le16(io.nblocks);
  1340. c.rw.control = cpu_to_le16(io.control);
  1341. c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
  1342. c.rw.reftag = cpu_to_le32(io.reftag);
  1343. c.rw.apptag = cpu_to_le16(io.apptag);
  1344. c.rw.appmask = cpu_to_le16(io.appmask);
  1345. return nvme_submit_user_cmd(ns->queue, &c,
  1346. nvme_to_user_ptr(io.addr), length,
  1347. metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
  1348. }
  1349. static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
  1350. struct nvme_passthru_cmd __user *ucmd)
  1351. {
  1352. struct nvme_passthru_cmd cmd;
  1353. struct nvme_command c;
  1354. unsigned timeout = 0;
  1355. u64 result;
  1356. int status;
  1357. if (!capable(CAP_SYS_ADMIN))
  1358. return -EACCES;
  1359. if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
  1360. return -EFAULT;
  1361. if (cmd.flags)
  1362. return -EINVAL;
  1363. memset(&c, 0, sizeof(c));
  1364. c.common.opcode = cmd.opcode;
  1365. c.common.flags = cmd.flags;
  1366. c.common.nsid = cpu_to_le32(cmd.nsid);
  1367. c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
  1368. c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
  1369. c.common.cdw10 = cpu_to_le32(cmd.cdw10);
  1370. c.common.cdw11 = cpu_to_le32(cmd.cdw11);
  1371. c.common.cdw12 = cpu_to_le32(cmd.cdw12);
  1372. c.common.cdw13 = cpu_to_le32(cmd.cdw13);
  1373. c.common.cdw14 = cpu_to_le32(cmd.cdw14);
  1374. c.common.cdw15 = cpu_to_le32(cmd.cdw15);
  1375. if (cmd.timeout_ms)
  1376. timeout = msecs_to_jiffies(cmd.timeout_ms);
  1377. status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
  1378. nvme_to_user_ptr(cmd.addr), cmd.data_len,
  1379. nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
  1380. 0, &result, timeout);
  1381. if (status >= 0) {
  1382. if (put_user(result, &ucmd->result))
  1383. return -EFAULT;
  1384. }
  1385. return status;
  1386. }
  1387. static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
  1388. struct nvme_passthru_cmd64 __user *ucmd)
  1389. {
  1390. struct nvme_passthru_cmd64 cmd;
  1391. struct nvme_command c;
  1392. unsigned timeout = 0;
  1393. int status;
  1394. if (!capable(CAP_SYS_ADMIN))
  1395. return -EACCES;
  1396. if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
  1397. return -EFAULT;
  1398. if (cmd.flags)
  1399. return -EINVAL;
  1400. memset(&c, 0, sizeof(c));
  1401. c.common.opcode = cmd.opcode;
  1402. c.common.flags = cmd.flags;
  1403. c.common.nsid = cpu_to_le32(cmd.nsid);
  1404. c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
  1405. c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
  1406. c.common.cdw10 = cpu_to_le32(cmd.cdw10);
  1407. c.common.cdw11 = cpu_to_le32(cmd.cdw11);
  1408. c.common.cdw12 = cpu_to_le32(cmd.cdw12);
  1409. c.common.cdw13 = cpu_to_le32(cmd.cdw13);
  1410. c.common.cdw14 = cpu_to_le32(cmd.cdw14);
  1411. c.common.cdw15 = cpu_to_le32(cmd.cdw15);
  1412. if (cmd.timeout_ms)
  1413. timeout = msecs_to_jiffies(cmd.timeout_ms);
  1414. status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
  1415. nvme_to_user_ptr(cmd.addr), cmd.data_len,
  1416. nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
  1417. 0, &cmd.result, timeout);
  1418. if (status >= 0) {
  1419. if (put_user(cmd.result, &ucmd->result))
  1420. return -EFAULT;
  1421. }
  1422. return status;
  1423. }
  1424. /*
  1425. * Issue ioctl requests on the first available path. Note that unlike normal
1426. * block layer requests we will not retry a failed request on another controller.
  1427. */
  1428. struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
  1429. struct nvme_ns_head **head, int *srcu_idx)
  1430. {
  1431. #ifdef CONFIG_NVME_MULTIPATH
  1432. if (disk->fops == &nvme_ns_head_ops) {
  1433. struct nvme_ns *ns;
  1434. *head = disk->private_data;
  1435. *srcu_idx = srcu_read_lock(&(*head)->srcu);
  1436. ns = nvme_find_path(*head);
  1437. if (!ns)
  1438. srcu_read_unlock(&(*head)->srcu, *srcu_idx);
  1439. return ns;
  1440. }
  1441. #endif
  1442. *head = NULL;
  1443. *srcu_idx = -1;
  1444. return disk->private_data;
  1445. }
  1446. void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
  1447. {
  1448. if (head)
  1449. srcu_read_unlock(&head->srcu, idx);
  1450. }
  1451. static bool is_ctrl_ioctl(unsigned int cmd)
  1452. {
  1453. if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
  1454. return true;
  1455. if (is_sed_ioctl(cmd))
  1456. return true;
  1457. return false;
  1458. }
  1459. static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
  1460. void __user *argp,
  1461. struct nvme_ns_head *head,
  1462. int srcu_idx)
  1463. {
  1464. struct nvme_ctrl *ctrl = ns->ctrl;
  1465. int ret;
  1466. nvme_get_ctrl(ns->ctrl);
  1467. nvme_put_ns_from_disk(head, srcu_idx);
  1468. switch (cmd) {
  1469. case NVME_IOCTL_ADMIN_CMD:
  1470. ret = nvme_user_cmd(ctrl, NULL, argp);
  1471. break;
  1472. case NVME_IOCTL_ADMIN64_CMD:
  1473. ret = nvme_user_cmd64(ctrl, NULL, argp);
  1474. break;
  1475. default:
  1476. ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
  1477. break;
  1478. }
  1479. nvme_put_ctrl(ctrl);
  1480. return ret;
  1481. }
  1482. static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
  1483. unsigned int cmd, unsigned long arg)
  1484. {
  1485. struct nvme_ns_head *head = NULL;
  1486. void __user *argp = (void __user *)arg;
  1487. struct nvme_ns *ns;
  1488. int srcu_idx, ret;
  1489. ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
  1490. if (unlikely(!ns))
  1491. return -EWOULDBLOCK;
  1492. /*
  1493. * Handle ioctls that apply to the controller instead of the namespace
1494. * separately and drop the ns SRCU reference early. This avoids a
  1495. * deadlock when deleting namespaces using the passthrough interface.
  1496. */
  1497. if (is_ctrl_ioctl(cmd))
  1498. return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
  1499. switch (cmd) {
  1500. case NVME_IOCTL_ID:
  1501. force_successful_syscall_return();
  1502. ret = ns->head->ns_id;
  1503. break;
  1504. case NVME_IOCTL_IO_CMD:
  1505. ret = nvme_user_cmd(ns->ctrl, ns, argp);
  1506. break;
  1507. case NVME_IOCTL_SUBMIT_IO:
  1508. ret = nvme_submit_io(ns, argp);
  1509. break;
  1510. case NVME_IOCTL_IO64_CMD:
  1511. ret = nvme_user_cmd64(ns->ctrl, ns, argp);
  1512. break;
  1513. default:
  1514. if (ns->ndev)
  1515. ret = nvme_nvm_ioctl(ns, cmd, arg);
  1516. else
  1517. ret = -ENOTTY;
  1518. }
  1519. nvme_put_ns_from_disk(head, srcu_idx);
  1520. return ret;
  1521. }
  1522. #ifdef CONFIG_COMPAT
  1523. struct nvme_user_io32 {
  1524. __u8 opcode;
  1525. __u8 flags;
  1526. __u16 control;
  1527. __u16 nblocks;
  1528. __u16 rsvd;
  1529. __u64 metadata;
  1530. __u64 addr;
  1531. __u64 slba;
  1532. __u32 dsmgmt;
  1533. __u32 reftag;
  1534. __u16 apptag;
  1535. __u16 appmask;
  1536. } __attribute__((__packed__));
  1537. #define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
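/*
* Illustrative sizing, assuming an i386-style ABI where __u64 is only
* 4-byte aligned: the members of struct nvme_user_io add up to 44 bytes,
* so a 32-bit build encodes size 44 into _IOW(), while a 64-bit build
* pads the structure to 48 bytes and encodes size 48, yielding two
* different ioctl numbers for what is logically the same command. The
* packed struct nvme_user_io32 above reproduces the 44-byte layout.
*/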
  1538. static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
  1539. unsigned int cmd, unsigned long arg)
  1540. {
  1541. /*
1542. * Handles the difference in NVME_IOCTL_SUBMIT_IO between 32-bit
1543. * programs and a 64-bit kernel.
1544. * The cause is that the result of sizeof(struct nvme_user_io),
1545. * which is used to define NVME_IOCTL_SUBMIT_IO,
1546. * is not the same for 32-bit and 64-bit compilers.
1547. * NVME_IOCTL_SUBMIT_IO32 lets a 64-bit kernel handle
1548. * NVME_IOCTL_SUBMIT_IO as issued by 32-bit programs.
1549. * The other IOCTL numbers are identical between 32 bit and 64 bit,
1550. * so nothing needs to be done for them.
  1551. */
  1552. if (cmd == NVME_IOCTL_SUBMIT_IO32)
  1553. return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
  1554. return nvme_ioctl(bdev, mode, cmd, arg);
  1555. }
  1556. #else
  1557. #define nvme_compat_ioctl NULL
  1558. #endif /* CONFIG_COMPAT */
  1559. static int nvme_open(struct block_device *bdev, fmode_t mode)
  1560. {
  1561. struct nvme_ns *ns = bdev->bd_disk->private_data;
  1562. #ifdef CONFIG_NVME_MULTIPATH
  1563. /* should never be called due to GENHD_FL_HIDDEN */
  1564. if (WARN_ON_ONCE(ns->head->disk))
  1565. goto fail;
  1566. #endif
  1567. if (!kref_get_unless_zero(&ns->kref))
  1568. goto fail;
  1569. if (!try_module_get(ns->ctrl->ops->module))
  1570. goto fail_put_ns;
  1571. return 0;
  1572. fail_put_ns:
  1573. nvme_put_ns(ns);
  1574. fail:
  1575. return -ENXIO;
  1576. }
  1577. static void nvme_release(struct gendisk *disk, fmode_t mode)
  1578. {
  1579. struct nvme_ns *ns = disk->private_data;
  1580. module_put(ns->ctrl->ops->module);
  1581. nvme_put_ns(ns);
  1582. }
  1583. static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  1584. {
  1585. /* some standard values */
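/*
* 64 heads * 32 sectors per track = 2048 sectors (1 MiB) per cylinder,
* hence the capacity >> 11 below.
*/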
  1586. geo->heads = 1 << 6;
  1587. geo->sectors = 1 << 5;
  1588. geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
  1589. return 0;
  1590. }
  1591. #ifdef CONFIG_BLK_DEV_INTEGRITY
  1592. static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
  1593. u32 max_integrity_segments)
  1594. {
  1595. struct blk_integrity integrity;
  1596. memset(&integrity, 0, sizeof(integrity));
  1597. switch (pi_type) {
  1598. case NVME_NS_DPS_PI_TYPE3:
  1599. integrity.profile = &t10_pi_type3_crc;
  1600. integrity.tag_size = sizeof(u16) + sizeof(u32);
  1601. integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
  1602. break;
  1603. case NVME_NS_DPS_PI_TYPE1:
  1604. case NVME_NS_DPS_PI_TYPE2:
  1605. integrity.profile = &t10_pi_type1_crc;
  1606. integrity.tag_size = sizeof(u16);
  1607. integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
  1608. break;
  1609. default:
  1610. integrity.profile = NULL;
  1611. break;
  1612. }
  1613. integrity.tuple_size = ms;
  1614. blk_integrity_register(disk, &integrity);
  1615. blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
  1616. }
  1617. #else
  1618. static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
  1619. u32 max_integrity_segments)
  1620. {
  1621. }
  1622. #endif /* CONFIG_BLK_DEV_INTEGRITY */
  1623. static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
  1624. {
  1625. struct nvme_ctrl *ctrl = ns->ctrl;
  1626. struct request_queue *queue = disk->queue;
  1627. u32 size = queue_logical_block_size(queue);
  1628. if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
  1629. blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
  1630. return;
  1631. }
  1632. if (ctrl->nr_streams && ns->sws && ns->sgs)
  1633. size *= ns->sws * ns->sgs;
  1634. BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
  1635. NVME_DSM_MAX_RANGES);
  1636. queue->limits.discard_alignment = 0;
  1637. queue->limits.discard_granularity = size;
  1638. /* If discard is already enabled, don't reset queue limits */
  1639. if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
  1640. return;
  1641. blk_queue_max_discard_sectors(queue, UINT_MAX);
  1642. blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
  1643. if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
  1644. blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
  1645. }
  1646. /*
1647. * Even though the NVMe spec explicitly states that MDTS is not applicable to
1648. * write-zeroes, we are cautious and limit the size to the controller's
  1649. * max_hw_sectors value, which is based on the MDTS field and possibly other
  1650. * limiting factors.
  1651. */
  1652. static void nvme_config_write_zeroes(struct request_queue *q,
  1653. struct nvme_ctrl *ctrl)
  1654. {
  1655. if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
  1656. !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
  1657. blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
  1658. }
  1659. static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
  1660. {
  1661. return !uuid_is_null(&ids->uuid) ||
  1662. memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
  1663. memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
  1664. }
  1665. static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
  1666. {
  1667. return uuid_equal(&a->uuid, &b->uuid) &&
  1668. memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
  1669. memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
  1670. a->csi == b->csi;
  1671. }
  1672. static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
  1673. u32 *phys_bs, u32 *io_opt)
  1674. {
  1675. struct streams_directive_params s;
  1676. int ret;
  1677. if (!ctrl->nr_streams)
  1678. return 0;
  1679. ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
  1680. if (ret)
  1681. return ret;
  1682. ns->sws = le32_to_cpu(s.sws);
  1683. ns->sgs = le16_to_cpu(s.sgs);
  1684. if (ns->sws) {
  1685. *phys_bs = ns->sws * (1 << ns->lba_shift);
  1686. if (ns->sgs)
  1687. *io_opt = *phys_bs * ns->sgs;
  1688. }
  1689. return 0;
  1690. }
  1691. static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
  1692. {
  1693. struct nvme_ctrl *ctrl = ns->ctrl;
  1694. /*
  1695. * The PI implementation requires the metadata size to be equal to the
  1696. * t10 pi tuple size.
  1697. */
  1698. ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
  1699. if (ns->ms == sizeof(struct t10_pi_tuple))
  1700. ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
  1701. else
  1702. ns->pi_type = 0;
  1703. ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
  1704. if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
  1705. return 0;
  1706. if (ctrl->ops->flags & NVME_F_FABRICS) {
  1707. /*
  1708. * The NVMe over Fabrics specification only supports metadata as
  1709. * part of the extended data LBA. We rely on HCA/HBA support to
  1710. * remap the separate metadata buffer from the block layer.
  1711. */
  1712. if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
  1713. return -EINVAL;
  1714. if (ctrl->max_integrity_segments)
  1715. ns->features |=
  1716. (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
  1717. } else {
  1718. /*
  1719. * For PCIe controllers, we can't easily remap the separate
  1720. * metadata buffer from the block layer and thus require a
  1721. * separate metadata buffer for block layer metadata/PI support.
  1722. * We allow extended LBAs for the passthrough interface, though.
  1723. */
  1724. if (id->flbas & NVME_NS_FLBAS_META_EXT)
  1725. ns->features |= NVME_NS_EXT_LBAS;
  1726. else
  1727. ns->features |= NVME_NS_METADATA_SUPPORTED;
  1728. }
  1729. return 0;
  1730. }
  1731. static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
  1732. struct request_queue *q)
  1733. {
  1734. bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
  1735. if (ctrl->max_hw_sectors) {
  1736. u32 max_segments =
  1737. (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
  1738. max_segments = min_not_zero(max_segments, ctrl->max_segments);
  1739. blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
  1740. blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
  1741. }
  1742. blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
  1743. blk_queue_dma_alignment(q, 7);
  1744. blk_queue_write_cache(q, vwc, vwc);
  1745. }
  1746. static void nvme_update_disk_info(struct gendisk *disk,
  1747. struct nvme_ns *ns, struct nvme_id_ns *id)
  1748. {
  1749. sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
  1750. unsigned short bs = 1 << ns->lba_shift;
  1751. u32 atomic_bs, phys_bs, io_opt = 0;
  1752. /*
  1753. * The block layer can't support LBA sizes larger than the page size
  1754. * yet, so catch this early and don't allow block I/O.
  1755. */
  1756. if (ns->lba_shift > PAGE_SHIFT) {
  1757. capacity = 0;
  1758. bs = (1 << 9);
  1759. }
  1760. blk_integrity_unregister(disk);
  1761. atomic_bs = phys_bs = bs;
  1762. nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
  1763. if (id->nabo == 0) {
  1764. /*
  1765. * Bit 1 indicates whether NAWUPF is defined for this namespace
  1766. * and whether it should be used instead of AWUPF. If NAWUPF ==
  1767. * 0 then AWUPF must be used instead.
  1768. */
  1769. if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
  1770. atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
  1771. else
  1772. atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
  1773. }
  1774. if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
  1775. /* NPWG = Namespace Preferred Write Granularity */
  1776. phys_bs = bs * (1 + le16_to_cpu(id->npwg));
  1777. /* NOWS = Namespace Optimal Write Size */
  1778. io_opt = bs * (1 + le16_to_cpu(id->nows));
  1779. }
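/*
* Illustrative numbers, assuming a 4096-byte LBA format: NAWUPF = 7 gives
* atomic_bs = 8 * 4096 = 32 KiB above, while NPWG = 15 and NOWS = 63 give
* phys_bs = 64 KiB and io_opt = 256 KiB.
*/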
  1780. blk_queue_logical_block_size(disk->queue, bs);
  1781. /*
  1782. * Linux filesystems assume writing a single physical block is
  1783. * an atomic operation. Hence limit the physical block size to the
  1784. * value of the Atomic Write Unit Power Fail parameter.
  1785. */
  1786. blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
  1787. blk_queue_io_min(disk->queue, phys_bs);
  1788. blk_queue_io_opt(disk->queue, io_opt);
  1789. /*
  1790. * Register a metadata profile for PI, or the plain non-integrity NVMe
  1791. * metadata masquerading as Type 0 if supported, otherwise reject block
  1792. * I/O to namespaces with metadata except when the namespace supports
  1793. * PI, as it can strip/insert in that case.
  1794. */
  1795. if (ns->ms) {
  1796. if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
  1797. (ns->features & NVME_NS_METADATA_SUPPORTED))
  1798. nvme_init_integrity(disk, ns->ms, ns->pi_type,
  1799. ns->ctrl->max_integrity_segments);
  1800. else if (!nvme_ns_has_pi(ns))
  1801. capacity = 0;
  1802. }
  1803. set_capacity_revalidate_and_notify(disk, capacity, false);
  1804. nvme_config_discard(disk, ns);
  1805. nvme_config_write_zeroes(disk->queue, ns->ctrl);
  1806. if (id->nsattr & NVME_NS_ATTR_RO)
  1807. set_disk_ro(disk, true);
  1808. }
  1809. static inline bool nvme_first_scan(struct gendisk *disk)
  1810. {
  1811. /* nvme_alloc_ns() scans the disk prior to adding it */
  1812. return !(disk->flags & GENHD_FL_UP);
  1813. }
  1814. static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
  1815. {
  1816. struct nvme_ctrl *ctrl = ns->ctrl;
  1817. u32 iob;
  1818. if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
  1819. is_power_of_2(ctrl->max_hw_sectors))
  1820. iob = ctrl->max_hw_sectors;
  1821. else
  1822. iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
  1823. if (!iob)
  1824. return;
  1825. if (!is_power_of_2(iob)) {
  1826. if (nvme_first_scan(ns->disk))
  1827. pr_warn("%s: ignoring unaligned IO boundary:%u\n",
  1828. ns->disk->disk_name, iob);
  1829. return;
  1830. }
  1831. if (blk_queue_is_zoned(ns->disk->queue)) {
  1832. if (nvme_first_scan(ns->disk))
  1833. pr_warn("%s: ignoring zoned namespace IO boundary\n",
  1834. ns->disk->disk_name);
  1835. return;
  1836. }
  1837. blk_queue_chunk_sectors(ns->queue, iob);
  1838. }
  1839. static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
  1840. {
  1841. unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
  1842. int ret;
  1843. blk_mq_freeze_queue(ns->disk->queue);
  1844. ns->lba_shift = id->lbaf[lbaf].ds;
  1845. nvme_set_queue_limits(ns->ctrl, ns->queue);
  1846. if (ns->head->ids.csi == NVME_CSI_ZNS) {
  1847. ret = nvme_update_zone_info(ns, lbaf);
  1848. if (ret)
  1849. goto out_unfreeze;
  1850. }
  1851. ret = nvme_configure_metadata(ns, id);
  1852. if (ret)
  1853. goto out_unfreeze;
  1854. nvme_set_chunk_sectors(ns, id);
  1855. nvme_update_disk_info(ns->disk, ns, id);
  1856. blk_mq_unfreeze_queue(ns->disk->queue);
  1857. if (blk_queue_is_zoned(ns->queue)) {
  1858. ret = nvme_revalidate_zones(ns);
  1859. if (ret && !nvme_first_scan(ns->disk))
  1860. return ret;
  1861. }
  1862. #ifdef CONFIG_NVME_MULTIPATH
  1863. if (ns->head->disk) {
  1864. blk_mq_freeze_queue(ns->head->disk->queue);
  1865. nvme_update_disk_info(ns->head->disk, ns, id);
  1866. blk_stack_limits(&ns->head->disk->queue->limits,
  1867. &ns->queue->limits, 0);
  1868. blk_queue_update_readahead(ns->head->disk->queue);
  1869. nvme_update_bdev_size(ns->head->disk);
  1870. blk_mq_unfreeze_queue(ns->head->disk->queue);
  1871. }
  1872. #endif
  1873. return 0;
  1874. out_unfreeze:
  1875. blk_mq_unfreeze_queue(ns->disk->queue);
  1876. return ret;
  1877. }
  1878. static char nvme_pr_type(enum pr_type type)
  1879. {
  1880. switch (type) {
  1881. case PR_WRITE_EXCLUSIVE:
  1882. return 1;
  1883. case PR_EXCLUSIVE_ACCESS:
  1884. return 2;
  1885. case PR_WRITE_EXCLUSIVE_REG_ONLY:
  1886. return 3;
  1887. case PR_EXCLUSIVE_ACCESS_REG_ONLY:
  1888. return 4;
  1889. case PR_WRITE_EXCLUSIVE_ALL_REGS:
  1890. return 5;
  1891. case PR_EXCLUSIVE_ACCESS_ALL_REGS:
  1892. return 6;
  1893. default:
  1894. return 0;
  1895. }
  1896. };
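/*
* cdw10 layout used by the reservation helpers below, following the NVMe
* reservation commands: bits 2:0 carry the register/acquire/release
* action, bit 3 is IEKEY (ignore existing key), bits 15:8 carry the
* reservation type from nvme_pr_type(), and for Reservation Register
* bits 31:30 select the persist-through-power-loss behaviour.
*/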
  1897. static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
  1898. u64 key, u64 sa_key, u8 op)
  1899. {
  1900. struct nvme_ns_head *head = NULL;
  1901. struct nvme_ns *ns;
  1902. struct nvme_command c;
  1903. int srcu_idx, ret;
  1904. u8 data[16] = { 0, };
  1905. ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
  1906. if (unlikely(!ns))
  1907. return -EWOULDBLOCK;
  1908. put_unaligned_le64(key, &data[0]);
  1909. put_unaligned_le64(sa_key, &data[8]);
  1910. memset(&c, 0, sizeof(c));
  1911. c.common.opcode = op;
  1912. c.common.nsid = cpu_to_le32(ns->head->ns_id);
  1913. c.common.cdw10 = cpu_to_le32(cdw10);
  1914. ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
  1915. nvme_put_ns_from_disk(head, srcu_idx);
  1916. return ret;
  1917. }
  1918. static int nvme_pr_register(struct block_device *bdev, u64 old,
  1919. u64 new, unsigned flags)
  1920. {
  1921. u32 cdw10;
  1922. if (flags & ~PR_FL_IGNORE_KEY)
  1923. return -EOPNOTSUPP;
  1924. cdw10 = old ? 2 : 0;
  1925. cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
  1926. cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
  1927. return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
  1928. }
  1929. static int nvme_pr_reserve(struct block_device *bdev, u64 key,
  1930. enum pr_type type, unsigned flags)
  1931. {
  1932. u32 cdw10;
  1933. if (flags & ~PR_FL_IGNORE_KEY)
  1934. return -EOPNOTSUPP;
  1935. cdw10 = nvme_pr_type(type) << 8;
  1936. cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
  1937. return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
  1938. }
  1939. static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
  1940. enum pr_type type, bool abort)
  1941. {
  1942. u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
  1943. return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
  1944. }
  1945. static int nvme_pr_clear(struct block_device *bdev, u64 key)
  1946. {
  1947. u32 cdw10 = 1 | (key ? 1 << 3 : 0);
  1948. return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
  1949. }
  1950. static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
  1951. {
  1952. u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
  1953. return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
  1954. }
  1955. static const struct pr_ops nvme_pr_ops = {
  1956. .pr_register = nvme_pr_register,
  1957. .pr_reserve = nvme_pr_reserve,
  1958. .pr_release = nvme_pr_release,
  1959. .pr_preempt = nvme_pr_preempt,
  1960. .pr_clear = nvme_pr_clear,
  1961. };
  1962. #ifdef CONFIG_BLK_SED_OPAL
  1963. int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
  1964. bool send)
  1965. {
  1966. struct nvme_ctrl *ctrl = data;
  1967. struct nvme_command cmd;
  1968. memset(&cmd, 0, sizeof(cmd));
  1969. if (send)
  1970. cmd.common.opcode = nvme_admin_security_send;
  1971. else
  1972. cmd.common.opcode = nvme_admin_security_recv;
  1973. cmd.common.nsid = 0;
  1974. cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
  1975. cmd.common.cdw11 = cpu_to_le32(len);
  1976. return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
  1977. ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
  1978. }
  1979. EXPORT_SYMBOL_GPL(nvme_sec_submit);
  1980. #endif /* CONFIG_BLK_SED_OPAL */
  1981. static const struct block_device_operations nvme_fops = {
  1982. .owner = THIS_MODULE,
  1983. .ioctl = nvme_ioctl,
  1984. .compat_ioctl = nvme_compat_ioctl,
  1985. .open = nvme_open,
  1986. .release = nvme_release,
  1987. .getgeo = nvme_getgeo,
  1988. .report_zones = nvme_report_zones,
  1989. .pr_ops = &nvme_pr_ops,
  1990. };
  1991. #ifdef CONFIG_NVME_MULTIPATH
  1992. static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
  1993. {
  1994. struct nvme_ns_head *head = bdev->bd_disk->private_data;
  1995. if (!kref_get_unless_zero(&head->ref))
  1996. return -ENXIO;
  1997. return 0;
  1998. }
  1999. static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
  2000. {
  2001. nvme_put_ns_head(disk->private_data);
  2002. }
  2003. const struct block_device_operations nvme_ns_head_ops = {
  2004. .owner = THIS_MODULE,
  2005. .submit_bio = nvme_ns_head_submit_bio,
  2006. .open = nvme_ns_head_open,
  2007. .release = nvme_ns_head_release,
  2008. .ioctl = nvme_ioctl,
  2009. .compat_ioctl = nvme_compat_ioctl,
  2010. .getgeo = nvme_getgeo,
  2011. .report_zones = nvme_report_zones,
  2012. .pr_ops = &nvme_pr_ops,
  2013. };
  2014. #endif /* CONFIG_NVME_MULTIPATH */
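/*
* CAP.TO is reported in 500 ms units, so the wait below is
* (CAP.TO + 1) * HZ / 2 jiffies; a hypothetical CAP.TO of 30, for
* example, allows roughly 15.5 seconds before giving up.
*/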
  2015. static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
  2016. {
  2017. unsigned long timeout =
  2018. ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
  2019. u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
  2020. int ret;
  2021. while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
  2022. if (csts == ~0)
  2023. return -ENODEV;
  2024. if ((csts & NVME_CSTS_RDY) == bit)
  2025. break;
  2026. usleep_range(1000, 2000);
  2027. if (fatal_signal_pending(current))
  2028. return -EINTR;
  2029. if (time_after(jiffies, timeout)) {
  2030. dev_err(ctrl->device,
  2031. "Device not ready; aborting %s, CSTS=0x%x\n",
  2032. enabled ? "initialisation" : "reset", csts);
  2033. return -ENODEV;
  2034. }
  2035. }
  2036. return ret;
  2037. }
  2038. /*
  2039. * If the device has been passed off to us in an enabled state, just clear
  2040. * the enabled bit. The spec says we should set the 'shutdown notification
  2041. * bits', but doing so may cause the device to complete commands to the
  2042. * admin queue ... and we don't know what memory that might be pointing at!
  2043. */
  2044. int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
  2045. {
  2046. int ret;
  2047. ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
  2048. ctrl->ctrl_config &= ~NVME_CC_ENABLE;
  2049. ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
  2050. if (ret)
  2051. return ret;
  2052. if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
  2053. msleep(NVME_QUIRK_DELAY_AMOUNT);
  2054. return nvme_wait_ready(ctrl, ctrl->cap, false);
  2055. }
  2056. EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
  2057. int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
  2058. {
  2059. unsigned dev_page_min;
  2060. int ret;
  2061. ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
  2062. if (ret) {
  2063. dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
  2064. return ret;
  2065. }
  2066. dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
  2067. if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
  2068. dev_err(ctrl->device,
  2069. "Minimum device page size %u too large for host (%u)\n",
  2070. 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
  2071. return -ENODEV;
  2072. }
  2073. if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
  2074. ctrl->ctrl_config = NVME_CC_CSS_CSI;
  2075. else
  2076. ctrl->ctrl_config = NVME_CC_CSS_NVM;
  2077. ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
  2078. ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
  2079. ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
  2080. ctrl->ctrl_config |= NVME_CC_ENABLE;
  2081. ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
  2082. if (ret)
  2083. return ret;
  2084. return nvme_wait_ready(ctrl, ctrl->cap, true);
  2085. }
  2086. EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
  2087. int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
  2088. {
  2089. unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
  2090. u32 csts;
  2091. int ret;
  2092. ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
  2093. ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
  2094. ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
  2095. if (ret)
  2096. return ret;
  2097. while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
  2098. if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
  2099. break;
  2100. msleep(100);
  2101. if (fatal_signal_pending(current))
  2102. return -EINTR;
  2103. if (time_after(jiffies, timeout)) {
  2104. dev_err(ctrl->device,
  2105. "Device shutdown incomplete; abort shutdown\n");
  2106. return -ENODEV;
  2107. }
  2108. }
  2109. return ret;
  2110. }
  2111. EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
  2112. static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
  2113. {
  2114. __le64 ts;
  2115. int ret;
  2116. if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
  2117. return 0;
  2118. ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
  2119. ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
  2120. NULL);
  2121. if (ret)
  2122. dev_warn_once(ctrl->device,
  2123. "could not set timestamp (%d)\n", ret);
  2124. return ret;
  2125. }
  2126. static int nvme_configure_acre(struct nvme_ctrl *ctrl)
  2127. {
  2128. struct nvme_feat_host_behavior *host;
  2129. int ret;
  2130. /* Don't bother enabling the feature if retry delay is not reported */
  2131. if (!ctrl->crdt[0])
  2132. return 0;
  2133. host = kzalloc(sizeof(*host), GFP_KERNEL);
  2134. if (!host)
  2135. return 0;
  2136. host->acre = NVME_ENABLE_ACRE;
  2137. ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
  2138. host, sizeof(*host), NULL);
  2139. kfree(host);
  2140. return ret;
  2141. }
  2142. static int nvme_configure_apst(struct nvme_ctrl *ctrl)
  2143. {
  2144. /*
  2145. * APST (Autonomous Power State Transition) lets us program a
  2146. * table of power state transitions that the controller will
  2147. * perform automatically. We configure it with a simple
  2148. * heuristic: we are willing to spend at most 2% of the time
  2149. * transitioning between power states. Therefore, when running
  2150. * in any given state, we will enter the next lower-power
  2151. * non-operational state after waiting 50 * (enlat + exlat)
  2152. * microseconds, as long as that state's exit latency is under
  2153. * the requested maximum latency.
  2154. *
  2155. * We will not autonomously enter any non-operational state for
  2156. * which the total latency exceeds ps_max_latency_us. Users
  2157. * can set ps_max_latency_us to zero to turn off APST.
  2158. */
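/*
* A worked example with hypothetical latencies: transitions into a
* non-operational state with entry_lat = 10000 us and exit_lat = 5000 us
* are programmed with an idle timeout of 50 * 15000 us = 750 ms
* (transition_ms = (15000 + 19) / 20 = 750), and the state is only used
* as a target if its 5000 us exit latency does not exceed
* ps_max_latency_us.
*/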
  2159. unsigned apste;
  2160. struct nvme_feat_auto_pst *table;
  2161. u64 max_lat_us = 0;
  2162. int max_ps = -1;
  2163. int ret;
  2164. /*
  2165. * If APST isn't supported or if we haven't been initialized yet,
  2166. * then don't do anything.
  2167. */
  2168. if (!ctrl->apsta)
  2169. return 0;
  2170. if (ctrl->npss > 31) {
  2171. dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
  2172. return 0;
  2173. }
  2174. table = kzalloc(sizeof(*table), GFP_KERNEL);
  2175. if (!table)
  2176. return 0;
  2177. if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
  2178. /* Turn off APST. */
  2179. apste = 0;
  2180. dev_dbg(ctrl->device, "APST disabled\n");
  2181. } else {
  2182. __le64 target = cpu_to_le64(0);
  2183. int state;
  2184. /*
  2185. * Walk through all states from lowest- to highest-power.
  2186. * According to the spec, lower-numbered states use more
  2187. * power. NPSS, despite the name, is the index of the
  2188. * lowest-power state, not the number of states.
  2189. */
  2190. for (state = (int)ctrl->npss; state >= 0; state--) {
  2191. u64 total_latency_us, exit_latency_us, transition_ms;
  2192. if (target)
  2193. table->entries[state] = target;
  2194. /*
  2195. * Don't allow transitions to the deepest state
  2196. * if it's quirked off.
  2197. */
  2198. if (state == ctrl->npss &&
  2199. (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
  2200. continue;
  2201. /*
  2202. * Is this state a useful non-operational state for
  2203. * higher-power states to autonomously transition to?
  2204. */
  2205. if (!(ctrl->psd[state].flags &
  2206. NVME_PS_FLAGS_NON_OP_STATE))
  2207. continue;
  2208. exit_latency_us =
  2209. (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
  2210. if (exit_latency_us > ctrl->ps_max_latency_us)
  2211. continue;
  2212. total_latency_us =
  2213. exit_latency_us +
  2214. le32_to_cpu(ctrl->psd[state].entry_lat);
  2215. /*
  2216. * This state is good. Use it as the APST idle
  2217. * target for higher power states.
  2218. */
  2219. transition_ms = total_latency_us + 19;
  2220. do_div(transition_ms, 20);
  2221. if (transition_ms > (1 << 24) - 1)
  2222. transition_ms = (1 << 24) - 1;
  2223. target = cpu_to_le64((state << 3) |
  2224. (transition_ms << 8));
  2225. if (max_ps == -1)
  2226. max_ps = state;
  2227. if (total_latency_us > max_lat_us)
  2228. max_lat_us = total_latency_us;
  2229. }
  2230. apste = 1;
  2231. if (max_ps == -1) {
  2232. dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
  2233. } else {
  2234. dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
  2235. max_ps, max_lat_us, (int)sizeof(*table), table);
  2236. }
  2237. }
  2238. ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
  2239. table, sizeof(*table), NULL);
  2240. if (ret)
  2241. dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
  2242. kfree(table);
  2243. return ret;
  2244. }
  2245. static void nvme_set_latency_tolerance(struct device *dev, s32 val)
  2246. {
  2247. struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
  2248. u64 latency;
  2249. switch (val) {
  2250. case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
  2251. case PM_QOS_LATENCY_ANY:
  2252. latency = U64_MAX;
  2253. break;
  2254. default:
  2255. latency = val;
  2256. }
  2257. if (ctrl->ps_max_latency_us != latency) {
  2258. ctrl->ps_max_latency_us = latency;
  2259. if (ctrl->state == NVME_CTRL_LIVE)
  2260. nvme_configure_apst(ctrl);
  2261. }
  2262. }
  2263. struct nvme_core_quirk_entry {
  2264. /*
  2265. * NVMe model and firmware strings are padded with spaces. For
  2266. * simplicity, strings in the quirk table are padded with NULLs
  2267. * instead.
  2268. */
  2269. u16 vid;
  2270. const char *mn;
  2271. const char *fr;
  2272. unsigned long quirks;
  2273. };
  2274. static const struct nvme_core_quirk_entry core_quirks[] = {
  2275. {
  2276. /*
  2277. * This Toshiba device seems to die using any APST states. See:
  2278. * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
  2279. */
  2280. .vid = 0x1179,
  2281. .mn = "THNSF5256GPUK TOSHIBA",
  2282. .quirks = NVME_QUIRK_NO_APST,
  2283. },
  2284. {
  2285. /*
  2286. * This LiteON CL1-3D*-Q11 firmware version has a race
2287. * condition associated with actions related to suspend to idle.
2288. * LiteON has resolved the problem in future firmware.
  2289. */
  2290. .vid = 0x14a4,
  2291. .fr = "22301111",
  2292. .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
  2293. }
  2294. };
  2295. /* match is null-terminated but idstr is space-padded. */
  2296. static bool string_matches(const char *idstr, const char *match, size_t len)
  2297. {
  2298. size_t matchlen;
  2299. if (!match)
  2300. return true;
  2301. matchlen = strlen(match);
  2302. WARN_ON_ONCE(matchlen > len);
  2303. if (memcmp(idstr, match, matchlen))
  2304. return false;
  2305. for (; matchlen < len; matchlen++)
  2306. if (idstr[matchlen] != ' ')
  2307. return false;
  2308. return true;
  2309. }
  2310. static bool quirk_matches(const struct nvme_id_ctrl *id,
  2311. const struct nvme_core_quirk_entry *q)
  2312. {
  2313. return q->vid == le16_to_cpu(id->vid) &&
  2314. string_matches(id->mn, q->mn, sizeof(id->mn)) &&
  2315. string_matches(id->fr, q->fr, sizeof(id->fr));
  2316. }
  2317. static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
  2318. struct nvme_id_ctrl *id)
  2319. {
  2320. size_t nqnlen;
  2321. int off;
2322. if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
  2323. nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
  2324. if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
  2325. strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
  2326. return;
  2327. }
  2328. if (ctrl->vs >= NVME_VS(1, 2, 1))
  2329. dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
  2330. }
  2331. /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
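/*
* With hypothetical identify data (vid = 0x1b36, ssvid = 0x1af4,
* sn = "SN123", mn = "Model X"), the generated NQN looks roughly like
* "nqn.2014.08.org.nvmexpress:1b361af4SN123<spaces>Model X<spaces>",
* with the space padding of sn/mn copied verbatim and the rest of the
* buffer zero-filled.
*/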
  2332. off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
  2333. "nqn.2014.08.org.nvmexpress:%04x%04x",
  2334. le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
  2335. memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
  2336. off += sizeof(id->sn);
  2337. memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
  2338. off += sizeof(id->mn);
  2339. memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
  2340. }
  2341. static void nvme_release_subsystem(struct device *dev)
  2342. {
  2343. struct nvme_subsystem *subsys =
  2344. container_of(dev, struct nvme_subsystem, dev);
  2345. if (subsys->instance >= 0)
  2346. ida_simple_remove(&nvme_instance_ida, subsys->instance);
  2347. kfree(subsys);
  2348. }
  2349. static void nvme_destroy_subsystem(struct kref *ref)
  2350. {
  2351. struct nvme_subsystem *subsys =
  2352. container_of(ref, struct nvme_subsystem, ref);
  2353. mutex_lock(&nvme_subsystems_lock);
  2354. list_del(&subsys->entry);
  2355. mutex_unlock(&nvme_subsystems_lock);
  2356. ida_destroy(&subsys->ns_ida);
  2357. device_del(&subsys->dev);
  2358. put_device(&subsys->dev);
  2359. }
  2360. static void nvme_put_subsystem(struct nvme_subsystem *subsys)
  2361. {
  2362. kref_put(&subsys->ref, nvme_destroy_subsystem);
  2363. }
  2364. static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
  2365. {
  2366. struct nvme_subsystem *subsys;
  2367. lockdep_assert_held(&nvme_subsystems_lock);
  2368. /*
2369. * Fail matches for discovery subsystems. This results in each
2370. * discovery controller being bound to a unique subsystem.
  2371. * This avoids issues with validating controller values
  2372. * that can only be true when there is a single unique subsystem.
  2373. * There may be multiple and completely independent entities
  2374. * that provide discovery controllers.
  2375. */
  2376. if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
  2377. return NULL;
  2378. list_for_each_entry(subsys, &nvme_subsystems, entry) {
  2379. if (strcmp(subsys->subnqn, subsysnqn))
  2380. continue;
  2381. if (!kref_get_unless_zero(&subsys->ref))
  2382. continue;
  2383. return subsys;
  2384. }
  2385. return NULL;
  2386. }
  2387. #define SUBSYS_ATTR_RO(_name, _mode, _show) \
  2388. struct device_attribute subsys_attr_##_name = \
  2389. __ATTR(_name, _mode, _show, NULL)
  2390. static ssize_t nvme_subsys_show_nqn(struct device *dev,
  2391. struct device_attribute *attr,
  2392. char *buf)
  2393. {
  2394. struct nvme_subsystem *subsys =
  2395. container_of(dev, struct nvme_subsystem, dev);
  2396. return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
  2397. }
  2398. static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
  2399. #define nvme_subsys_show_str_function(field) \
  2400. static ssize_t subsys_##field##_show(struct device *dev, \
  2401. struct device_attribute *attr, char *buf) \
  2402. { \
  2403. struct nvme_subsystem *subsys = \
  2404. container_of(dev, struct nvme_subsystem, dev); \
  2405. return sprintf(buf, "%.*s\n", \
  2406. (int)sizeof(subsys->field), subsys->field); \
  2407. } \
  2408. static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
  2409. nvme_subsys_show_str_function(model);
  2410. nvme_subsys_show_str_function(serial);
  2411. nvme_subsys_show_str_function(firmware_rev);
  2412. static struct attribute *nvme_subsys_attrs[] = {
  2413. &subsys_attr_model.attr,
  2414. &subsys_attr_serial.attr,
  2415. &subsys_attr_firmware_rev.attr,
  2416. &subsys_attr_subsysnqn.attr,
  2417. #ifdef CONFIG_NVME_MULTIPATH
  2418. &subsys_attr_iopolicy.attr,
  2419. #endif
  2420. NULL,
  2421. };
  2422. static struct attribute_group nvme_subsys_attrs_group = {
  2423. .attrs = nvme_subsys_attrs,
  2424. };
  2425. static const struct attribute_group *nvme_subsys_attrs_groups[] = {
  2426. &nvme_subsys_attrs_group,
  2427. NULL,
  2428. };
  2429. static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
  2430. {
  2431. return ctrl->opts && ctrl->opts->discovery_nqn;
  2432. }
  2433. static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
  2434. struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
  2435. {
  2436. struct nvme_ctrl *tmp;
  2437. lockdep_assert_held(&nvme_subsystems_lock);
  2438. list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
  2439. if (nvme_state_terminal(tmp))
  2440. continue;
  2441. if (tmp->cntlid == ctrl->cntlid) {
  2442. dev_err(ctrl->device,
  2443. "Duplicate cntlid %u with %s, rejecting\n",
  2444. ctrl->cntlid, dev_name(tmp->device));
  2445. return false;
  2446. }
  2447. if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
  2448. nvme_discovery_ctrl(ctrl))
  2449. continue;
  2450. dev_err(ctrl->device,
  2451. "Subsystem does not support multiple controllers\n");
  2452. return false;
  2453. }
  2454. return true;
  2455. }
  2456. static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
  2457. {
  2458. struct nvme_subsystem *subsys, *found;
  2459. int ret;
  2460. subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
  2461. if (!subsys)
  2462. return -ENOMEM;
  2463. subsys->instance = -1;
  2464. mutex_init(&subsys->lock);
  2465. kref_init(&subsys->ref);
  2466. INIT_LIST_HEAD(&subsys->ctrls);
  2467. INIT_LIST_HEAD(&subsys->nsheads);
  2468. nvme_init_subnqn(subsys, ctrl, id);
  2469. memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
  2470. memcpy(subsys->model, id->mn, sizeof(subsys->model));
  2471. memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
  2472. subsys->vendor_id = le16_to_cpu(id->vid);
  2473. subsys->cmic = id->cmic;
  2474. subsys->awupf = le16_to_cpu(id->awupf);
  2475. #ifdef CONFIG_NVME_MULTIPATH
  2476. subsys->iopolicy = NVME_IOPOLICY_NUMA;
  2477. #endif
  2478. subsys->dev.class = nvme_subsys_class;
  2479. subsys->dev.release = nvme_release_subsystem;
  2480. subsys->dev.groups = nvme_subsys_attrs_groups;
  2481. dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
  2482. device_initialize(&subsys->dev);
  2483. mutex_lock(&nvme_subsystems_lock);
  2484. found = __nvme_find_get_subsystem(subsys->subnqn);
  2485. if (found) {
  2486. put_device(&subsys->dev);
  2487. subsys = found;
  2488. if (!nvme_validate_cntlid(subsys, ctrl, id)) {
  2489. ret = -EINVAL;
  2490. goto out_put_subsystem;
  2491. }
  2492. } else {
  2493. ret = device_add(&subsys->dev);
  2494. if (ret) {
  2495. dev_err(ctrl->device,
  2496. "failed to register subsystem device.\n");
  2497. put_device(&subsys->dev);
  2498. goto out_unlock;
  2499. }
  2500. ida_init(&subsys->ns_ida);
  2501. list_add_tail(&subsys->entry, &nvme_subsystems);
  2502. }
  2503. ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
  2504. dev_name(ctrl->device));
  2505. if (ret) {
  2506. dev_err(ctrl->device,
  2507. "failed to create sysfs link from subsystem.\n");
  2508. goto out_put_subsystem;
  2509. }
  2510. if (!found)
  2511. subsys->instance = ctrl->instance;
  2512. ctrl->subsys = subsys;
  2513. list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
  2514. mutex_unlock(&nvme_subsystems_lock);
  2515. return 0;
  2516. out_put_subsystem:
  2517. nvme_put_subsystem(subsys);
  2518. out_unlock:
  2519. mutex_unlock(&nvme_subsystems_lock);
  2520. return ret;
  2521. }
  2522. int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
  2523. void *log, size_t size, u64 offset)
  2524. {
  2525. struct nvme_command c = { };
  2526. u32 dwlen = nvme_bytes_to_numd(size);
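/*
* NUMD is zero-based and counted in dwords, split across NUMDL/NUMDU:
* e.g. reading a 4096-byte log gives dwlen = 4096 / 4 - 1 = 1023, so
* numdl = 1023 and numdu = 0.
*/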
  2527. c.get_log_page.opcode = nvme_admin_get_log_page;
  2528. c.get_log_page.nsid = cpu_to_le32(nsid);
  2529. c.get_log_page.lid = log_page;
  2530. c.get_log_page.lsp = lsp;
  2531. c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
  2532. c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
  2533. c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
  2534. c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
  2535. c.get_log_page.csi = csi;
  2536. return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
  2537. }
  2538. static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
  2539. struct nvme_effects_log **log)
  2540. {
  2541. struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
  2542. int ret;
  2543. if (cel)
  2544. goto out;
  2545. cel = kzalloc(sizeof(*cel), GFP_KERNEL);
  2546. if (!cel)
  2547. return -ENOMEM;
  2548. ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
  2549. cel, sizeof(*cel), 0);
  2550. if (ret) {
  2551. kfree(cel);
  2552. return ret;
  2553. }
  2554. xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
  2555. out:
  2556. *log = cel;
  2557. return 0;
  2558. }
  2559. /*
  2560. * Initialize the cached copies of the Identify data and various controller
2561. * registers in our nvme_ctrl structure. This should be called as soon as
  2562. * the admin queue is fully up and running.
  2563. */
  2564. int nvme_init_identify(struct nvme_ctrl *ctrl)
  2565. {
  2566. struct nvme_id_ctrl *id;
  2567. int ret, page_shift;
  2568. u32 max_hw_sectors;
  2569. bool prev_apst_enabled;
  2570. ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
  2571. if (ret) {
  2572. dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
  2573. return ret;
  2574. }
  2575. page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
  2576. ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
  2577. if (ctrl->vs >= NVME_VS(1, 1, 0))
  2578. ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
  2579. ret = nvme_identify_ctrl(ctrl, &id);
  2580. if (ret) {
  2581. dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
  2582. return -EIO;
  2583. }
  2584. if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
  2585. ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
  2586. if (ret < 0)
  2587. goto out_free;
  2588. }
  2589. if (!(ctrl->ops->flags & NVME_F_FABRICS))
  2590. ctrl->cntlid = le16_to_cpu(id->cntlid);
  2591. if (!ctrl->identified) {
  2592. int i;
  2593. ret = nvme_init_subsystem(ctrl, id);
  2594. if (ret)
  2595. goto out_free;
  2596. /*
2597. * Check for quirks. Quirks can depend on the firmware version,
  2598. * so, in principle, the set of quirks present can change
  2599. * across a reset. As a possible future enhancement, we
  2600. * could re-scan for quirks every time we reinitialize
  2601. * the device, but we'd have to make sure that the driver
  2602. * behaves intelligently if the quirks change.
  2603. */
  2604. for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
  2605. if (quirk_matches(id, &core_quirks[i]))
  2606. ctrl->quirks |= core_quirks[i].quirks;
  2607. }
  2608. }
  2609. if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
  2610. dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
  2611. ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
  2612. }
  2613. ctrl->crdt[0] = le16_to_cpu(id->crdt1);
  2614. ctrl->crdt[1] = le16_to_cpu(id->crdt2);
  2615. ctrl->crdt[2] = le16_to_cpu(id->crdt3);
  2616. ctrl->oacs = le16_to_cpu(id->oacs);
  2617. ctrl->oncs = le16_to_cpu(id->oncs);
  2618. ctrl->mtfa = le16_to_cpu(id->mtfa);
  2619. ctrl->oaes = le32_to_cpu(id->oaes);
  2620. ctrl->wctemp = le16_to_cpu(id->wctemp);
  2621. ctrl->cctemp = le16_to_cpu(id->cctemp);
  2622. atomic_set(&ctrl->abort_limit, id->acl + 1);
  2623. ctrl->vwc = id->vwc;
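	/*
	 * MDTS is a power of two in units of the controller's minimum
	 * supported page size (CAP.MPSMIN).  Converting to 512-byte
	 * sectors: with a 4KiB minimum page size, mdts == 5 yields
	 * 1 << (5 + 12 - 9) = 256 sectors, i.e. 128KiB per transfer
	 * (illustrative numbers, not from any particular device).
	 */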
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
	ctrl->ctratt = le32_to_cpu(id->ctratt);

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
			ret = -EINVAL;
			goto out_free;
		}

		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init_identify(ctrl, id);
	kfree(id);

	if (ret < 0)
		return ret;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_acre(ctrl);
	if (ret < 0)
		return ret;

	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)
			return ret;
	}

	ctrl->identified = true;

	return 0;

out_free:
	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
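
/*
 * Character device interface for the controller (/dev/nvmeX).  Opening the
 * node is only allowed while the controller is LIVE, and the open pins both
 * the controller reference and the transport module so neither can go away
 * behind an open file descriptor.
 */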
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
		return -EWOULDBLOCK;
	}

	nvme_get_ctrl(ctrl);
	if (!try_module_get(ctrl->ops->module)) {
		nvme_put_ctrl(ctrl);
		return -EINVAL;
	}

	file->private_data = ctrl;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	module_put(ctrl->ops->module);
	nvme_put_ctrl(ctrl);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
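
/*
 * Controller-level sysfs attributes.  reset_controller and rescan_controller
 * are write-only triggers; for example, writing any value as root to
 * /sys/class/nvme/nvme0/reset_controller schedules a synchronous controller
 * reset (path shown for a hypothetical first controller instance).
 */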
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_fops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}
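
/*
 * The WWID is built from the strongest identifier the namespace reports:
 * a UUID if present, otherwise the NGUID, otherwise the EUI-64, and as a
 * last resort a vendor/serial/model/nsid tuple with trailing padding
 * stripped from the serial and model strings.
 */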
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sprintf(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sprintf(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sprintf(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ids->nguid);
	}
	return sprintf(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
#ifdef CONFIG_NVM
	&nvme_nvm_attr_group,
#endif
	NULL,
};
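
/*
 * The two helper macros below stamp out read-only sysfs attributes.  For
 * instance, nvme_show_str_function(model) expands to a model_show() that
 * prints ctrl->subsys->model plus a matching dev_attr_model, which shows up
 * as a "model" file in the controller's sysfs directory.
 */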
#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
	return sprintf(buf, "%.*s\n", \
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field) \
static ssize_t field##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
	return sprintf(buf, "%d\n", ctrl->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sprintf(buf, "off\n");
	return sprintf(buf, "%d\n",
			opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;
	else if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sprintf(buf, "off\n");
	return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
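
/*
 * ctrl_loss_tmo is exposed in seconds but stored as a reconnect budget:
 * the store path rounds up ctrl_loss_tmo / reconnect_delay into
 * opts->max_reconnects, and the show path multiplies back.  For example,
 * with reconnect_delay = 10 a write of 600 becomes 60 reconnect attempts
 * (values chosen purely for illustration).
 */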
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
			return h;
	}

	return NULL;
}

static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_ids *ids)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids))
			return -EINVAL;
	}

	return 0;
}
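
/*
 * A namespace head represents one NSID within the subsystem and is shared
 * by every controller path to that namespace.  With CONFIG_NVME_MULTIPATH
 * the allocation is oversized by one nvme_ns pointer per possible NUMA
 * node so the currently preferred path can be tracked per node.
 */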
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_ns_ids *ids)
{
	struct nvme_ns_head *head;
	size_t size = sizeof(*head);
	int ret = -ENOMEM;

#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
	head->ids = *ids;
	kref_init(&head->ref);

	ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids);
	if (ret) {
		dev_err(ctrl->device,
			"duplicate IDs for nsid %d\n", nsid);
		goto out_cleanup_srcu;
	}

	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
		struct nvme_ns_ids *ids, bool is_shared)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
	int ret = 0;

	mutex_lock(&ctrl->subsys->lock);
	head = nvme_find_ns_head(ctrl->subsys, nsid);
	if (!head) {
		head = nvme_alloc_ns_head(ctrl, nsid, ids);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
		head->shared = is_shared;
	} else {
		ret = -EINVAL;
		if (!is_shared || !head->shared) {
			dev_err(ctrl->device,
				"Duplicate unshared namespace %d\n", nsid);
			goto out_put_ns_head;
		}
		if (!nvme_ns_ids_equal(&head->ids, ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
				nsid);
			goto out_put_ns_head;
		}
	}

	list_add_tail(&ns->siblings, &head->list);
	ns->head = head;
	mutex_unlock(&ctrl->subsys->lock);
	return 0;

out_put_ns_head:
	nvme_put_ns_head(head);
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!kref_get_unless_zero(&ns->kref))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);

/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
	struct nvme_ns *tmp;

	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
		if (tmp->head->ns_id < ns->head->ns_id) {
			list_add(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
}
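
/*
 * Bring up a new namespace: allocate the request queue and gendisk, attach
 * (or create) the subsystem-wide ns_head, read the namespace Identify data,
 * and only then publish the disk.  Errors unwind in the reverse order so a
 * half-initialized namespace is never visible to userspace.
 */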
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;

	if (nvme_identify_ns(ctrl, nsid, ids, &id))
		return;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		goto out_free_id;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_free_ns;

	if (ctrl->opts && ctrl->opts->data_digest)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;
	kref_init(&ns->kref);

	ret = nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED);
	if (ret)
		goto out_free_queue;
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_unlink_ns;

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = flags;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	if (nvme_update_ns_info(ns, id))
		goto out_put_disk;

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		ret = nvme_nvm_register(ns, disk_name, node);
		if (ret) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_put_disk;
		}
	}

	down_write(&ctrl->namespaces_rwsem);
	nvme_ns_add_to_ctrl_list(ns);
	up_write(&ctrl->namespaces_rwsem);
	nvme_get_ctrl(ctrl);

	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);

	nvme_mpath_add_disk(ns, id);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
	kfree(id);

	return;
out_put_disk:
	/* prevent double queue cleanup */
	ns->disk->queue = NULL;
	put_disk(ns->disk);
out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
out_free_queue:
	blk_cleanup_queue(ns->queue);
out_free_ns:
	kfree(ns);
out_free_id:
	kfree(id);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	set_capacity(ns->disk, 0);
	nvme_fault_inject_fini(&ns->fault_inject);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
	mutex_unlock(&ns->ctrl->subsys->lock);

	synchronize_rcu(); /* guarantee not available in head->list */
	nvme_mpath_clear_current_path(ns);
	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */

	if (ns->disk->flags & GENHD_FL_UP) {
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
	}

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	nvme_mpath_check_last_path(ns);
	nvme_put_ns(ns);
}

static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
{
	struct nvme_id_ns *id;
	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;

	if (test_bit(NVME_NS_DEAD, &ns->flags))
		goto out;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
	if (ret)
		goto out;

	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
		dev_err(ns->ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		goto out_free_id;
	}

	ret = nvme_update_ns_info(ns, id);

out_free_id:
	kfree(id);
out:
	/*
	 * Only remove the namespace if we got a fatal error back from the
	 * device, otherwise ignore the error and just move on.
	 *
	 * TODO: we should probably schedule a delayed retry here.
	 */
	if (ret > 0 && (ret & NVME_SC_DNR))
		nvme_ns_remove(ns);
	else
		revalidate_disk_size(ns->disk, true);
}

static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns_ids ids = { };
	struct nvme_ns *ns;

	if (nvme_identify_ns_descs(ctrl, nsid, &ids))
		return;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		nvme_validate_ns(ns, &ids);
		nvme_put_ns(ns);
		return;
	}

	switch (ids.csi) {
	case NVME_CSI_NVM:
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			dev_warn(ctrl->device,
				"nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
				nsid);
			break;
		}
		if (!nvme_multi_css(ctrl)) {
			dev_warn(ctrl->device,
				"command set not reported for nsid: %d\n",
				nsid);
			break;
		}
		nvme_alloc_ns(ctrl, nsid, &ids);
		break;
	default:
		dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
			ids.csi, nsid);
		break;
	}
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);
}
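
/*
 * Scan namespaces using the Identify "active namespace ID list" (CNS 0x02).
 * Each 4KiB reply holds up to 1024 NSIDs in ascending order; the list is
 * re-requested starting from the last NSID seen until a zero entry marks
 * the end.  NSIDs missing from the list (the gap between prev and the next
 * reported NSID) are removed as the scan progresses.
 */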
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
	__le32 *ns_list;
	u32 prev = 0;
	int ret = 0, i;

	if (nvme_ctrl_limited_cns(ctrl))
		return -EOPNOTSUPP;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (;;) {
		struct nvme_command cmd = {
			.identify.opcode	= nvme_admin_identify,
			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
			.identify.nsid		= cpu_to_le32(prev),
		};

		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
					    NVME_IDENTIFY_DATA_SIZE);
		if (ret)
			goto free;

		for (i = 0; i < nr_entries; i++) {
			u32 nsid = le32_to_cpu(ns_list[i]);

			if (!nsid)	/* end of the list? */
				goto out;
			nvme_validate_or_alloc_ns(ctrl, nsid);
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
		}
	}
out:
	nvme_remove_invalid_namespaces(ctrl, prev);
free:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);

	for (i = 1; i <= nn; i++)
		nvme_validate_or_alloc_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);

	/* No tagset on a live ctrl means IO queues could not be created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	mutex_lock(&ctrl->scan_lock);
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
	mutex_unlock(&ctrl->scan_lock);
}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
	}
	return ret;
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);

	/*
	 * The transport drivers must guarantee AER submission here is safe by
	 * flushing ctrl async_event_work after changing the controller state
	 * from LIVE and before freeing the admin queue.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		ctrl->ops->submit_async_event(ctrl);
}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}
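
/*
 * Wait out a firmware activation that the controller announced via AEN.
 * MTFA (maximum time for firmware activation) is reported in 100ms units,
 * so e.g. mtfa == 20 gives a 2 second budget (example value only); without
 * a reported MTFA we fall back to the admin command timeout.  If the
 * processing-paused state persists past the deadline, schedule a reset.
 */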
static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}

static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = (result & 0xff00) >> 8;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
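
/*
 * Completion path for Asynchronous Event Requests.  The 32-bit result dword
 * encodes the event type in bits 2:0 and, for Notice events, the notice id
 * in bits 15:8 (matching the masks used above); everything else is stashed
 * in aen_result and forwarded to userspace as an NVME_AEN uevent.
 */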
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *cel;
	unsigned long i;

	xa_for_each (&ctrl->cels, i, cel) {
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_simple_remove(&nvme_instance_ida, ctrl->instance);

	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
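
/*
 * The freeze/unfreeze and quiesce/unquiesce helpers below apply the
 * corresponding blk-mq operation to every namespace queue under
 * namespaces_rwsem.  Transport drivers typically pair them around teardown
 * and reset, e.g. nvme_start_freeze() before tearing down I/O queues and
 * nvme_unfreeze() once the queues are back (usage sketch, not a rule
 * enforced by this file).
 */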
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}

static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);