blk-mq.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Block multiqueue core code
  4. *
  5. * Copyright (C) 2013-2014 Jens Axboe
  6. * Copyright (C) 2013-2014 Christoph Hellwig
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/backing-dev.h>
  11. #include <linux/bio.h>
  12. #include <linux/blkdev.h>
  13. #include <linux/kmemleak.h>
  14. #include <linux/mm.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/workqueue.h>
  18. #include <linux/smp.h>
  19. #include <linux/llist.h>
  20. #include <linux/list_sort.h>
  21. #include <linux/cpu.h>
  22. #include <linux/cache.h>
  23. #include <linux/sched/sysctl.h>
  24. #include <linux/sched/topology.h>
  25. #include <linux/sched/signal.h>
  26. #include <linux/delay.h>
  27. #include <linux/crash_dump.h>
  28. #include <linux/prefetch.h>
  29. #include <linux/blk-crypto.h>
  30. #include <trace/events/block.h>
  31. #include <linux/blk-mq.h>
  32. #include <linux/t10-pi.h>
  33. #include "blk.h"
  34. #include "blk-mq.h"
  35. #include "blk-mq-debugfs.h"
  36. #include "blk-mq-tag.h"
  37. #include "blk-pm.h"
  38. #include "blk-stat.h"
  39. #include "blk-mq-sched.h"
  40. #include "blk-rq-qos.h"
  41. #include <trace/hooks/block.h>
  42. static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
  43. static void blk_mq_poll_stats_start(struct request_queue *q);
  44. static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
  45. static int blk_mq_poll_stats_bkt(const struct request *rq)
  46. {
  47. int ddir, sectors, bucket;
  48. ddir = rq_data_dir(rq);
  49. sectors = blk_rq_stats_sectors(rq);
  50. bucket = ddir + 2 * ilog2(sectors);
  51. if (bucket < 0)
  52. return -1;
  53. else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
  54. return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
  55. return bucket;
  56. }
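/*
 * Editorial note, not part of the original source: a worked example of the
 * bucket computation above, assuming the usual BLK_MQ_POLL_STATS_BKTS of 16
 * (i.e. 8 size buckets per data direction):
 *
 *	64-sector read:    ddir = 0, ilog2(64) = 6  -> bucket 12, used as-is
 *	4096-sector write: ddir = 1, ilog2(4096) = 12 -> 25, clamped to
 *	                   ddir + BLK_MQ_POLL_STATS_BKTS - 2 = 15
 */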
  57. /*
  58. * Check if any of the ctx, dispatch list or elevator
  59. * have pending work in this hardware queue.
  60. */
  61. static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
  62. {
  63. return !list_empty_careful(&hctx->dispatch) ||
  64. sbitmap_any_bit_set(&hctx->ctx_map) ||
  65. blk_mq_sched_has_work(hctx);
  66. }
  67. /*
  68. * Mark this ctx as having pending work in this hardware queue
  69. */
  70. static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
  71. struct blk_mq_ctx *ctx)
  72. {
  73. const int bit = ctx->index_hw[hctx->type];
  74. if (!sbitmap_test_bit(&hctx->ctx_map, bit))
  75. sbitmap_set_bit(&hctx->ctx_map, bit);
  76. }
  77. static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
  78. struct blk_mq_ctx *ctx)
  79. {
  80. const int bit = ctx->index_hw[hctx->type];
  81. sbitmap_clear_bit(&hctx->ctx_map, bit);
  82. }
  83. struct mq_inflight {
  84. struct hd_struct *part;
  85. unsigned int inflight[2];
  86. };
  87. static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
  88. struct request *rq, void *priv,
  89. bool reserved)
  90. {
  91. struct mq_inflight *mi = priv;
  92. if ((!mi->part->partno || rq->part == mi->part) &&
  93. blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
  94. mi->inflight[rq_data_dir(rq)]++;
  95. return true;
  96. }
  97. unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
  98. {
  99. struct mq_inflight mi = { .part = part };
  100. blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
  101. return mi.inflight[0] + mi.inflight[1];
  102. }
  103. void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
  104. unsigned int inflight[2])
  105. {
  106. struct mq_inflight mi = { .part = part };
  107. blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
  108. inflight[0] = mi.inflight[0];
  109. inflight[1] = mi.inflight[1];
  110. }
  111. void blk_freeze_queue_start(struct request_queue *q)
  112. {
  113. mutex_lock(&q->mq_freeze_lock);
  114. if (++q->mq_freeze_depth == 1) {
  115. percpu_ref_kill(&q->q_usage_counter);
  116. mutex_unlock(&q->mq_freeze_lock);
  117. if (queue_is_mq(q))
  118. blk_mq_run_hw_queues(q, false);
  119. } else {
  120. mutex_unlock(&q->mq_freeze_lock);
  121. }
  122. }
  123. EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
  124. void blk_mq_freeze_queue_wait(struct request_queue *q)
  125. {
  126. wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
  127. }
  128. EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
  129. int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
  130. unsigned long timeout)
  131. {
  132. return wait_event_timeout(q->mq_freeze_wq,
  133. percpu_ref_is_zero(&q->q_usage_counter),
  134. timeout);
  135. }
  136. EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
  137. /*
  138. * Guarantee no request is in use, so we can change any data structure of
  139. * the queue afterward.
  140. */
  141. void blk_freeze_queue(struct request_queue *q)
  142. {
  143. /*
  144. * In the !blk_mq case we are only calling this to kill the
  145. * q_usage_counter, otherwise this increases the freeze depth
  146. * and waits for it to return to zero. For this reason there is
  147. * no blk_unfreeze_queue(), and blk_freeze_queue() is not
  148. * exported to drivers as the only user for unfreeze is blk_mq.
  149. */
  150. blk_freeze_queue_start(q);
  151. blk_mq_freeze_queue_wait(q);
  152. }
  153. void blk_mq_freeze_queue(struct request_queue *q)
  154. {
  155. /*
  156. * ...just an alias to keep freeze and unfreeze actions balanced
  157. * in the blk_mq_* namespace
  158. */
  159. blk_freeze_queue(q);
  160. }
  161. EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
  162. void blk_mq_unfreeze_queue(struct request_queue *q)
  163. {
  164. mutex_lock(&q->mq_freeze_lock);
  165. q->mq_freeze_depth--;
  166. WARN_ON_ONCE(q->mq_freeze_depth < 0);
  167. if (!q->mq_freeze_depth) {
  168. percpu_ref_resurrect(&q->q_usage_counter);
  169. wake_up_all(&q->mq_freeze_wq);
  170. }
  171. mutex_unlock(&q->mq_freeze_lock);
  172. }
  173. EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
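/*
 * Editorial note, not part of the original source: a minimal sketch of how
 * the freeze/unfreeze pair above is typically used by code that must change
 * queue state with no requests in flight ("q" is assumed to be a live
 * request_queue owned by the caller):
 *
 *	blk_mq_freeze_queue(q);    // drains q_usage_counter to zero
 *	... change queue limits, elevator, hw queue mapping, etc. ...
 *	blk_mq_unfreeze_queue(q);  // resurrects the ref and wakes waiters
 */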
  174. /*
  175. * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
  176. * mpt3sas driver such that this function can be removed.
  177. */
  178. void blk_mq_quiesce_queue_nowait(struct request_queue *q)
  179. {
  180. blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
  181. }
  182. EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
  183. /**
  184. * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
  185. * @q: request queue.
  186. *
187. * Note: this function does not prevent the struct request end_io()
188. * callback from being invoked. Once this function returns, we are
189. * guaranteed that no dispatch can happen until the queue is unquiesced
190. * via blk_mq_unquiesce_queue().
  191. */
  192. void blk_mq_quiesce_queue(struct request_queue *q)
  193. {
  194. struct blk_mq_hw_ctx *hctx;
  195. unsigned int i;
  196. bool rcu = false;
  197. blk_mq_quiesce_queue_nowait(q);
  198. queue_for_each_hw_ctx(q, hctx, i) {
  199. if (hctx->flags & BLK_MQ_F_BLOCKING)
  200. synchronize_srcu(hctx->srcu);
  201. else
  202. rcu = true;
  203. }
  204. if (rcu)
  205. synchronize_rcu();
  206. }
  207. EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  208. /*
  209. * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
  210. * @q: request queue.
  211. *
212. * This function restores the queue to the state it was in before it
213. * was quiesced by blk_mq_quiesce_queue().
  214. */
  215. void blk_mq_unquiesce_queue(struct request_queue *q)
  216. {
  217. blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
  218. /* dispatch requests which are inserted during quiescing */
  219. blk_mq_run_hw_queues(q, true);
  220. }
  221. EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
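/*
 * Editorial note, not part of the original source: quiescing differs from
 * freezing in that it only stops dispatch to ->queue_rq(); requests may
 * still be allocated and queued. A hedged usage sketch, with "q" owned by
 * the caller:
 *
 *	blk_mq_quiesce_queue(q);   // no ->queue_rq() calls after this returns
 *	... reconfigure the device / swap driver data ...
 *	blk_mq_unquiesce_queue(q); // re-run hw queues to flush held requests
 */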
  222. void blk_mq_wake_waiters(struct request_queue *q)
  223. {
  224. struct blk_mq_hw_ctx *hctx;
  225. unsigned int i;
  226. queue_for_each_hw_ctx(q, hctx, i)
  227. if (blk_mq_hw_queue_mapped(hctx))
  228. blk_mq_tag_wakeup_all(hctx->tags, true);
  229. }
  230. /*
  231. * Only need start/end time stamping if we have iostat or
  232. * blk stats enabled, or using an IO scheduler.
  233. */
  234. static inline bool blk_mq_need_time_stamp(struct request *rq)
  235. {
  236. return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
  237. }
  238. static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
  239. unsigned int tag, u64 alloc_time_ns)
  240. {
  241. struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
  242. struct request *rq = tags->static_rqs[tag];
  243. if (data->q->elevator) {
  244. rq->tag = BLK_MQ_NO_TAG;
  245. rq->internal_tag = tag;
  246. } else {
  247. rq->tag = tag;
  248. rq->internal_tag = BLK_MQ_NO_TAG;
  249. }
  250. /* csd/requeue_work/fifo_time is initialized before use */
  251. rq->q = data->q;
  252. rq->mq_ctx = data->ctx;
  253. rq->mq_hctx = data->hctx;
  254. rq->rq_flags = 0;
  255. rq->cmd_flags = data->cmd_flags;
  256. if (data->flags & BLK_MQ_REQ_PM)
  257. rq->rq_flags |= RQF_PM;
  258. if (blk_queue_io_stat(data->q))
  259. rq->rq_flags |= RQF_IO_STAT;
  260. INIT_LIST_HEAD(&rq->queuelist);
  261. INIT_HLIST_NODE(&rq->hash);
  262. RB_CLEAR_NODE(&rq->rb_node);
  263. rq->rq_disk = NULL;
  264. rq->part = NULL;
  265. #ifdef CONFIG_BLK_RQ_ALLOC_TIME
  266. rq->alloc_time_ns = alloc_time_ns;
  267. #endif
  268. if (blk_mq_need_time_stamp(rq))
  269. rq->start_time_ns = ktime_get_ns();
  270. else
  271. rq->start_time_ns = 0;
  272. rq->io_start_time_ns = 0;
  273. rq->stats_sectors = 0;
  274. rq->nr_phys_segments = 0;
  275. #if defined(CONFIG_BLK_DEV_INTEGRITY)
  276. rq->nr_integrity_segments = 0;
  277. #endif
  278. blk_crypto_rq_set_defaults(rq);
  279. /* tag was already set */
  280. WRITE_ONCE(rq->deadline, 0);
  281. rq->timeout = 0;
  282. rq->end_io = NULL;
  283. rq->end_io_data = NULL;
  284. data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
  285. refcount_set(&rq->ref, 1);
  286. if (!op_is_flush(data->cmd_flags)) {
  287. struct elevator_queue *e = data->q->elevator;
  288. rq->elv.icq = NULL;
  289. if (e && e->type->ops.prepare_request) {
  290. if (e->type->icq_cache)
  291. blk_mq_sched_assign_ioc(rq);
  292. e->type->ops.prepare_request(rq);
  293. rq->rq_flags |= RQF_ELVPRIV;
  294. }
  295. }
  296. data->hctx->queued++;
  297. trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns);
  298. return rq;
  299. }
  300. static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
  301. {
  302. struct request_queue *q = data->q;
  303. struct elevator_queue *e = q->elevator;
  304. u64 alloc_time_ns = 0;
  305. unsigned int tag;
  306. /* alloc_time includes depth and tag waits */
  307. if (blk_queue_rq_alloc_time(q))
  308. alloc_time_ns = ktime_get_ns();
  309. if (data->cmd_flags & REQ_NOWAIT)
  310. data->flags |= BLK_MQ_REQ_NOWAIT;
  311. if (e) {
  312. /*
  313. * Flush requests are special and go directly to the
  314. * dispatch list. Don't include reserved tags in the
  315. * limiting, as it isn't useful.
  316. */
  317. if (!op_is_flush(data->cmd_flags) &&
  318. e->type->ops.limit_depth &&
  319. !(data->flags & BLK_MQ_REQ_RESERVED))
  320. e->type->ops.limit_depth(data->cmd_flags, data);
  321. }
  322. retry:
  323. data->ctx = blk_mq_get_ctx(q);
  324. data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
  325. if (!e)
  326. blk_mq_tag_busy(data->hctx);
  327. /*
  328. * Waiting allocations only fail because of an inactive hctx. In that
  329. * case just retry the hctx assignment and tag allocation as CPU hotplug
  330. * should have migrated us to an online CPU by now.
  331. */
  332. tag = blk_mq_get_tag(data);
  333. if (tag == BLK_MQ_NO_TAG) {
  334. if (data->flags & BLK_MQ_REQ_NOWAIT)
  335. return NULL;
  336. /*
  337. * Give up the CPU and sleep for a random short time to ensure
338. * that threads using a realtime scheduling class are migrated
  339. * off the CPU, and thus off the hctx that is going away.
  340. */
  341. msleep(3);
  342. goto retry;
  343. }
  344. return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
  345. }
  346. struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
  347. blk_mq_req_flags_t flags)
  348. {
  349. struct blk_mq_alloc_data data = {
  350. .q = q,
  351. .flags = flags,
  352. .cmd_flags = op,
  353. };
  354. struct request *rq;
  355. int ret;
  356. ret = blk_queue_enter(q, flags);
  357. if (ret)
  358. return ERR_PTR(ret);
  359. rq = __blk_mq_alloc_request(&data);
  360. if (!rq)
  361. goto out_queue_exit;
  362. rq->__data_len = 0;
  363. rq->__sector = (sector_t) -1;
  364. rq->bio = rq->biotail = NULL;
  365. return rq;
  366. out_queue_exit:
  367. blk_queue_exit(q);
  368. return ERR_PTR(-EWOULDBLOCK);
  369. }
  370. EXPORT_SYMBOL(blk_mq_alloc_request);
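/*
 * Editorial note, not part of the original source: a minimal sketch of
 * allocating and freeing a passthrough request via the export above; error
 * handling is trimmed and the op/flags are illustrative only:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and execute rq ...
 *	blk_mq_free_request(rq);
 */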
  371. struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
  372. unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
  373. {
  374. struct blk_mq_alloc_data data = {
  375. .q = q,
  376. .flags = flags,
  377. .cmd_flags = op,
  378. };
  379. u64 alloc_time_ns = 0;
  380. unsigned int cpu;
  381. unsigned int tag;
  382. int ret;
  383. /* alloc_time includes depth and tag waits */
  384. if (blk_queue_rq_alloc_time(q))
  385. alloc_time_ns = ktime_get_ns();
  386. /*
  387. * If the tag allocator sleeps we could get an allocation for a
  388. * different hardware context. No need to complicate the low level
  389. * allocator for this for the rare use case of a command tied to
  390. * a specific queue.
  391. */
  392. if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
  393. return ERR_PTR(-EINVAL);
  394. if (hctx_idx >= q->nr_hw_queues)
  395. return ERR_PTR(-EIO);
  396. ret = blk_queue_enter(q, flags);
  397. if (ret)
  398. return ERR_PTR(ret);
  399. /*
  400. * Check if the hardware context is actually mapped to anything.
  401. * If not tell the caller that it should skip this queue.
  402. */
  403. ret = -EXDEV;
  404. data.hctx = q->queue_hw_ctx[hctx_idx];
  405. if (!blk_mq_hw_queue_mapped(data.hctx))
  406. goto out_queue_exit;
  407. cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
  408. data.ctx = __blk_mq_get_ctx(q, cpu);
  409. if (!q->elevator)
  410. blk_mq_tag_busy(data.hctx);
  411. ret = -EWOULDBLOCK;
  412. tag = blk_mq_get_tag(&data);
  413. if (tag == BLK_MQ_NO_TAG)
  414. goto out_queue_exit;
  415. return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
  416. out_queue_exit:
  417. blk_queue_exit(q);
  418. return ERR_PTR(ret);
  419. }
  420. EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
  421. static void __blk_mq_free_request(struct request *rq)
  422. {
  423. struct request_queue *q = rq->q;
  424. struct blk_mq_ctx *ctx = rq->mq_ctx;
  425. struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
  426. const int sched_tag = rq->internal_tag;
  427. blk_crypto_free_request(rq);
  428. blk_pm_mark_last_busy(rq);
  429. rq->mq_hctx = NULL;
  430. if (rq->tag != BLK_MQ_NO_TAG)
  431. blk_mq_put_tag(hctx->tags, ctx, rq->tag);
  432. if (sched_tag != BLK_MQ_NO_TAG)
  433. blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
  434. blk_mq_sched_restart(hctx);
  435. blk_queue_exit(q);
  436. }
  437. void blk_mq_free_request(struct request *rq)
  438. {
  439. struct request_queue *q = rq->q;
  440. struct elevator_queue *e = q->elevator;
  441. struct blk_mq_ctx *ctx = rq->mq_ctx;
  442. struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
  443. if (rq->rq_flags & RQF_ELVPRIV) {
  444. if (e && e->type->ops.finish_request)
  445. e->type->ops.finish_request(rq);
  446. if (rq->elv.icq) {
  447. put_io_context(rq->elv.icq->ioc);
  448. rq->elv.icq = NULL;
  449. }
  450. }
  451. ctx->rq_completed[rq_is_sync(rq)]++;
  452. if (rq->rq_flags & RQF_MQ_INFLIGHT)
  453. __blk_mq_dec_active_requests(hctx);
  454. if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
  455. laptop_io_completion(q->backing_dev_info);
  456. rq_qos_done(q, rq);
  457. WRITE_ONCE(rq->state, MQ_RQ_IDLE);
  458. if (refcount_dec_and_test(&rq->ref))
  459. __blk_mq_free_request(rq);
  460. }
  461. EXPORT_SYMBOL_GPL(blk_mq_free_request);
  462. inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
  463. {
  464. u64 now = 0;
  465. if (blk_mq_need_time_stamp(rq))
  466. now = ktime_get_ns();
  467. if (rq->rq_flags & RQF_STATS) {
  468. blk_mq_poll_stats_start(rq->q);
  469. blk_stat_add(rq, now);
  470. }
  471. blk_mq_sched_completed_request(rq, now);
  472. blk_account_io_done(rq, now);
  473. if (rq->end_io) {
  474. rq_qos_done(rq->q, rq);
  475. rq->end_io(rq, error);
  476. } else {
  477. blk_mq_free_request(rq);
  478. }
  479. }
  480. EXPORT_SYMBOL(__blk_mq_end_request);
  481. void blk_mq_end_request(struct request *rq, blk_status_t error)
  482. {
  483. if (blk_update_request(rq, error, blk_rq_bytes(rq)))
  484. BUG();
  485. __blk_mq_end_request(rq, error);
  486. }
  487. EXPORT_SYMBOL(blk_mq_end_request);
  488. /*
  489. * Softirq action handler - move entries to local list and loop over them
  490. * while passing them to the queue registered handler.
  491. */
  492. static __latent_entropy void blk_done_softirq(struct softirq_action *h)
  493. {
  494. struct list_head *cpu_list, local_list;
  495. local_irq_disable();
  496. cpu_list = this_cpu_ptr(&blk_cpu_done);
  497. list_replace_init(cpu_list, &local_list);
  498. local_irq_enable();
  499. while (!list_empty(&local_list)) {
  500. struct request *rq;
  501. rq = list_entry(local_list.next, struct request, ipi_list);
  502. list_del_init(&rq->ipi_list);
  503. rq->q->mq_ops->complete(rq);
  504. }
  505. }
  506. static void blk_mq_trigger_softirq(struct request *rq)
  507. {
  508. struct list_head *list;
  509. unsigned long flags;
  510. local_irq_save(flags);
  511. list = this_cpu_ptr(&blk_cpu_done);
  512. list_add_tail(&rq->ipi_list, list);
  513. /*
514. * If the list only contains our just added request, raise the
515. * softirq. If there are already entries there, someone already
516. * raised the irq but it hasn't run yet.
  517. */
  518. if (list->next == &rq->ipi_list)
  519. raise_softirq_irqoff(BLOCK_SOFTIRQ);
  520. local_irq_restore(flags);
  521. }
  522. static int blk_softirq_cpu_dead(unsigned int cpu)
  523. {
  524. /*
  525. * If a CPU goes away, splice its entries to the current CPU
  526. * and trigger a run of the softirq
  527. */
  528. local_irq_disable();
  529. list_splice_init(&per_cpu(blk_cpu_done, cpu),
  530. this_cpu_ptr(&blk_cpu_done));
  531. raise_softirq_irqoff(BLOCK_SOFTIRQ);
  532. local_irq_enable();
  533. return 0;
  534. }
  535. static void __blk_mq_complete_request_remote(void *data)
  536. {
  537. struct request *rq = data;
  538. /*
539. * For most single queue controllers there is only one irq vector
540. * for handling I/O completion, and that vector's affinity is set
541. * to all possible CPUs. On most architectures this means the irq
542. * is handled on one specific CPU.
543. *
544. * So complete I/O requests in softirq context for single queue
545. * devices to avoid degrading I/O performance due to irqsoff latency.
  546. */
  547. if (rq->q->nr_hw_queues == 1)
  548. blk_mq_trigger_softirq(rq);
  549. else
  550. rq->q->mq_ops->complete(rq);
  551. }
  552. static inline bool blk_mq_complete_need_ipi(struct request *rq)
  553. {
  554. int cpu = raw_smp_processor_id();
  555. if (!IS_ENABLED(CONFIG_SMP) ||
  556. !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
  557. return false;
  558. /* same CPU or cache domain? Complete locally */
  559. if (cpu == rq->mq_ctx->cpu ||
  560. (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
  561. cpus_share_cache(cpu, rq->mq_ctx->cpu)))
  562. return false;
  563. /* don't try to IPI to an offline CPU */
  564. return cpu_online(rq->mq_ctx->cpu);
  565. }
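/*
 * Editorial note, not part of the original source: summarizing the checks
 * above, an IPI is only used when SMP is enabled, QUEUE_FLAG_SAME_COMP is
 * set, the submitting CPU is neither the current CPU nor (absent
 * QUEUE_FLAG_SAME_FORCE) in the same cache domain, and the submitting CPU
 * is still online. Everything else completes locally.
 */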
  566. bool blk_mq_complete_request_remote(struct request *rq)
  567. {
  568. WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
  569. /*
570. * For a polled request, always complete locally; it's pointless
  571. * to redirect the completion.
  572. */
  573. if (rq->cmd_flags & REQ_HIPRI)
  574. return false;
  575. if (blk_mq_complete_need_ipi(rq)) {
  576. rq->csd.func = __blk_mq_complete_request_remote;
  577. rq->csd.info = rq;
  578. rq->csd.flags = 0;
  579. smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
  580. } else {
  581. if (rq->q->nr_hw_queues > 1)
  582. return false;
  583. blk_mq_trigger_softirq(rq);
  584. }
  585. return true;
  586. }
  587. EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
  588. /**
  589. * blk_mq_complete_request - end I/O on a request
  590. * @rq: the request being processed
  591. *
  592. * Description:
  593. * Complete a request by scheduling the ->complete_rq operation.
  594. **/
  595. void blk_mq_complete_request(struct request *rq)
  596. {
  597. if (!blk_mq_complete_request_remote(rq))
  598. rq->q->mq_ops->complete(rq);
  599. }
  600. EXPORT_SYMBOL(blk_mq_complete_request);
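/*
 * Editorial note, not part of the original source: a hedged sketch of the
 * typical completion flow from a driver's interrupt handler (the foo_*
 * names are illustrative, not real API):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct request *rq = foo_fetch_completed(data);
 *
 *		blk_mq_complete_request(rq);  // runs ->complete() locally,
 *					      // via IPI, or via softirq
 *		return IRQ_HANDLED;
 *	}
 *
 * The driver's ->complete() callback then finishes the request, usually
 * through blk_mq_end_request().
 */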
  601. static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
  602. __releases(hctx->srcu)
  603. {
  604. if (!(hctx->flags & BLK_MQ_F_BLOCKING))
  605. rcu_read_unlock();
  606. else
  607. srcu_read_unlock(hctx->srcu, srcu_idx);
  608. }
  609. static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
  610. __acquires(hctx->srcu)
  611. {
  612. if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
  613. /* shut up gcc false positive */
  614. *srcu_idx = 0;
  615. rcu_read_lock();
  616. } else
  617. *srcu_idx = srcu_read_lock(hctx->srcu);
  618. }
  619. /**
  620. * blk_mq_start_request - Start processing a request
  621. * @rq: Pointer to request to be started
  622. *
623. * Function used by device drivers to notify the block layer that a request
624. * is going to be processed now, so the block layer can do proper
625. * initializations such as starting the timeout timer.
  626. */
  627. void blk_mq_start_request(struct request *rq)
  628. {
  629. struct request_queue *q = rq->q;
  630. trace_block_rq_issue(q, rq);
  631. if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
  632. rq->io_start_time_ns = ktime_get_ns();
  633. rq->stats_sectors = blk_rq_sectors(rq);
  634. rq->rq_flags |= RQF_STATS;
  635. rq_qos_issue(q, rq);
  636. }
  637. WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
  638. blk_add_timer(rq);
  639. WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
  640. #ifdef CONFIG_BLK_DEV_INTEGRITY
  641. if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
  642. q->integrity.profile->prepare_fn(rq);
  643. #endif
  644. }
  645. EXPORT_SYMBOL(blk_mq_start_request);
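/*
 * Editorial note, not part of the original source: a hedged sketch of where
 * blk_mq_start_request() sits in a driver's ->queue_rq() implementation
 * (foo_* names are illustrative):
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);     // arm the timeout before issuing
 *		if (foo_issue(rq))
 *			return BLK_STS_RESOURCE;  // core will requeue/retry
 *		return BLK_STS_OK;
 *	}
 */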
  646. static void __blk_mq_requeue_request(struct request *rq)
  647. {
  648. struct request_queue *q = rq->q;
  649. blk_mq_put_driver_tag(rq);
  650. trace_block_rq_requeue(q, rq);
  651. rq_qos_requeue(q, rq);
  652. if (blk_mq_request_started(rq)) {
  653. WRITE_ONCE(rq->state, MQ_RQ_IDLE);
  654. rq->rq_flags &= ~RQF_TIMED_OUT;
  655. }
  656. }
  657. void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
  658. {
  659. __blk_mq_requeue_request(rq);
  660. /* this request will be re-inserted to io scheduler queue */
  661. blk_mq_sched_requeue_request(rq);
  662. blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
  663. }
  664. EXPORT_SYMBOL(blk_mq_requeue_request);
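/*
 * Editorial note, not part of the original source: a minimal requeue sketch
 * as a driver might use it when the device temporarily cannot accept "rq":
 *
 *	blk_mq_requeue_request(rq, false);          // park it on q->requeue_list
 *	...
 *	blk_mq_delay_kick_requeue_list(rq->q, 100); // retry in ~100 ms
 */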
  665. static void blk_mq_requeue_work(struct work_struct *work)
  666. {
  667. struct request_queue *q =
  668. container_of(work, struct request_queue, requeue_work.work);
  669. LIST_HEAD(rq_list);
  670. struct request *rq, *next;
  671. spin_lock_irq(&q->requeue_lock);
  672. list_splice_init(&q->requeue_list, &rq_list);
  673. spin_unlock_irq(&q->requeue_lock);
  674. list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
  675. if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
  676. continue;
  677. rq->rq_flags &= ~RQF_SOFTBARRIER;
  678. list_del_init(&rq->queuelist);
  679. /*
  680. * If RQF_DONTPREP, rq has contained some driver specific
  681. * data, so insert it to hctx dispatch list to avoid any
  682. * merge.
  683. */
  684. if (rq->rq_flags & RQF_DONTPREP)
  685. blk_mq_request_bypass_insert(rq, false, false);
  686. else
  687. blk_mq_sched_insert_request(rq, true, false, false);
  688. }
  689. while (!list_empty(&rq_list)) {
  690. rq = list_entry(rq_list.next, struct request, queuelist);
  691. list_del_init(&rq->queuelist);
  692. blk_mq_sched_insert_request(rq, false, false, false);
  693. }
  694. blk_mq_run_hw_queues(q, false);
  695. }
  696. void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
  697. bool kick_requeue_list)
  698. {
  699. struct request_queue *q = rq->q;
  700. unsigned long flags;
  701. /*
  702. * We abuse this flag that is otherwise used by the I/O scheduler to
  703. * request head insertion from the workqueue.
  704. */
  705. BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
  706. spin_lock_irqsave(&q->requeue_lock, flags);
  707. if (at_head) {
  708. rq->rq_flags |= RQF_SOFTBARRIER;
  709. list_add(&rq->queuelist, &q->requeue_list);
  710. } else {
  711. list_add_tail(&rq->queuelist, &q->requeue_list);
  712. }
  713. spin_unlock_irqrestore(&q->requeue_lock, flags);
  714. if (kick_requeue_list)
  715. blk_mq_kick_requeue_list(q);
  716. }
  717. void blk_mq_kick_requeue_list(struct request_queue *q)
  718. {
  719. kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
  720. }
  721. EXPORT_SYMBOL(blk_mq_kick_requeue_list);
  722. void blk_mq_delay_kick_requeue_list(struct request_queue *q,
  723. unsigned long msecs)
  724. {
  725. kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
  726. msecs_to_jiffies(msecs));
  727. }
  728. EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
  729. struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
  730. {
  731. if (tag < tags->nr_tags) {
  732. prefetch(tags->rqs[tag]);
  733. return tags->rqs[tag];
  734. }
  735. return NULL;
  736. }
  737. EXPORT_SYMBOL(blk_mq_tag_to_rq);
  738. static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
  739. void *priv, bool reserved)
  740. {
  741. /*
  742. * If we find a request that isn't idle and the queue matches,
  743. * we know the queue is busy. Return false to stop the iteration.
  744. */
  745. if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
  746. bool *busy = priv;
  747. *busy = true;
  748. return false;
  749. }
  750. return true;
  751. }
  752. bool blk_mq_queue_inflight(struct request_queue *q)
  753. {
  754. bool busy = false;
  755. blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
  756. return busy;
  757. }
  758. EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
  759. static void blk_mq_rq_timed_out(struct request *req, bool reserved)
  760. {
  761. req->rq_flags |= RQF_TIMED_OUT;
  762. if (req->q->mq_ops->timeout) {
  763. enum blk_eh_timer_return ret;
  764. ret = req->q->mq_ops->timeout(req, reserved);
  765. if (ret == BLK_EH_DONE)
  766. return;
  767. WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
  768. }
  769. blk_add_timer(req);
  770. }
  771. static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
  772. {
  773. unsigned long deadline;
  774. if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
  775. return false;
  776. if (rq->rq_flags & RQF_TIMED_OUT)
  777. return false;
  778. deadline = READ_ONCE(rq->deadline);
  779. if (time_after_eq(jiffies, deadline))
  780. return true;
  781. if (*next == 0)
  782. *next = deadline;
  783. else if (time_after(*next, deadline))
  784. *next = deadline;
  785. return false;
  786. }
  787. void blk_mq_put_rq_ref(struct request *rq)
  788. {
  789. if (is_flush_rq(rq))
  790. rq->end_io(rq, 0);
  791. else if (refcount_dec_and_test(&rq->ref))
  792. __blk_mq_free_request(rq);
  793. }
  794. static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
  795. struct request *rq, void *priv, bool reserved)
  796. {
  797. unsigned long *next = priv;
  798. /*
  799. * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
800. * be reallocated underneath the timeout handler's processing, and
801. * the expire check is therefore reliable. If the request is not expired, then
  802. * it was completed and reallocated as a new request after returning
  803. * from blk_mq_check_expired().
  804. */
  805. if (blk_mq_req_expired(rq, next))
  806. blk_mq_rq_timed_out(rq, reserved);
  807. return true;
  808. }
  809. static void blk_mq_timeout_work(struct work_struct *work)
  810. {
  811. struct request_queue *q =
  812. container_of(work, struct request_queue, timeout_work);
  813. unsigned long next = 0;
  814. struct blk_mq_hw_ctx *hctx;
  815. int i;
  816. /* A deadlock might occur if a request is stuck requiring a
  817. * timeout at the same time a queue freeze is waiting
  818. * completion, since the timeout code would not be able to
  819. * acquire the queue reference here.
  820. *
  821. * That's why we don't use blk_queue_enter here; instead, we use
  822. * percpu_ref_tryget directly, because we need to be able to
  823. * obtain a reference even in the short window between the queue
  824. * starting to freeze, by dropping the first reference in
  825. * blk_freeze_queue_start, and the moment the last request is
  826. * consumed, marked by the instant q_usage_counter reaches
  827. * zero.
  828. */
  829. if (!percpu_ref_tryget(&q->q_usage_counter))
  830. return;
  831. blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
  832. if (next != 0) {
  833. mod_timer(&q->timeout, next);
  834. } else {
  835. /*
  836. * Request timeouts are handled as a forward rolling timer. If
  837. * we end up here it means that no requests are pending and
  838. * also that no request has been pending for a while. Mark
  839. * each hctx as idle.
  840. */
  841. queue_for_each_hw_ctx(q, hctx, i) {
  842. /* the hctx may be unmapped, so check it here */
  843. if (blk_mq_hw_queue_mapped(hctx))
  844. blk_mq_tag_idle(hctx);
  845. }
  846. }
  847. blk_queue_exit(q);
  848. }
  849. struct flush_busy_ctx_data {
  850. struct blk_mq_hw_ctx *hctx;
  851. struct list_head *list;
  852. };
  853. static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
  854. {
  855. struct flush_busy_ctx_data *flush_data = data;
  856. struct blk_mq_hw_ctx *hctx = flush_data->hctx;
  857. struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
  858. enum hctx_type type = hctx->type;
  859. spin_lock(&ctx->lock);
  860. list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
  861. sbitmap_clear_bit(sb, bitnr);
  862. spin_unlock(&ctx->lock);
  863. return true;
  864. }
  865. /*
  866. * Process software queues that have been marked busy, splicing them
867. * to the for-dispatch list.
  868. */
  869. void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
  870. {
  871. struct flush_busy_ctx_data data = {
  872. .hctx = hctx,
  873. .list = list,
  874. };
  875. sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
  876. }
  877. EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
  878. struct dispatch_rq_data {
  879. struct blk_mq_hw_ctx *hctx;
  880. struct request *rq;
  881. };
  882. static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
  883. void *data)
  884. {
  885. struct dispatch_rq_data *dispatch_data = data;
  886. struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
  887. struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
  888. enum hctx_type type = hctx->type;
  889. spin_lock(&ctx->lock);
  890. if (!list_empty(&ctx->rq_lists[type])) {
  891. dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
  892. list_del_init(&dispatch_data->rq->queuelist);
  893. if (list_empty(&ctx->rq_lists[type]))
  894. sbitmap_clear_bit(sb, bitnr);
  895. }
  896. spin_unlock(&ctx->lock);
  897. return !dispatch_data->rq;
  898. }
  899. struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
  900. struct blk_mq_ctx *start)
  901. {
  902. unsigned off = start ? start->index_hw[hctx->type] : 0;
  903. struct dispatch_rq_data data = {
  904. .hctx = hctx,
  905. .rq = NULL,
  906. };
  907. __sbitmap_for_each_set(&hctx->ctx_map, off,
  908. dispatch_rq_from_ctx, &data);
  909. return data.rq;
  910. }
  911. static inline unsigned int queued_to_index(unsigned int queued)
  912. {
  913. if (!queued)
  914. return 0;
  915. return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
  916. }
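/*
 * Editorial note, not part of the original source: queued_to_index() maps a
 * dispatch batch size onto a small histogram slot, e.g. 1 -> 1, 2..3 -> 2,
 * 4..7 -> 3, 8..15 -> 4, capped at BLK_MQ_MAX_DISPATCH_ORDER - 1.
 */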
  917. static bool __blk_mq_get_driver_tag(struct request *rq)
  918. {
  919. struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
  920. unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
  921. int tag;
  922. blk_mq_tag_busy(rq->mq_hctx);
  923. if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
  924. bt = rq->mq_hctx->tags->breserved_tags;
  925. tag_offset = 0;
  926. } else {
  927. if (!hctx_may_queue(rq->mq_hctx, bt))
  928. return false;
  929. }
  930. tag = __sbitmap_queue_get(bt);
  931. if (tag == BLK_MQ_NO_TAG)
  932. return false;
  933. rq->tag = tag + tag_offset;
  934. return true;
  935. }
  936. static bool blk_mq_get_driver_tag(struct request *rq)
  937. {
  938. struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
  939. if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
  940. return false;
  941. if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
  942. !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
  943. rq->rq_flags |= RQF_MQ_INFLIGHT;
  944. __blk_mq_inc_active_requests(hctx);
  945. }
  946. hctx->tags->rqs[rq->tag] = rq;
  947. return true;
  948. }
  949. static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
  950. int flags, void *key)
  951. {
  952. struct blk_mq_hw_ctx *hctx;
  953. hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
  954. spin_lock(&hctx->dispatch_wait_lock);
  955. if (!list_empty(&wait->entry)) {
  956. struct sbitmap_queue *sbq;
  957. list_del_init(&wait->entry);
  958. sbq = hctx->tags->bitmap_tags;
  959. atomic_dec(&sbq->ws_active);
  960. }
  961. spin_unlock(&hctx->dispatch_wait_lock);
  962. blk_mq_run_hw_queue(hctx, true);
  963. return 1;
  964. }
  965. /*
  966. * Mark us waiting for a tag. For shared tags, this involves hooking us into
  967. * the tag wakeups. For non-shared tags, we can simply mark us needing a
  968. * restart. For both cases, take care to check the condition again after
  969. * marking us as waiting.
  970. */
  971. static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
  972. struct request *rq)
  973. {
  974. struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
  975. struct wait_queue_head *wq;
  976. wait_queue_entry_t *wait;
  977. bool ret;
  978. if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
  979. blk_mq_sched_mark_restart_hctx(hctx);
  980. /*
  981. * It's possible that a tag was freed in the window between the
  982. * allocation failure and adding the hardware queue to the wait
  983. * queue.
  984. *
  985. * Don't clear RESTART here, someone else could have set it.
  986. * At most this will cost an extra queue run.
  987. */
  988. return blk_mq_get_driver_tag(rq);
  989. }
  990. wait = &hctx->dispatch_wait;
  991. if (!list_empty_careful(&wait->entry))
  992. return false;
  993. wq = &bt_wait_ptr(sbq, hctx)->wait;
  994. spin_lock_irq(&wq->lock);
  995. spin_lock(&hctx->dispatch_wait_lock);
  996. if (!list_empty(&wait->entry)) {
  997. spin_unlock(&hctx->dispatch_wait_lock);
  998. spin_unlock_irq(&wq->lock);
  999. return false;
  1000. }
  1001. atomic_inc(&sbq->ws_active);
  1002. wait->flags &= ~WQ_FLAG_EXCLUSIVE;
  1003. __add_wait_queue(wq, wait);
  1004. /*
  1005. * It's possible that a tag was freed in the window between the
  1006. * allocation failure and adding the hardware queue to the wait
  1007. * queue.
  1008. */
  1009. ret = blk_mq_get_driver_tag(rq);
  1010. if (!ret) {
  1011. spin_unlock(&hctx->dispatch_wait_lock);
  1012. spin_unlock_irq(&wq->lock);
  1013. return false;
  1014. }
  1015. /*
  1016. * We got a tag, remove ourselves from the wait queue to ensure
  1017. * someone else gets the wakeup.
  1018. */
  1019. list_del_init(&wait->entry);
  1020. atomic_dec(&sbq->ws_active);
  1021. spin_unlock(&hctx->dispatch_wait_lock);
  1022. spin_unlock_irq(&wq->lock);
  1023. return true;
  1024. }
  1025. #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
  1026. #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
  1027. /*
1028. * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
1029. * - EWMA is one simple way to compute a running average value
1030. * - weights of 7/8 and 1/8 are applied so that it decays exponentially
1031. * - a factor of 4 is used to avoid the result getting too small (zero);
1032. * the exact factor doesn't matter because EWMA decreases exponentially
  1033. */
  1034. static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
  1035. {
  1036. unsigned int ewma;
  1037. ewma = hctx->dispatch_busy;
  1038. if (!ewma && !busy)
  1039. return;
  1040. ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
  1041. if (busy)
  1042. ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
  1043. ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
  1044. hctx->dispatch_busy = ewma;
  1045. }
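/*
 * Editorial note, not part of the original source: worked arithmetic for the
 * EWMA update above (weight 8, factor 4, i.e. a busy pass adds 1 << 4 = 16):
 *
 *	start at 0, busy:  (0 * 7 + 16) / 8 = 2
 *	again busy:        (2 * 7 + 16) / 8 = 3
 *	then not busy:     (3 * 7) / 8      = 2
 *
 * so dispatch_busy rises quickly under BLK_STS_RESOURCE pressure and decays
 * back toward zero once dispatches succeed again.
 */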
  1046. #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
  1047. static void blk_mq_handle_dev_resource(struct request *rq,
  1048. struct list_head *list)
  1049. {
  1050. struct request *next =
  1051. list_first_entry_or_null(list, struct request, queuelist);
  1052. /*
  1053. * If an I/O scheduler has been configured and we got a driver tag for
  1054. * the next request already, free it.
  1055. */
  1056. if (next)
  1057. blk_mq_put_driver_tag(next);
  1058. list_add(&rq->queuelist, list);
  1059. __blk_mq_requeue_request(rq);
  1060. }
  1061. static void blk_mq_handle_zone_resource(struct request *rq,
  1062. struct list_head *zone_list)
  1063. {
  1064. /*
  1065. * If we end up here it is because we cannot dispatch a request to a
  1066. * specific zone due to LLD level zone-write locking or other zone
  1067. * related resource not being available. In this case, set the request
  1068. * aside in zone_list for retrying it later.
  1069. */
  1070. list_add(&rq->queuelist, zone_list);
  1071. __blk_mq_requeue_request(rq);
  1072. }
  1073. enum prep_dispatch {
  1074. PREP_DISPATCH_OK,
  1075. PREP_DISPATCH_NO_TAG,
  1076. PREP_DISPATCH_NO_BUDGET,
  1077. };
  1078. static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
  1079. bool need_budget)
  1080. {
  1081. struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
  1082. if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) {
  1083. blk_mq_put_driver_tag(rq);
  1084. return PREP_DISPATCH_NO_BUDGET;
  1085. }
  1086. if (!blk_mq_get_driver_tag(rq)) {
  1087. /*
  1088. * The initial allocation attempt failed, so we need to
  1089. * rerun the hardware queue when a tag is freed. The
  1090. * waitqueue takes care of that. If the queue is run
  1091. * before we add this entry back on the dispatch list,
  1092. * we'll re-run it below.
  1093. */
  1094. if (!blk_mq_mark_tag_wait(hctx, rq)) {
  1095. /*
1096. * All budgets not obtained in this function are released
1097. * together when the partial dispatch is handled.
  1098. */
  1099. if (need_budget)
  1100. blk_mq_put_dispatch_budget(rq->q);
  1101. return PREP_DISPATCH_NO_TAG;
  1102. }
  1103. }
  1104. return PREP_DISPATCH_OK;
  1105. }
  1106. /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
  1107. static void blk_mq_release_budgets(struct request_queue *q,
  1108. unsigned int nr_budgets)
  1109. {
  1110. int i;
  1111. for (i = 0; i < nr_budgets; i++)
  1112. blk_mq_put_dispatch_budget(q);
  1113. }
  1114. /*
  1115. * Returns true if we did some work AND can potentially do more.
  1116. */
  1117. bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
  1118. unsigned int nr_budgets)
  1119. {
  1120. enum prep_dispatch prep;
  1121. struct request_queue *q = hctx->queue;
  1122. struct request *rq, *nxt;
  1123. int errors, queued;
  1124. blk_status_t ret = BLK_STS_OK;
  1125. LIST_HEAD(zone_list);
  1126. bool needs_resource = false;
  1127. if (list_empty(list))
  1128. return false;
  1129. /*
  1130. * Now process all the entries, sending them to the driver.
  1131. */
  1132. errors = queued = 0;
  1133. do {
  1134. struct blk_mq_queue_data bd;
  1135. rq = list_first_entry(list, struct request, queuelist);
  1136. WARN_ON_ONCE(hctx != rq->mq_hctx);
  1137. prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
  1138. if (prep != PREP_DISPATCH_OK)
  1139. break;
  1140. list_del_init(&rq->queuelist);
  1141. bd.rq = rq;
  1142. /*
  1143. * Flag last if we have no more requests, or if we have more
  1144. * but can't assign a driver tag to it.
  1145. */
  1146. if (list_empty(list))
  1147. bd.last = true;
  1148. else {
  1149. nxt = list_first_entry(list, struct request, queuelist);
  1150. bd.last = !blk_mq_get_driver_tag(nxt);
  1151. }
  1152. /*
1153. * Once the request is queued to the LLD, there is no need to
1154. * cover the budget any more.
  1155. */
  1156. if (nr_budgets)
  1157. nr_budgets--;
  1158. ret = q->mq_ops->queue_rq(hctx, &bd);
  1159. switch (ret) {
  1160. case BLK_STS_OK:
  1161. queued++;
  1162. break;
  1163. case BLK_STS_RESOURCE:
  1164. needs_resource = true;
  1165. fallthrough;
  1166. case BLK_STS_DEV_RESOURCE:
  1167. blk_mq_handle_dev_resource(rq, list);
  1168. goto out;
  1169. case BLK_STS_ZONE_RESOURCE:
  1170. /*
  1171. * Move the request to zone_list and keep going through
  1172. * the dispatch list to find more requests the drive can
  1173. * accept.
  1174. */
  1175. blk_mq_handle_zone_resource(rq, &zone_list);
  1176. needs_resource = true;
  1177. break;
  1178. default:
  1179. errors++;
  1180. blk_mq_end_request(rq, BLK_STS_IOERR);
  1181. }
  1182. } while (!list_empty(list));
  1183. out:
  1184. if (!list_empty(&zone_list))
  1185. list_splice_tail_init(&zone_list, list);
  1186. hctx->dispatched[queued_to_index(queued)]++;
/*
 * If we didn't flush the entire list, we could have told the driver
 * there was more coming, but that turned out to be a lie.
 */
  1190. if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
  1191. q->mq_ops->commit_rqs(hctx);
  1192. /*
  1193. * Any items that need requeuing? Stuff them into hctx->dispatch,
  1194. * that is where we will continue on next queue run.
  1195. */
  1196. if (!list_empty(list)) {
  1197. bool needs_restart;
  1198. /* For non-shared tags, the RESTART check will suffice */
  1199. bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
  1200. (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
  1201. blk_mq_release_budgets(q, nr_budgets);
  1202. spin_lock(&hctx->lock);
  1203. list_splice_tail_init(list, &hctx->dispatch);
  1204. spin_unlock(&hctx->lock);
/*
 * Order adding requests to hctx->dispatch against checking the
 * SCHED_RESTART flag. The pair of this smp_mb() is the one in
 * blk_mq_sched_restart(). This prevents the restart code path from
 * missing the newly added requests on hctx->dispatch while
 * SCHED_RESTART is observed here.
 */
  1212. smp_mb();
  1213. /*
  1214. * If SCHED_RESTART was set by the caller of this function and
  1215. * it is no longer set that means that it was cleared by another
  1216. * thread and hence that a queue rerun is needed.
  1217. *
  1218. * If 'no_tag' is set, that means that we failed getting
  1219. * a driver tag with an I/O scheduler attached. If our dispatch
  1220. * waitqueue is no longer active, ensure that we run the queue
  1221. * AFTER adding our entries back to the list.
  1222. *
  1223. * If no I/O scheduler has been configured it is possible that
  1224. * the hardware queue got stopped and restarted before requests
  1225. * were pushed back onto the dispatch list. Rerun the queue to
  1226. * avoid starvation. Notes:
  1227. * - blk_mq_run_hw_queue() checks whether or not a queue has
  1228. * been stopped before rerunning a queue.
  1229. * - Some but not all block drivers stop a queue before
  1230. * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
  1231. * and dm-rq.
  1232. *
  1233. * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
  1234. * bit is set, run queue after a delay to avoid IO stalls
  1235. * that could otherwise occur if the queue is idle. We'll do
  1236. * similar if we couldn't get budget or couldn't lock a zone
  1237. * and SCHED_RESTART is set.
  1238. */
  1239. needs_restart = blk_mq_sched_needs_restart(hctx);
  1240. if (prep == PREP_DISPATCH_NO_BUDGET)
  1241. needs_resource = true;
  1242. if (!needs_restart ||
  1243. (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
  1244. blk_mq_run_hw_queue(hctx, true);
  1245. else if (needs_restart && needs_resource)
  1246. blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
  1247. blk_mq_update_dispatch_busy(hctx, true);
  1248. return false;
  1249. } else
  1250. blk_mq_update_dispatch_busy(hctx, false);
  1251. return (queued + errors) != 0;
  1252. }
  1253. /**
  1254. * __blk_mq_run_hw_queue - Run a hardware queue.
  1255. * @hctx: Pointer to the hardware queue to run.
  1256. *
  1257. * Send pending requests to the hardware.
  1258. */
  1259. static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
  1260. {
  1261. int srcu_idx;
/*
 * We should be running this queue from one of the CPUs that
 * are mapped to it.
 *
 * There are at least two related races between setting
 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
 * __blk_mq_run_hw_queue():
 *
 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
 *   but later it becomes online; in that case this warning is
 *   harmless.
 *
 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
 *   but later it becomes offline; in that case the warning can't be
 *   triggered, and we rely on the blk-mq timeout handler to handle
 *   requests dispatched to this hctx.
 */
  1279. if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
  1280. cpu_online(hctx->next_cpu)) {
  1281. printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
  1282. raw_smp_processor_id(),
  1283. cpumask_empty(hctx->cpumask) ? "inactive": "active");
  1284. dump_stack();
  1285. }
/*
 * We can't run the queue inline with interrupts disabled. Ensure that
 * we catch bad users of this early.
 */
  1290. WARN_ON_ONCE(in_interrupt());
  1291. might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
  1292. hctx_lock(hctx, &srcu_idx);
  1293. blk_mq_sched_dispatch_requests(hctx);
  1294. hctx_unlock(hctx, srcu_idx);
  1295. }
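/*
 * Pick the first online CPU mapped to this hctx, falling back to the
 * first mapped CPU if none of them are currently online.
 */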
  1296. static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
  1297. {
  1298. int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
  1299. if (cpu >= nr_cpu_ids)
  1300. cpu = cpumask_first(hctx->cpumask);
  1301. return cpu;
  1302. }
  1303. /*
  1304. * It'd be great if the workqueue API had a way to pass
  1305. * in a mask and had some smarts for more clever placement.
  1306. * For now we just round-robin here, switching for every
  1307. * BLK_MQ_CPU_WORK_BATCH queued items.
  1308. */
  1309. static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
  1310. {
  1311. bool tried = false;
  1312. int next_cpu = hctx->next_cpu;
  1313. if (hctx->queue->nr_hw_queues == 1)
  1314. return WORK_CPU_UNBOUND;
  1315. if (--hctx->next_cpu_batch <= 0) {
  1316. select_cpu:
  1317. next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
  1318. cpu_online_mask);
  1319. if (next_cpu >= nr_cpu_ids)
  1320. next_cpu = blk_mq_first_mapped_cpu(hctx);
  1321. hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
  1322. }
/*
 * Fall back to an unbound schedule if we can't find an online CPU for
 * this hctx; this should only happen on the CPU-dead handling path.
 */
  1327. if (!cpu_online(next_cpu)) {
  1328. if (!tried) {
  1329. tried = true;
  1330. goto select_cpu;
  1331. }
/*
 * Make sure to re-select a CPU next time once the CPUs in
 * hctx->cpumask come back online.
 */
  1336. hctx->next_cpu = next_cpu;
  1337. hctx->next_cpu_batch = 1;
  1338. return WORK_CPU_UNBOUND;
  1339. }
  1340. hctx->next_cpu = next_cpu;
  1341. return next_cpu;
  1342. }
/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
 */
  1352. static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
  1353. unsigned long msecs)
  1354. {
  1355. if (unlikely(blk_mq_hctx_stopped(hctx)))
  1356. return;
  1357. if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
  1358. int cpu = get_cpu();
  1359. if (cpumask_test_cpu(cpu, hctx->cpumask)) {
  1360. __blk_mq_run_hw_queue(hctx);
  1361. put_cpu();
  1362. return;
  1363. }
  1364. put_cpu();
  1365. }
  1366. kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
  1367. msecs_to_jiffies(msecs));
  1368. }
/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
  1376. void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
  1377. {
  1378. __blk_mq_delay_run_hw_queue(hctx, true, msecs);
  1379. }
  1380. EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
  1381. /**
  1382. * blk_mq_run_hw_queue - Start to run a hardware queue.
  1383. * @hctx: Pointer to the hardware queue to run.
  1384. * @async: If we want to run the queue asynchronously.
  1385. *
  1386. * Check if the request queue is not in a quiesced state and if there are
  1387. * pending requests to be sent. If this is true, run the queue to send requests
  1388. * to hardware.
  1389. */
  1390. void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
  1391. {
  1392. int srcu_idx;
  1393. bool need_run;
/*
 * When the queue is quiesced, we may be switching the io scheduler,
 * updating nr_hw_queues, or doing other things that mean the queue
 * can't be run any more; even blk_mq_hctx_has_pending() can't be
 * called safely.
 *
 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
 * quiesced.
 */
  1402. hctx_lock(hctx, &srcu_idx);
  1403. need_run = !blk_queue_quiesced(hctx->queue) &&
  1404. blk_mq_hctx_has_pending(hctx);
  1405. hctx_unlock(hctx, srcu_idx);
  1406. if (need_run)
  1407. __blk_mq_delay_run_hw_queue(hctx, async, 0);
  1408. }
  1409. EXPORT_SYMBOL(blk_mq_run_hw_queue);
  1410. /*
  1411. * Is the request queue handled by an IO scheduler that does not respect
  1412. * hardware queues when dispatching?
  1413. */
  1414. static bool blk_mq_has_sqsched(struct request_queue *q)
  1415. {
  1416. struct elevator_queue *e = q->elevator;
  1417. if (e && e->type->ops.dispatch_request &&
  1418. !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
  1419. return true;
  1420. return false;
  1421. }
/*
 * Return the preferred queue to dispatch from (if any) for a
 * non-mq-aware IO scheduler.
 */
  1426. static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
  1427. {
  1428. struct blk_mq_hw_ctx *hctx;
  1429. /*
  1430. * If the IO scheduler does not respect hardware queues when
  1431. * dispatching, we just don't bother with multiple HW queues and
  1432. * dispatch from hctx for the current CPU since running multiple queues
  1433. * just causes lock contention inside the scheduler and pointless cache
  1434. * bouncing.
  1435. */
  1436. hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
  1437. raw_smp_processor_id());
  1438. if (!blk_mq_hctx_stopped(hctx))
  1439. return hctx;
  1440. return NULL;
  1441. }
  1442. /**
  1443. * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
  1444. * @q: Pointer to the request queue to run.
  1445. * @async: If we want to run the queue asynchronously.
  1446. */
  1447. void blk_mq_run_hw_queues(struct request_queue *q, bool async)
  1448. {
  1449. struct blk_mq_hw_ctx *hctx, *sq_hctx;
  1450. int i;
  1451. sq_hctx = NULL;
  1452. if (blk_mq_has_sqsched(q))
  1453. sq_hctx = blk_mq_get_sq_hctx(q);
  1454. queue_for_each_hw_ctx(q, hctx, i) {
  1455. if (blk_mq_hctx_stopped(hctx))
  1456. continue;
  1457. /*
  1458. * Dispatch from this hctx either if there's no hctx preferred
  1459. * by IO scheduler or if it has requests that bypass the
  1460. * scheduler.
  1461. */
  1462. if (!sq_hctx || sq_hctx == hctx ||
  1463. !list_empty_careful(&hctx->dispatch))
  1464. blk_mq_run_hw_queue(hctx, async);
  1465. }
  1466. }
  1467. EXPORT_SYMBOL(blk_mq_run_hw_queues);
/**
 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
 * @q: Pointer to the request queue to run.
 * @msecs: Milliseconds of delay to wait before running the queues.
 */
  1473. void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
  1474. {
  1475. struct blk_mq_hw_ctx *hctx, *sq_hctx;
  1476. int i;
  1477. sq_hctx = NULL;
  1478. if (blk_mq_has_sqsched(q))
  1479. sq_hctx = blk_mq_get_sq_hctx(q);
  1480. queue_for_each_hw_ctx(q, hctx, i) {
  1481. if (blk_mq_hctx_stopped(hctx))
  1482. continue;
  1483. /*
  1484. * Dispatch from this hctx either if there's no hctx preferred
  1485. * by IO scheduler or if it has requests that bypass the
  1486. * scheduler.
  1487. */
  1488. if (!sq_hctx || sq_hctx == hctx ||
  1489. !list_empty_careful(&hctx->dispatch))
  1490. blk_mq_delay_run_hw_queue(hctx, msecs);
  1491. }
  1492. }
  1493. EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
  1494. /**
  1495. * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
  1496. * @q: request queue.
  1497. *
  1498. * The caller is responsible for serializing this function against
  1499. * blk_mq_{start,stop}_hw_queue().
  1500. */
  1501. bool blk_mq_queue_stopped(struct request_queue *q)
  1502. {
  1503. struct blk_mq_hw_ctx *hctx;
  1504. int i;
  1505. queue_for_each_hw_ctx(q, hctx, i)
  1506. if (blk_mq_hctx_stopped(hctx))
  1507. return true;
  1508. return false;
  1509. }
  1510. EXPORT_SYMBOL(blk_mq_queue_stopped);
/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied; in that case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
  1520. void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
  1521. {
  1522. cancel_delayed_work(&hctx->run_work);
  1523. set_bit(BLK_MQ_S_STOPPED, &hctx->state);
  1524. }
  1525. EXPORT_SYMBOL(blk_mq_stop_hw_queue);
/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied; in that case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
  1535. void blk_mq_stop_hw_queues(struct request_queue *q)
  1536. {
  1537. struct blk_mq_hw_ctx *hctx;
  1538. int i;
  1539. queue_for_each_hw_ctx(q, hctx, i)
  1540. blk_mq_stop_hw_queue(hctx);
  1541. }
  1542. EXPORT_SYMBOL(blk_mq_stop_hw_queues);
  1543. void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
  1544. {
  1545. clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
  1546. blk_mq_run_hw_queue(hctx, false);
  1547. }
  1548. EXPORT_SYMBOL(blk_mq_start_hw_queue);
  1549. void blk_mq_start_hw_queues(struct request_queue *q)
  1550. {
  1551. struct blk_mq_hw_ctx *hctx;
  1552. int i;
  1553. queue_for_each_hw_ctx(q, hctx, i)
  1554. blk_mq_start_hw_queue(hctx);
  1555. }
  1556. EXPORT_SYMBOL(blk_mq_start_hw_queues);
  1557. void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
  1558. {
  1559. if (!blk_mq_hctx_stopped(hctx))
  1560. return;
  1561. clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
  1562. blk_mq_run_hw_queue(hctx, async);
  1563. }
  1564. EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
  1565. void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
  1566. {
  1567. struct blk_mq_hw_ctx *hctx;
  1568. int i;
  1569. queue_for_each_hw_ctx(q, hctx, i)
  1570. blk_mq_start_stopped_hw_queue(hctx, async);
  1571. }
  1572. EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
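/* Work handler for hctx->run_work: run the hardware queue unless it is stopped. */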
  1573. static void blk_mq_run_work_fn(struct work_struct *work)
  1574. {
  1575. struct blk_mq_hw_ctx *hctx;
  1576. hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
  1577. /*
  1578. * If we are stopped, don't run the queue.
  1579. */
  1580. if (blk_mq_hctx_stopped(hctx))
  1581. return;
  1582. __blk_mq_run_hw_queue(hctx);
  1583. }
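/*
 * Add @rq to the software queue's per-type request list. The caller must
 * hold ctx->lock.
 */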
  1584. static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
  1585. struct request *rq,
  1586. bool at_head)
  1587. {
  1588. struct blk_mq_ctx *ctx = rq->mq_ctx;
  1589. enum hctx_type type = hctx->type;
  1590. lockdep_assert_held(&ctx->lock);
  1591. trace_block_rq_insert(hctx->queue, rq);
  1592. if (at_head)
  1593. list_add(&rq->queuelist, &ctx->rq_lists[type]);
  1594. else
  1595. list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
  1596. }
  1597. void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  1598. bool at_head)
  1599. {
  1600. struct blk_mq_ctx *ctx = rq->mq_ctx;
  1601. lockdep_assert_held(&ctx->lock);
  1602. __blk_mq_insert_req_list(hctx, rq, at_head);
  1603. blk_mq_hctx_mark_pending(hctx, ctx);
  1604. }
  1605. /**
  1606. * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  1607. * @rq: Pointer to request to be inserted.
  1608. * @at_head: true if the request should be inserted at the head of the list.
  1609. * @run_queue: If we should run the hardware queue after inserting the request.
  1610. *
  1611. * Should only be used carefully, when the caller knows we want to
  1612. * bypass a potential IO scheduler on the target device.
  1613. */
  1614. void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
  1615. bool run_queue)
  1616. {
  1617. struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
  1618. spin_lock(&hctx->lock);
  1619. if (at_head)
  1620. list_add(&rq->queuelist, &hctx->dispatch);
  1621. else
  1622. list_add_tail(&rq->queuelist, &hctx->dispatch);
  1623. spin_unlock(&hctx->lock);
  1624. if (run_queue)
  1625. blk_mq_run_hw_queue(hctx, false);
  1626. }
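/*
 * Splice a list of requests, all belonging to software queue @ctx, onto
 * that software queue and mark the hardware queue as having pending work.
 */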
  1627. void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
  1628. struct list_head *list)
  1629. {
  1630. struct request *rq;
  1631. enum hctx_type type = hctx->type;
/*
 * Preemption doesn't flush the plug list, so it's possible that ctx->cpu
 * is offline now.
 */
  1636. list_for_each_entry(rq, list, queuelist) {
  1637. BUG_ON(rq->mq_ctx != ctx);
  1638. trace_block_rq_insert(hctx->queue, rq);
  1639. }
  1640. spin_lock(&ctx->lock);
  1641. list_splice_tail_init(list, &ctx->rq_lists[type]);
  1642. blk_mq_hctx_mark_pending(hctx, ctx);
  1643. spin_unlock(&ctx->lock);
  1644. }
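/*
 * Comparison callback for sorting the plug list: order requests by software
 * queue, then hardware queue, then sector, so requests destined for the same
 * queue end up adjacent.
 */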
  1645. static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  1646. {
  1647. struct request *rqa = container_of(a, struct request, queuelist);
  1648. struct request *rqb = container_of(b, struct request, queuelist);
  1649. if (rqa->mq_ctx != rqb->mq_ctx)
  1650. return rqa->mq_ctx > rqb->mq_ctx;
  1651. if (rqa->mq_hctx != rqb->mq_hctx)
  1652. return rqa->mq_hctx > rqb->mq_hctx;
  1653. return blk_rq_pos(rqa) > blk_rq_pos(rqb);
  1654. }
  1655. void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  1656. {
  1657. LIST_HEAD(list);
  1658. if (list_empty(&plug->mq_list))
  1659. return;
  1660. list_splice_init(&plug->mq_list, &list);
  1661. if (plug->rq_count > 2 && plug->multiple_queues)
  1662. list_sort(NULL, &list, plug_rq_cmp);
  1663. plug->rq_count = 0;
  1664. do {
  1665. struct list_head rq_list;
  1666. struct request *rq, *head_rq = list_entry_rq(list.next);
  1667. struct list_head *pos = &head_rq->queuelist; /* skip first */
  1668. struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
  1669. struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
  1670. unsigned int depth = 1;
  1671. list_for_each_continue(pos, &list) {
  1672. rq = list_entry_rq(pos);
  1673. BUG_ON(!rq->q);
  1674. if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
  1675. break;
  1676. depth++;
  1677. }
  1678. list_cut_before(&rq_list, &list, pos);
  1679. trace_block_unplug(head_rq->q, depth, !from_schedule);
  1680. blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
  1681. from_schedule);
  1682. } while(!list_empty(&list));
  1683. }
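/*
 * Initialize a freshly allocated request from @bio: copy over the relevant
 * bio fields, attach the bio, and start I/O accounting.
 */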
  1684. static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
  1685. unsigned int nr_segs)
  1686. {
  1687. int err;
  1688. if (bio->bi_opf & REQ_RAHEAD)
  1689. rq->cmd_flags |= REQ_FAILFAST_MASK;
  1690. rq->__sector = bio->bi_iter.bi_sector;
  1691. rq->write_hint = bio->bi_write_hint;
  1692. blk_rq_bio_prep(rq, bio, nr_segs);
  1693. /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
  1694. err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
  1695. WARN_ON_ONCE(err);
  1696. blk_account_io_start(rq);
  1697. }
  1698. static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
  1699. struct request *rq,
  1700. blk_qc_t *cookie, bool last)
  1701. {
  1702. struct request_queue *q = rq->q;
  1703. struct blk_mq_queue_data bd = {
  1704. .rq = rq,
  1705. .last = last,
  1706. };
  1707. blk_qc_t new_cookie;
  1708. blk_status_t ret;
  1709. new_cookie = request_to_qc_t(hctx, rq);
/*
 * If queueing succeeded, we are done. On a hard error, the caller may
 * kill the request. For any other (busy) error, just add it back to our
 * list as we previously would have done.
 */
  1715. ret = q->mq_ops->queue_rq(hctx, &bd);
  1716. switch (ret) {
  1717. case BLK_STS_OK:
  1718. blk_mq_update_dispatch_busy(hctx, false);
  1719. *cookie = new_cookie;
  1720. break;
  1721. case BLK_STS_RESOURCE:
  1722. case BLK_STS_DEV_RESOURCE:
  1723. blk_mq_update_dispatch_busy(hctx, true);
  1724. __blk_mq_requeue_request(rq);
  1725. break;
  1726. default:
  1727. blk_mq_update_dispatch_busy(hctx, false);
  1728. *cookie = BLK_QC_T_NONE;
  1729. break;
  1730. }
  1731. return ret;
  1732. }
  1733. static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
  1734. struct request *rq,
  1735. blk_qc_t *cookie,
  1736. bool bypass_insert, bool last)
  1737. {
  1738. struct request_queue *q = rq->q;
  1739. bool run_queue = true;
/*
 * An RCU or SRCU read lock is needed before checking the quiesced flag.
 *
 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
 * blk_mq_request_issue_directly(), and return BLK_STS_OK to the caller
 * so that the caller doesn't try to dispatch again.
 */
  1747. if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
  1748. run_queue = false;
  1749. bypass_insert = false;
  1750. goto insert;
  1751. }
  1752. if (q->elevator && !bypass_insert)
  1753. goto insert;
  1754. if (!blk_mq_get_dispatch_budget(q))
  1755. goto insert;
  1756. if (!blk_mq_get_driver_tag(rq)) {
  1757. blk_mq_put_dispatch_budget(q);
  1758. goto insert;
  1759. }
  1760. return __blk_mq_issue_directly(hctx, rq, cookie, last);
  1761. insert:
  1762. if (bypass_insert)
  1763. return BLK_STS_RESOURCE;
  1764. blk_mq_sched_insert_request(rq, false, run_queue, false);
  1765. return BLK_STS_OK;
  1766. }
/**
 * blk_mq_try_issue_directly - Try to send a request directly to the device driver.
 * @hctx: Pointer to the associated hardware queue.
 * @rq: Pointer to the request to be sent.
 * @cookie: Request queue cookie.
 *
 * If the device has enough resources to accept a new request now, send the
 * request directly to the device driver. Else, insert it into the
 * hctx->dispatch list, so we can try to send it again in the future.
 * Requests inserted into this list have higher priority.
 */
  1778. static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
  1779. struct request *rq, blk_qc_t *cookie)
  1780. {
  1781. blk_status_t ret;
  1782. int srcu_idx;
  1783. might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
  1784. hctx_lock(hctx, &srcu_idx);
  1785. ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
  1786. if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
  1787. blk_mq_request_bypass_insert(rq, false, true);
  1788. else if (ret != BLK_STS_OK)
  1789. blk_mq_end_request(rq, ret);
  1790. hctx_unlock(hctx, srcu_idx);
  1791. }
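/*
 * Issue @rq directly, bypassing the I/O scheduler insert path. Unlike
 * blk_mq_try_issue_directly(), the busy statuses (BLK_STS_RESOURCE /
 * BLK_STS_DEV_RESOURCE) are returned to the caller, which is expected to
 * handle the requeue itself.
 */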
  1792. blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
  1793. {
  1794. blk_status_t ret;
  1795. int srcu_idx;
  1796. blk_qc_t unused_cookie;
  1797. struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
  1798. hctx_lock(hctx, &srcu_idx);
  1799. ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
  1800. hctx_unlock(hctx, srcu_idx);
  1801. return ret;
  1802. }
  1803. void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  1804. struct list_head *list)
  1805. {
  1806. int queued = 0;
  1807. int errors = 0;
  1808. while (!list_empty(list)) {
  1809. blk_status_t ret;
  1810. struct request *rq = list_first_entry(list, struct request,
  1811. queuelist);
  1812. list_del_init(&rq->queuelist);
  1813. ret = blk_mq_request_issue_directly(rq, list_empty(list));
  1814. if (ret != BLK_STS_OK) {
  1815. if (ret == BLK_STS_RESOURCE ||
  1816. ret == BLK_STS_DEV_RESOURCE) {
  1817. blk_mq_request_bypass_insert(rq, false,
  1818. list_empty(list));
  1819. break;
  1820. }
  1821. blk_mq_end_request(rq, ret);
  1822. errors++;
  1823. } else
  1824. queued++;
  1825. }
  1826. /*
  1827. * If we didn't flush the entire list, we could have told
  1828. * the driver there was more coming, but that turned out to
  1829. * be a lie.
  1830. */
  1831. if ((!list_empty(list) || errors) &&
  1832. hctx->queue->mq_ops->commit_rqs && queued)
  1833. hctx->queue->mq_ops->commit_rqs(hctx);
  1834. }
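/*
 * Add @rq to the current plug list and note whether the plug now spans
 * more than one request queue.
 */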
  1835. static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  1836. {
  1837. list_add_tail(&rq->queuelist, &plug->mq_list);
  1838. plug->rq_count++;
  1839. if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
  1840. struct request *tmp;
  1841. tmp = list_first_entry(&plug->mq_list, struct request,
  1842. queuelist);
  1843. if (tmp->q != rq->q)
  1844. plug->multiple_queues = true;
  1845. }
  1846. }
  1847. /*
  1848. * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  1849. * queues. This is important for md arrays to benefit from merging
  1850. * requests.
  1851. */
  1852. static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
  1853. {
  1854. if (plug->multiple_queues)
  1855. return BLK_MAX_REQUEST_COUNT * 2;
  1856. return BLK_MAX_REQUEST_COUNT;
  1857. }
/**
 * blk_mq_submit_bio - Create and send a request to a block device.
 * @bio: Bio pointer.
 *
 * Builds up a request structure from @q and @bio and sends it to the device.
 * The request may not be queued directly to hardware if:
 * * This request can be merged with another one
 * * We want to place the request on the plug queue for possible future merging
 * * There is an IO scheduler active on this queue
 *
 * It will not queue the request if there is an error with the bio, or at
 * request creation time.
 *
 * Returns: Request queue cookie.
 */
  1873. blk_qc_t blk_mq_submit_bio(struct bio *bio)
  1874. {
  1875. struct request_queue *q = bio->bi_disk->queue;
  1876. const int is_sync = op_is_sync(bio->bi_opf);
  1877. const int is_flush_fua = op_is_flush(bio->bi_opf);
  1878. struct blk_mq_alloc_data data = {
  1879. .q = q,
  1880. };
  1881. struct request *rq;
  1882. struct blk_plug *plug;
  1883. struct request *same_queue_rq = NULL;
  1884. unsigned int nr_segs;
  1885. blk_qc_t cookie;
  1886. blk_status_t ret;
  1887. blk_queue_bounce(q, &bio);
  1888. __blk_queue_split(&bio, &nr_segs);
  1889. if (!bio_integrity_prep(bio))
  1890. goto queue_exit;
  1891. if (!is_flush_fua && !blk_queue_nomerges(q) &&
  1892. blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
  1893. goto queue_exit;
  1894. if (blk_mq_sched_bio_merge(q, bio, nr_segs))
  1895. goto queue_exit;
  1896. rq_qos_throttle(q, bio);
  1897. data.cmd_flags = bio->bi_opf;
  1898. rq = __blk_mq_alloc_request(&data);
  1899. if (unlikely(!rq)) {
  1900. rq_qos_cleanup(q, bio);
  1901. if (bio->bi_opf & REQ_NOWAIT)
  1902. bio_wouldblock_error(bio);
  1903. goto queue_exit;
  1904. }
  1905. trace_block_getrq(q, bio, bio->bi_opf);
  1906. rq_qos_track(q, rq, bio);
  1907. cookie = request_to_qc_t(data.hctx, rq);
  1908. blk_mq_bio_to_request(rq, bio, nr_segs);
  1909. ret = blk_crypto_init_request(rq);
  1910. if (ret != BLK_STS_OK) {
  1911. bio->bi_status = ret;
  1912. bio_endio(bio);
  1913. blk_mq_free_request(rq);
  1914. return BLK_QC_T_NONE;
  1915. }
  1916. plug = blk_mq_plug(q, bio);
  1917. if (unlikely(is_flush_fua)) {
  1918. /* Bypass scheduler for flush requests */
  1919. blk_insert_flush(rq);
  1920. blk_mq_run_hw_queue(data.hctx, true);
  1921. } else if (plug && (q->nr_hw_queues == 1 ||
  1922. blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
  1923. q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
/*
 * Use plugging if we have a ->commit_rqs() hook as well, as
 * we know the driver uses bd->last in a smart fashion.
 *
 * Use normal plugging if this disk is a slow HDD, as sequential
 * IO may benefit a lot from plug merging.
 */
  1931. unsigned int request_count = plug->rq_count;
  1932. struct request *last = NULL;
  1933. if (!request_count)
  1934. trace_block_plug(q);
  1935. else
  1936. last = list_entry_rq(plug->mq_list.prev);
  1937. if (request_count >= blk_plug_max_rq_count(plug) || (last &&
  1938. blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
  1939. blk_flush_plug_list(plug, false);
  1940. trace_block_plug(q);
  1941. }
  1942. blk_add_rq_to_plug(plug, rq);
  1943. } else if (q->elevator) {
  1944. /* Insert the request at the IO scheduler queue */
  1945. blk_mq_sched_insert_request(rq, false, true, true);
  1946. } else if (plug && !blk_queue_nomerges(q)) {
/*
 * We do limited plugging. If the bio can be merged, do that.
 * Otherwise the existing request in the plug list will be
 * issued, so the plug list will have one request at most.
 * The plug list might get flushed before this. If that happens,
 * the plug list is empty, and same_queue_rq is invalid.
 */
  1954. if (list_empty(&plug->mq_list))
  1955. same_queue_rq = NULL;
  1956. if (same_queue_rq) {
  1957. list_del_init(&same_queue_rq->queuelist);
  1958. plug->rq_count--;
  1959. }
  1960. blk_add_rq_to_plug(plug, rq);
  1961. trace_block_plug(q);
  1962. if (same_queue_rq) {
  1963. data.hctx = same_queue_rq->mq_hctx;
  1964. trace_block_unplug(q, 1, true);
  1965. blk_mq_try_issue_directly(data.hctx, same_queue_rq,
  1966. &cookie);
  1967. }
  1968. } else if ((q->nr_hw_queues > 1 && is_sync) ||
  1969. !data.hctx->dispatch_busy) {
  1970. /*
  1971. * There is no scheduler and we can try to send directly
  1972. * to the hardware.
  1973. */
  1974. blk_mq_try_issue_directly(data.hctx, rq, &cookie);
  1975. } else {
  1976. /* Default case. */
  1977. blk_mq_sched_insert_request(rq, false, true, true);
  1978. }
  1979. return cookie;
  1980. queue_exit:
  1981. blk_queue_exit(q);
  1982. return BLK_QC_T_NONE;
  1983. }
  1984. static size_t order_to_size(unsigned int order)
  1985. {
  1986. return (size_t)PAGE_SIZE << order;
  1987. }
  1988. /* called before freeing request pool in @tags */
  1989. static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
  1990. struct blk_mq_tags *tags, unsigned int hctx_idx)
  1991. {
  1992. struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
  1993. struct page *page;
  1994. unsigned long flags;
  1995. list_for_each_entry(page, &tags->page_list, lru) {
  1996. unsigned long start = (unsigned long)page_address(page);
  1997. unsigned long end = start + order_to_size(page->private);
  1998. int i;
  1999. for (i = 0; i < set->queue_depth; i++) {
  2000. struct request *rq = drv_tags->rqs[i];
  2001. unsigned long rq_addr = (unsigned long)rq;
  2002. if (rq_addr >= start && rq_addr < end) {
  2003. WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
  2004. cmpxchg(&drv_tags->rqs[i], rq, NULL);
  2005. }
  2006. }
  2007. }
/*
 * Wait until all pending iterations are done.
 *
 * The request references have been cleared, and that is guaranteed to be
 * observed after the ->lock is released.
 */
  2014. spin_lock_irqsave(&drv_tags->lock, flags);
  2015. spin_unlock_irqrestore(&drv_tags->lock, flags);
  2016. }
  2017. void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  2018. unsigned int hctx_idx)
  2019. {
  2020. struct page *page;
  2021. if (tags->rqs && set->ops->exit_request) {
  2022. int i;
  2023. for (i = 0; i < tags->nr_tags; i++) {
  2024. struct request *rq = tags->static_rqs[i];
  2025. if (!rq)
  2026. continue;
  2027. set->ops->exit_request(set, rq, hctx_idx);
  2028. tags->static_rqs[i] = NULL;
  2029. }
  2030. }
  2031. blk_mq_clear_rq_mapping(set, tags, hctx_idx);
  2032. while (!list_empty(&tags->page_list)) {
  2033. page = list_first_entry(&tags->page_list, struct page, lru);
  2034. list_del_init(&page->lru);
  2035. /*
  2036. * Remove kmemleak object previously allocated in
  2037. * blk_mq_alloc_rqs().
  2038. */
  2039. kmemleak_free(page_address(page));
  2040. __free_pages(page, page->private);
  2041. }
  2042. }
  2043. void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
  2044. {
  2045. kfree(tags->rqs);
  2046. tags->rqs = NULL;
  2047. kfree(tags->static_rqs);
  2048. tags->static_rqs = NULL;
  2049. blk_mq_free_tags(tags, flags);
  2050. }
  2051. struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
  2052. unsigned int hctx_idx,
  2053. unsigned int nr_tags,
  2054. unsigned int reserved_tags,
  2055. unsigned int flags)
  2056. {
  2057. struct blk_mq_tags *tags;
  2058. int node;
  2059. node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
  2060. if (node == NUMA_NO_NODE)
  2061. node = set->numa_node;
  2062. tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
  2063. if (!tags)
  2064. return NULL;
  2065. tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
  2066. GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
  2067. node);
  2068. if (!tags->rqs) {
  2069. blk_mq_free_tags(tags, flags);
  2070. return NULL;
  2071. }
  2072. tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
  2073. GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
  2074. node);
  2075. if (!tags->static_rqs) {
  2076. kfree(tags->rqs);
  2077. blk_mq_free_tags(tags, flags);
  2078. return NULL;
  2079. }
  2080. return tags;
  2081. }
  2082. static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
  2083. unsigned int hctx_idx, int node)
  2084. {
  2085. int ret;
  2086. if (set->ops->init_request) {
  2087. ret = set->ops->init_request(set, rq, hctx_idx, node);
  2088. if (ret)
  2089. return ret;
  2090. }
  2091. WRITE_ONCE(rq->state, MQ_RQ_IDLE);
  2092. return 0;
  2093. }
  2094. int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  2095. unsigned int hctx_idx, unsigned int depth)
  2096. {
  2097. unsigned int i, j, entries_per_page, max_order = 4;
  2098. size_t rq_size, left;
  2099. int node;
  2100. node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
  2101. if (node == NUMA_NO_NODE)
  2102. node = set->numa_node;
  2103. INIT_LIST_HEAD(&tags->page_list);
  2104. /*
  2105. * rq_size is the size of the request plus driver payload, rounded
  2106. * to the cacheline size
  2107. */
  2108. rq_size = round_up(sizeof(struct request) + set->cmd_size,
  2109. cache_line_size());
  2110. trace_android_vh_blk_alloc_rqs(&rq_size, set, tags);
  2111. left = rq_size * depth;
  2112. for (i = 0; i < depth; ) {
  2113. int this_order = max_order;
  2114. struct page *page;
  2115. int to_do;
  2116. void *p;
  2117. while (this_order && left < order_to_size(this_order - 1))
  2118. this_order--;
  2119. do {
  2120. page = alloc_pages_node(node,
  2121. GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
  2122. this_order);
  2123. if (page)
  2124. break;
  2125. if (!this_order--)
  2126. break;
  2127. if (order_to_size(this_order) < rq_size)
  2128. break;
  2129. } while (1);
  2130. if (!page)
  2131. goto fail;
  2132. page->private = this_order;
  2133. list_add_tail(&page->lru, &tags->page_list);
  2134. p = page_address(page);
  2135. /*
  2136. * Allow kmemleak to scan these pages as they contain pointers
  2137. * to additional allocations like via ops->init_request().
  2138. */
  2139. kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
  2140. entries_per_page = order_to_size(this_order) / rq_size;
  2141. to_do = min(entries_per_page, depth - i);
  2142. left -= to_do * rq_size;
  2143. for (j = 0; j < to_do; j++) {
  2144. struct request *rq = p;
  2145. tags->static_rqs[i] = rq;
  2146. if (blk_mq_init_request(set, rq, hctx_idx, node)) {
  2147. tags->static_rqs[i] = NULL;
  2148. goto fail;
  2149. }
  2150. p += rq_size;
  2151. i++;
  2152. }
  2153. }
  2154. return 0;
  2155. fail:
  2156. blk_mq_free_rqs(set, tags, hctx_idx);
  2157. return -ENOMEM;
  2158. }
  2159. struct rq_iter_data {
  2160. struct blk_mq_hw_ctx *hctx;
  2161. bool has_rq;
  2162. };
  2163. static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
  2164. {
  2165. struct rq_iter_data *iter_data = data;
  2166. if (rq->mq_hctx != iter_data->hctx)
  2167. return true;
  2168. iter_data->has_rq = true;
  2169. return false;
  2170. }
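/*
 * Check whether any request allocated from this hctx (scheduler tags if
 * present, otherwise driver tags) is still around.
 */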
  2171. static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
  2172. {
  2173. struct blk_mq_tags *tags = hctx->sched_tags ?
  2174. hctx->sched_tags : hctx->tags;
  2175. struct rq_iter_data data = {
  2176. .hctx = hctx,
  2177. };
  2178. blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
  2179. return data.has_rq;
  2180. }
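/* Return true if @cpu is the only online CPU that is mapped to @hctx. */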
  2181. static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
  2182. struct blk_mq_hw_ctx *hctx)
  2183. {
  2184. if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
  2185. return false;
  2186. if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
  2187. return false;
  2188. return true;
  2189. }
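/*
 * CPU hotplug "going offline" callback: if @cpu is the last online CPU
 * mapped to this hctx, mark the hctx inactive so no new requests are
 * allocated from it, then wait for the requests already in flight to
 * complete.
 */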
  2190. static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
  2191. {
  2192. struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
  2193. struct blk_mq_hw_ctx, cpuhp_online);
  2194. if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
  2195. !blk_mq_last_cpu_in_hctx(cpu, hctx))
  2196. return 0;
/*
 * Prevent new requests from being allocated on the current hctx.
 *
 * The smp_mb__after_atomic() pairs with the implied barrier of
 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag
 * is seen once we return from the tag allocator.
 */
  2204. set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
  2205. smp_mb__after_atomic();
  2206. /*
  2207. * Try to grab a reference to the queue and wait for any outstanding
  2208. * requests. If we could not grab a reference the queue has been
  2209. * frozen and there are no requests.
  2210. */
  2211. if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
  2212. while (blk_mq_hctx_has_requests(hctx))
  2213. msleep(5);
  2214. percpu_ref_put(&hctx->queue->q_usage_counter);
  2215. }
  2216. return 0;
  2217. }
  2218. static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
  2219. {
  2220. struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
  2221. struct blk_mq_hw_ctx, cpuhp_online);
  2222. if (cpumask_test_cpu(cpu, hctx->cpumask))
  2223. clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
  2224. return 0;
  2225. }
/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
  2231. static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
  2232. {
  2233. struct blk_mq_hw_ctx *hctx;
  2234. struct blk_mq_ctx *ctx;
  2235. LIST_HEAD(tmp);
  2236. enum hctx_type type;
  2237. hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
  2238. if (!cpumask_test_cpu(cpu, hctx->cpumask))
  2239. return 0;
  2240. ctx = __blk_mq_get_ctx(hctx->queue, cpu);
  2241. type = hctx->type;
  2242. spin_lock(&ctx->lock);
  2243. if (!list_empty(&ctx->rq_lists[type])) {
  2244. list_splice_init(&ctx->rq_lists[type], &tmp);
  2245. blk_mq_hctx_clear_pending(hctx, ctx);
  2246. }
  2247. spin_unlock(&ctx->lock);
  2248. if (list_empty(&tmp))
  2249. return 0;
  2250. spin_lock(&hctx->lock);
  2251. list_splice_tail_init(&tmp, &hctx->dispatch);
  2252. spin_unlock(&hctx->lock);
  2253. blk_mq_run_hw_queue(hctx, true);
  2254. return 0;
  2255. }
  2256. static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
  2257. {
  2258. if (!(hctx->flags & BLK_MQ_F_STACKING))
  2259. cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
  2260. &hctx->cpuhp_online);
  2261. cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
  2262. &hctx->cpuhp_dead);
  2263. }
/*
 * Before freeing the hw queue, clear the flush request reference in
 * tags->rqs[] to avoid a potential use-after-free.
 */
  2268. static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
  2269. unsigned int queue_depth, struct request *flush_rq)
  2270. {
  2271. int i;
  2272. unsigned long flags;
  2273. /* The hw queue may not be mapped yet */
  2274. if (!tags)
  2275. return;
  2276. WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
  2277. for (i = 0; i < queue_depth; i++)
  2278. cmpxchg(&tags->rqs[i], flush_rq, NULL);
/*
 * Wait until all pending iterations are done.
 *
 * The request references have been cleared, and that is guaranteed to be
 * observed after the ->lock is released.
 */
  2285. spin_lock_irqsave(&tags->lock, flags);
  2286. spin_unlock_irqrestore(&tags->lock, flags);
  2287. }
  2288. /* hctx->ctxs will be freed in queue's release handler */
  2289. static void blk_mq_exit_hctx(struct request_queue *q,
  2290. struct blk_mq_tag_set *set,
  2291. struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
  2292. {
  2293. struct request *flush_rq = hctx->fq->flush_rq;
  2294. if (blk_mq_hw_queue_mapped(hctx))
  2295. blk_mq_tag_idle(hctx);
  2296. blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
  2297. set->queue_depth, flush_rq);
  2298. if (set->ops->exit_request)
  2299. set->ops->exit_request(set, flush_rq, hctx_idx);
  2300. if (set->ops->exit_hctx)
  2301. set->ops->exit_hctx(hctx, hctx_idx);
  2302. blk_mq_remove_cpuhp(hctx);
  2303. spin_lock(&q->unused_hctx_lock);
  2304. list_add(&hctx->hctx_list, &q->unused_hctx_list);
  2305. spin_unlock(&q->unused_hctx_lock);
  2306. }
  2307. static void blk_mq_exit_hw_queues(struct request_queue *q,
  2308. struct blk_mq_tag_set *set, int nr_queue)
  2309. {
  2310. struct blk_mq_hw_ctx *hctx;
  2311. unsigned int i;
  2312. queue_for_each_hw_ctx(q, hctx, i) {
  2313. if (i == nr_queue)
  2314. break;
  2315. blk_mq_debugfs_unregister_hctx(hctx);
  2316. blk_mq_exit_hctx(q, set, hctx, i);
  2317. }
  2318. }
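/*
 * Size of a hardware context allocation; queues with BLK_MQ_F_BLOCKING
 * set carry an srcu_struct at the end of the structure.
 */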
  2319. static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
  2320. {
  2321. int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
  2322. BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
  2323. __alignof__(struct blk_mq_hw_ctx)) !=
  2324. sizeof(struct blk_mq_hw_ctx));
  2325. if (tag_set->flags & BLK_MQ_F_BLOCKING)
  2326. hw_ctx_size += sizeof(struct srcu_struct);
  2327. return hw_ctx_size;
  2328. }
  2329. static int blk_mq_init_hctx(struct request_queue *q,
  2330. struct blk_mq_tag_set *set,
  2331. struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
  2332. {
  2333. hctx->queue_num = hctx_idx;
  2334. if (!(hctx->flags & BLK_MQ_F_STACKING))
  2335. cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
  2336. &hctx->cpuhp_online);
  2337. cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
  2338. hctx->tags = set->tags[hctx_idx];
  2339. if (set->ops->init_hctx &&
  2340. set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
  2341. goto unregister_cpu_notifier;
  2342. if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
  2343. hctx->numa_node))
  2344. goto exit_hctx;
  2345. return 0;
  2346. exit_hctx:
  2347. if (set->ops->exit_hctx)
  2348. set->ops->exit_hctx(hctx, hctx_idx);
  2349. unregister_cpu_notifier:
  2350. blk_mq_remove_cpuhp(hctx);
  2351. return -1;
  2352. }
  2353. static struct blk_mq_hw_ctx *
  2354. blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
  2355. int node)
  2356. {
  2357. struct blk_mq_hw_ctx *hctx;
  2358. gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
  2359. hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
  2360. if (!hctx)
  2361. goto fail_alloc_hctx;
  2362. if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
  2363. goto free_hctx;
  2364. atomic_set(&hctx->nr_active, 0);
  2365. if (node == NUMA_NO_NODE)
  2366. node = set->numa_node;
  2367. hctx->numa_node = node;
  2368. INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
  2369. spin_lock_init(&hctx->lock);
  2370. INIT_LIST_HEAD(&hctx->dispatch);
  2371. hctx->queue = q;
  2372. hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
  2373. INIT_LIST_HEAD(&hctx->hctx_list);
  2374. /*
  2375. * Allocate space for all possible cpus to avoid allocation at
  2376. * runtime
  2377. */
  2378. hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
  2379. gfp, node);
  2380. if (!hctx->ctxs)
  2381. goto free_cpumask;
  2382. if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
  2383. gfp, node))
  2384. goto free_ctxs;
  2385. hctx->nr_ctx = 0;
  2386. spin_lock_init(&hctx->dispatch_wait_lock);
  2387. init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
  2388. INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
  2389. hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
  2390. if (!hctx->fq)
  2391. goto free_bitmap;
  2392. if (hctx->flags & BLK_MQ_F_BLOCKING)
  2393. init_srcu_struct(hctx->srcu);
  2394. blk_mq_hctx_kobj_init(hctx);
  2395. return hctx;
  2396. free_bitmap:
  2397. sbitmap_free(&hctx->ctx_map);
  2398. free_ctxs:
  2399. kfree(hctx->ctxs);
  2400. free_cpumask:
  2401. free_cpumask_var(hctx->cpumask);
  2402. free_hctx:
  2403. kfree(hctx);
  2404. fail_alloc_hctx:
  2405. return NULL;
  2406. }
  2407. static void blk_mq_init_cpu_queues(struct request_queue *q,
  2408. unsigned int nr_hw_queues)
  2409. {
  2410. struct blk_mq_tag_set *set = q->tag_set;
  2411. unsigned int i, j;
  2412. for_each_possible_cpu(i) {
  2413. struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
  2414. struct blk_mq_hw_ctx *hctx;
  2415. int k;
  2416. __ctx->cpu = i;
  2417. spin_lock_init(&__ctx->lock);
  2418. for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
  2419. INIT_LIST_HEAD(&__ctx->rq_lists[k]);
  2420. __ctx->queue = q;
/*
 * Set the local node, iff we have more than one hw queue. If
 * not, we remain on the home node of the device.
 */
  2425. for (j = 0; j < set->nr_maps; j++) {
  2426. hctx = blk_mq_map_queue_type(q, j, i);
  2427. if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
  2428. hctx->numa_node = cpu_to_node(i);
  2429. }
  2430. }
  2431. }
  2432. static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
  2433. int hctx_idx)
  2434. {
  2435. unsigned int flags = set->flags;
  2436. int ret = 0;
  2437. set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
  2438. set->queue_depth, set->reserved_tags, flags);
  2439. if (!set->tags[hctx_idx])
  2440. return false;
  2441. ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
  2442. set->queue_depth);
  2443. if (!ret)
  2444. return true;
  2445. blk_mq_free_rq_map(set->tags[hctx_idx], flags);
  2446. set->tags[hctx_idx] = NULL;
  2447. return false;
  2448. }
  2449. static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
  2450. unsigned int hctx_idx)
  2451. {
  2452. unsigned int flags = set->flags;
  2453. if (set->tags && set->tags[hctx_idx]) {
  2454. blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
  2455. blk_mq_free_rq_map(set->tags[hctx_idx], flags);
  2456. set->tags[hctx_idx] = NULL;
  2457. }
  2458. }
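/*
 * (Re)build the software queue to hardware queue mapping for @q, allocating
 * tag maps for hardware queues that gained a mapping and releasing them for
 * hardware queues that no longer have any software queues mapped.
 */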
  2459. static void blk_mq_map_swqueue(struct request_queue *q)
  2460. {
  2461. unsigned int i, j, hctx_idx;
  2462. struct blk_mq_hw_ctx *hctx;
  2463. struct blk_mq_ctx *ctx;
  2464. struct blk_mq_tag_set *set = q->tag_set;
  2465. queue_for_each_hw_ctx(q, hctx, i) {
  2466. cpumask_clear(hctx->cpumask);
  2467. hctx->nr_ctx = 0;
  2468. hctx->dispatch_from = NULL;
  2469. }
/*
 * Map software queues to hardware queues.
 *
 * If the cpu isn't present, the cpu is mapped to the first hctx.
 */
  2475. for_each_possible_cpu(i) {
  2476. ctx = per_cpu_ptr(q->queue_ctx, i);
  2477. for (j = 0; j < set->nr_maps; j++) {
  2478. if (!set->map[j].nr_queues) {
  2479. ctx->hctxs[j] = blk_mq_map_queue_type(q,
  2480. HCTX_TYPE_DEFAULT, i);
  2481. continue;
  2482. }
  2483. hctx_idx = set->map[j].mq_map[i];
  2484. /* unmapped hw queue can be remapped after CPU topo changed */
  2485. if (!set->tags[hctx_idx] &&
  2486. !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
/*
 * If tag initialization fails for some hctx,
 * that hctx won't be brought online. In this
 * case, remap the current ctx to hctx[0], which
 * is guaranteed to always have tags allocated.
 */
  2493. set->map[j].mq_map[i] = 0;
  2494. }
  2495. hctx = blk_mq_map_queue_type(q, j, i);
  2496. ctx->hctxs[j] = hctx;
  2497. /*
  2498. * If the CPU is already set in the mask, then we've
  2499. * mapped this one already. This can happen if
  2500. * devices share queues across queue maps.
  2501. */
  2502. if (cpumask_test_cpu(i, hctx->cpumask))
  2503. continue;
  2504. cpumask_set_cpu(i, hctx->cpumask);
  2505. hctx->type = j;
  2506. ctx->index_hw[hctx->type] = hctx->nr_ctx;
  2507. hctx->ctxs[hctx->nr_ctx++] = ctx;
  2508. /*
  2509. * If the nr_ctx type overflows, we have exceeded the
  2510. * amount of sw queues we can support.
  2511. */
  2512. BUG_ON(!hctx->nr_ctx);
  2513. }
  2514. for (; j < HCTX_MAX_TYPES; j++)
  2515. ctx->hctxs[j] = blk_mq_map_queue_type(q,
  2516. HCTX_TYPE_DEFAULT, i);
  2517. }
  2518. queue_for_each_hw_ctx(q, hctx, i) {
  2519. /*
  2520. * If no software queues are mapped to this hardware queue,
  2521. * disable it and free the request entries.
  2522. */
  2523. if (!hctx->nr_ctx) {
/*
 * Never unmap queue 0. We need it as a
 * fallback in case a new remap fails
 * allocation.
 */
  2528. if (i && set->tags[i])
  2529. blk_mq_free_map_and_requests(set, i);
  2530. hctx->tags = NULL;
  2531. continue;
  2532. }
  2533. hctx->tags = set->tags[i];
  2534. WARN_ON(!hctx->tags);
  2535. /*
  2536. * Set the map size to the number of mapped software queues.
  2537. * This is more accurate and more efficient than looping
  2538. * over all possibly mapped software queues.
  2539. */
  2540. sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
  2541. /*
  2542. * Initialize batch roundrobin counts
  2543. */
  2544. hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
  2545. hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
  2546. }
  2547. }
  2548. /*
  2549. * Caller needs to ensure that we're either frozen/quiesced, or that
  2550. * the queue isn't live yet.
  2551. */
  2552. static void queue_set_hctx_shared(struct request_queue *q, bool shared)
  2553. {
  2554. struct blk_mq_hw_ctx *hctx;
  2555. int i;
  2556. queue_for_each_hw_ctx(q, hctx, i) {
  2557. if (shared)
  2558. hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
  2559. else
  2560. hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
  2561. }
  2562. }
  2563. static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
  2564. bool shared)
  2565. {
  2566. struct request_queue *q;
  2567. lockdep_assert_held(&set->tag_list_lock);
  2568. list_for_each_entry(q, &set->tag_list, tag_set_list) {
  2569. blk_mq_freeze_queue(q);
  2570. queue_set_hctx_shared(q, shared);
  2571. blk_mq_unfreeze_queue(q);
  2572. }
  2573. }
  2574. static void blk_mq_del_queue_tag_set(struct request_queue *q)
  2575. {
  2576. struct blk_mq_tag_set *set = q->tag_set;
  2577. mutex_lock(&set->tag_list_lock);
  2578. list_del(&q->tag_set_list);
  2579. if (list_is_singular(&set->tag_list)) {
  2580. /* just transitioned to unshared */
  2581. set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
  2582. /* update existing queue */
  2583. blk_mq_update_tag_set_shared(set, false);
  2584. }
  2585. mutex_unlock(&set->tag_list_lock);
  2586. INIT_LIST_HEAD(&q->tag_set_list);
  2587. }
  2588. static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
  2589. struct request_queue *q)
  2590. {
  2591. mutex_lock(&set->tag_list_lock);
  2592. /*
  2593. * Check to see if we're transitioning to shared (from 1 to 2 queues).
  2594. */
  2595. if (!list_empty(&set->tag_list) &&
  2596. !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
  2597. set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
  2598. /* update existing queue */
  2599. blk_mq_update_tag_set_shared(set, true);
  2600. }
  2601. if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
  2602. queue_set_hctx_shared(q, true);
  2603. list_add_tail(&q->tag_set_list, &set->tag_list);
  2604. mutex_unlock(&set->tag_list_lock);
  2605. }
  2606. /* All allocations will be freed in release handler of q->mq_kobj */
  2607. static int blk_mq_alloc_ctxs(struct request_queue *q)
  2608. {
  2609. struct blk_mq_ctxs *ctxs;
  2610. int cpu;
  2611. ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
  2612. if (!ctxs)
  2613. return -ENOMEM;
  2614. ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
  2615. if (!ctxs->queue_ctx)
  2616. goto fail;
  2617. for_each_possible_cpu(cpu) {
  2618. struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
  2619. ctx->ctxs = ctxs;
  2620. }
  2621. q->mq_kobj = &ctxs->kobj;
  2622. q->queue_ctx = ctxs->queue_ctx;
  2623. return 0;
  2624. fail:
  2625. kfree(ctxs);
  2626. return -ENOMEM;
  2627. }
/*
 * This is the actual release handler for mq, but we do it from the
 * request queue's release handler to avoid use-after-free headaches:
 * q->mq_kobj shouldn't have been introduced, but we can't group the
 * ctx/kctx kobjects without it.
 */
  2634. void blk_mq_release(struct request_queue *q)
  2635. {
  2636. struct blk_mq_hw_ctx *hctx, *next;
  2637. int i;
  2638. queue_for_each_hw_ctx(q, hctx, i)
  2639. WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
  2640. /* all hctx are in .unused_hctx_list now */
  2641. list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
  2642. list_del_init(&hctx->hctx_list);
  2643. kobject_put(&hctx->kobj);
  2644. }
  2645. kfree(q->queue_hw_ctx);
  2646. /*
  2647. * release .mq_kobj and sw queue's kobject now because
  2648. * both share lifetime with request queue.
  2649. */
  2650. blk_mq_sysfs_deinit(q);
  2651. }
  2652. struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
  2653. void *queuedata)
  2654. {
  2655. struct request_queue *uninit_q, *q;
  2656. uninit_q = blk_alloc_queue(set->numa_node);
  2657. if (!uninit_q)
  2658. return ERR_PTR(-ENOMEM);
  2659. uninit_q->queuedata = queuedata;
  2660. /*
  2661. * Initialize the queue without an elevator. device_add_disk() will do
  2662. * the initialization.
  2663. */
  2664. q = blk_mq_init_allocated_queue(set, uninit_q, false);
  2665. if (IS_ERR(q))
  2666. blk_cleanup_queue(uninit_q);
  2667. return q;
  2668. }
  2669. EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
  2670. struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
  2671. {
  2672. return blk_mq_init_queue_data(set, NULL);
  2673. }
  2674. EXPORT_SYMBOL(blk_mq_init_queue);
  2675. /*
  2676. * Helper for setting up a queue with mq ops, given queue depth, and
  2677. * the passed in mq ops flags.
  2678. */
  2679. struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
  2680. const struct blk_mq_ops *ops,
  2681. unsigned int queue_depth,
  2682. unsigned int set_flags)
  2683. {
  2684. struct request_queue *q;
  2685. int ret;
  2686. memset(set, 0, sizeof(*set));
  2687. set->ops = ops;
  2688. set->nr_hw_queues = 1;
  2689. set->nr_maps = 1;
  2690. set->queue_depth = queue_depth;
  2691. set->numa_node = NUMA_NO_NODE;
  2692. set->flags = set_flags;
  2693. ret = blk_mq_alloc_tag_set(set);
  2694. if (ret)
  2695. return ERR_PTR(ret);
  2696. q = blk_mq_init_queue(set);
  2697. if (IS_ERR(q)) {
  2698. blk_mq_free_tag_set(set);
  2699. return q;
  2700. }
  2701. return q;
  2702. }
  2703. EXPORT_SYMBOL(blk_mq_init_sq_queue);
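/*
 * Illustrative only (the identifiers below are hypothetical, not part of
 * this file): a minimal single-queue driver could set itself up with
 * something like:
 *
 *      static struct blk_mq_tag_set my_tag_set;
 *      static const struct blk_mq_ops my_mq_ops = { .queue_rq = my_queue_rq };
 *
 *      q = blk_mq_init_sq_queue(&my_tag_set, &my_mq_ops, 64,
 *                               BLK_MQ_F_SHOULD_MERGE);
 *      if (IS_ERR(q))
 *              return PTR_ERR(q);
 */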
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
                struct blk_mq_tag_set *set, struct request_queue *q,
                int hctx_idx, int node)
{
        struct blk_mq_hw_ctx *hctx = NULL, *tmp;

        /* reuse dead hctx first */
        spin_lock(&q->unused_hctx_lock);
        list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
                if (tmp->numa_node == node) {
                        hctx = tmp;
                        break;
                }
        }
        if (hctx)
                list_del_init(&hctx->hctx_list);
        spin_unlock(&q->unused_hctx_lock);

        if (!hctx)
                hctx = blk_mq_alloc_hctx(q, set, node);
        if (!hctx)
                goto fail;

        if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
                goto free_hctx;

        return hctx;

 free_hctx:
        kobject_put(&hctx->kobj);
 fail:
        return NULL;
}
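
/*
 * (Re)build q->queue_hw_ctx so that it covers set->nr_hw_queues hardware
 * contexts. Existing hctxs whose NUMA node is unchanged are kept; the
 * rest are reallocated, and any surplus hctxs are torn down.
 */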
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                                   struct request_queue *q)
{
        int i, j, end;
        struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

        if (q->nr_hw_queues < set->nr_hw_queues) {
                struct blk_mq_hw_ctx **new_hctxs;

                new_hctxs = kcalloc_node(set->nr_hw_queues,
                                         sizeof(*new_hctxs), GFP_KERNEL,
                                         set->numa_node);
                if (!new_hctxs)
                        return;
                if (hctxs)
                        memcpy(new_hctxs, hctxs, q->nr_hw_queues *
                               sizeof(*hctxs));
                q->queue_hw_ctx = new_hctxs;
                kfree(hctxs);
                hctxs = new_hctxs;
        }

        /* protect against switching io scheduler */
        mutex_lock(&q->sysfs_lock);
        for (i = 0; i < set->nr_hw_queues; i++) {
                int node;
                struct blk_mq_hw_ctx *hctx;

                node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
                /*
                 * If the hw queue has been mapped to another numa node,
                 * we need to realloc the hctx. If allocation fails, fall
                 * back to using the previous one.
                 */
                if (hctxs[i] && (hctxs[i]->numa_node == node))
                        continue;

                hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
                if (hctx) {
                        if (hctxs[i])
                                blk_mq_exit_hctx(q, set, hctxs[i], i);
                        hctxs[i] = hctx;
                } else {
                        if (hctxs[i])
                                pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
                                        node, hctxs[i]->numa_node);
                        else
                                break;
                }
        }
        /*
         * If increasing nr_hw_queues failed, free the newly allocated
         * hctxs and keep the previous q->nr_hw_queues.
         */
        if (i != set->nr_hw_queues) {
                j = q->nr_hw_queues;
                end = i;
        } else {
                j = i;
                end = q->nr_hw_queues;
                q->nr_hw_queues = set->nr_hw_queues;
        }

        for (; j < end; j++) {
                struct blk_mq_hw_ctx *hctx = hctxs[j];

                if (hctx) {
                        if (hctx->tags)
                                blk_mq_free_map_and_requests(set, j);
                        blk_mq_exit_hctx(q, set, hctx, j);
                        hctxs[j] = NULL;
                }
        }
        mutex_unlock(&q->sysfs_lock);
}
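
/*
 * Turn a pre-allocated request queue into a fully functional blk-mq queue:
 * set up the poll-stats callback, the per-CPU software contexts, the
 * hardware contexts, timeout and requeue handling, and finally the
 * software-to-hardware queue mapping.
 */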
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                                                  struct request_queue *q,
                                                  bool elevator_init)
{
        /* mark the queue as mq asap */
        q->mq_ops = set->ops;

        q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
                                             blk_mq_poll_stats_bkt,
                                             BLK_MQ_POLL_STATS_BKTS, q);
        if (!q->poll_cb)
                goto err_exit;

        if (blk_mq_alloc_ctxs(q))
                goto err_poll;

        /* init q->mq_kobj and sw queues' kobjects */
        blk_mq_sysfs_init(q);

        INIT_LIST_HEAD(&q->unused_hctx_list);
        spin_lock_init(&q->unused_hctx_lock);

        blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;

        INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

        q->tag_set = set;

        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
        if (set->nr_maps > HCTX_TYPE_POLL &&
            set->map[HCTX_TYPE_POLL].nr_queues)
                blk_queue_flag_set(QUEUE_FLAG_POLL, q);

        q->sg_reserved_size = INT_MAX;

        INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
        INIT_LIST_HEAD(&q->requeue_list);
        spin_lock_init(&q->requeue_lock);

        q->nr_requests = set->queue_depth;

        /* Default to classic polling */
        q->poll_nsec = BLK_MQ_POLL_CLASSIC;

        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
        blk_mq_map_swqueue(q);

        if (elevator_init)
                elevator_init_mq(q);

        return q;

err_hctxs:
        kfree(q->queue_hw_ctx);
        q->nr_hw_queues = 0;
        blk_mq_sysfs_deinit(q);
err_poll:
        blk_stat_free_callback(q->poll_cb);
        q->poll_cb = NULL;
err_exit:
        q->mq_ops = NULL;
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
        struct blk_mq_tag_set *set = q->tag_set;

        /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
        blk_mq_del_queue_tag_set(q);
}
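
/*
 * Allocate a tag map and the static requests for every hardware queue at
 * the current set->queue_depth, unwinding all previously allocated maps
 * if any single allocation fails.
 */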
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
        int i;

        for (i = 0; i < set->nr_hw_queues; i++) {
                if (!__blk_mq_alloc_map_and_request(set, i))
                        goto out_unwind;
                cond_resched();
        }

        return 0;

out_unwind:
        while (--i >= 0)
                blk_mq_free_map_and_requests(set, i);

        return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
{
        unsigned int depth;
        int err;

        depth = set->queue_depth;
        do {
                err = __blk_mq_alloc_rq_maps(set);
                if (!err)
                        break;

                set->queue_depth >>= 1;
                if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
                        err = -ENOMEM;
                        break;
                }
        } while (set->queue_depth);

        if (!set->queue_depth || err) {
                pr_err("blk-mq: failed to allocate request map\n");
                return -ENOMEM;
        }

        if (depth != set->queue_depth)
                pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
                        depth, set->queue_depth);

        return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
        /*
         * blk_mq_map_queues() and multiple .map_queues() implementations
         * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
         * number of hardware queues.
         */
        if (set->nr_maps == 1)
                set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

        if (set->ops->map_queues && !is_kdump_kernel()) {
                int i;

                /*
                 * A transport's .map_queues implementation usually looks
                 * like this:
                 *
                 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
                 *      mask = get_cpu_mask(queue)
                 *      for_each_cpu(cpu, mask)
                 *              set->map[x].mq_map[cpu] = queue;
                 * }
                 *
                 * When we need to remap, the table has to be cleared to
                 * kill any stale mapping, since a CPU may not end up mapped
                 * to any hw queue.
                 */
                for (i = 0; i < set->nr_maps; i++)
                        blk_mq_clear_mq_map(&set->map[i]);

                return set->ops->map_queues(set);
        } else {
                BUG_ON(set->nr_maps > 1);
                return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
        }
}
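
/*
 * Grow set->tags so that it can hold @new_nr_hw_queues entries, copying
 * the existing pointers over. The array is only ever grown here; reducing
 * the number of hardware queues leaves the existing array in place.
 */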
static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
                                       int cur_nr_hw_queues, int new_nr_hw_queues)
{
        struct blk_mq_tags **new_tags;

        if (cur_nr_hw_queues >= new_nr_hw_queues)
                return 0;

        new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
                                GFP_KERNEL, set->numa_node);
        if (!new_tags)
                return -ENOMEM;

        if (set->tags)
                memcpy(new_tags, set->tags, cur_nr_hw_queues *
                       sizeof(*set->tags));
        kfree(set->tags);
        set->tags = new_tags;
        set->nr_hw_queues = new_nr_hw_queues;

        return 0;
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with -EINVAL for various error conditions. May adjust the
 * requested depth down if it's too large; in that case, the adjusted
 * depth is stored in set->queue_depth.
 */
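/*
 * Illustrative sketch only (the field values and 'my_mq_ops' below are
 * hypothetical, not taken from a real driver): a multi-queue driver
 * typically fills in the set once at probe time and then creates its
 * request queue(s) from it:
 *
 *      memset(&set, 0, sizeof(set));
 *      set.ops = &my_mq_ops;
 *      set.nr_hw_queues = num_online_cpus();
 *      set.queue_depth = 128;
 *      set.numa_node = NUMA_NO_NODE;
 *      set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *      ret = blk_mq_alloc_tag_set(&set);
 *      if (ret)
 *              return ret;
 *      q = blk_mq_init_queue(&set);
 *
 * The matching teardown is blk_cleanup_queue() followed by
 * blk_mq_free_tag_set().
 */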
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
        int i, ret;

        BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
                return -EINVAL;
        if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                return -EINVAL;

        if (!set->ops->queue_rq)
                return -EINVAL;

        if (!set->ops->get_budget ^ !set->ops->put_budget)
                return -EINVAL;

        if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
                pr_info("blk-mq: reduced tag depth to %u\n",
                        BLK_MQ_MAX_DEPTH);
                set->queue_depth = BLK_MQ_MAX_DEPTH;
        }

        if (!set->nr_maps)
                set->nr_maps = 1;
        else if (set->nr_maps > HCTX_MAX_TYPES)
                return -EINVAL;

        /*
         * If a crashdump is active, then we are potentially in a very
         * memory constrained environment. Limit us to 1 queue and
         * 64 tags to prevent using too much memory.
         */
        if (is_kdump_kernel()) {
                set->nr_hw_queues = 1;
                set->nr_maps = 1;
                set->queue_depth = min(64U, set->queue_depth);
        }
        /*
         * There is no use for more h/w queues than cpus if we just have
         * a single map.
         */
        if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
                set->nr_hw_queues = nr_cpu_ids;

        if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
                return -ENOMEM;

        ret = -ENOMEM;
        for (i = 0; i < set->nr_maps; i++) {
                set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
                                                  sizeof(set->map[i].mq_map[0]),
                                                  GFP_KERNEL, set->numa_node);
                if (!set->map[i].mq_map)
                        goto out_free_mq_map;
                set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
        }

        ret = blk_mq_update_queue_map(set);
        if (ret)
                goto out_free_mq_map;

        ret = blk_mq_alloc_map_and_requests(set);
        if (ret)
                goto out_free_mq_map;

        if (blk_mq_is_sbitmap_shared(set->flags)) {
                atomic_set(&set->active_queues_shared_sbitmap, 0);

                if (blk_mq_init_shared_sbitmap(set, set->flags)) {
                        ret = -ENOMEM;
                        goto out_free_mq_rq_maps;
                }
        }

        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);

        return 0;

out_free_mq_rq_maps:
        for (i = 0; i < set->nr_hw_queues; i++)
                blk_mq_free_map_and_requests(set, i);
out_free_mq_map:
        for (i = 0; i < set->nr_maps; i++) {
                kfree(set->map[i].mq_map);
                set->map[i].mq_map = NULL;
        }
        kfree(set->tags);
        set->tags = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
        int i, j;

        for (i = 0; i < set->nr_hw_queues; i++)
                blk_mq_free_map_and_requests(set, i);

        if (blk_mq_is_sbitmap_shared(set->flags))
                blk_mq_exit_shared_sbitmap(set);

        for (j = 0; j < set->nr_maps; j++) {
                kfree(set->map[j].mq_map);
                set->map[j].mq_map = NULL;
        }

        kfree(set->tags);
        set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_hw_ctx *hctx;
        int i, ret;

        if (!set)
                return -EINVAL;

        if (q->nr_requests == nr)
                return 0;

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        ret = 0;
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->tags)
                        continue;
                /*
                 * If we're using an MQ scheduler, just update the scheduler
                 * queue depth; otherwise resize the hardware tags themselves.
                 * This is similar to what the old code path did.
                 */
                if (!hctx->sched_tags) {
                        ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
                                                      false);
                        if (!ret && blk_mq_is_sbitmap_shared(set->flags))
                                blk_mq_tag_resize_shared_sbitmap(set, nr);
                } else {
                        ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
                                                      nr, true);
                }
                if (ret)
                        break;
                if (q->elevator && q->elevator->type->ops.depth_updated)
                        q->elevator->type->ops.depth_updated(hctx);
        }

        if (!ret)
                q->nr_requests = nr;

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return ret;
}

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
        struct list_head node;
        struct request_queue *q;
        struct elevator_type *type;
};

/*
 * Cache the elevator_type in the qe pair list and switch the
 * io scheduler to 'none'.
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
                                   struct request_queue *q)
{
        struct blk_mq_qe_pair *qe;

        if (!q->elevator)
                return true;

        qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
        if (!qe)
                return false;

        INIT_LIST_HEAD(&qe->node);
        qe->q = q;
        qe->type = q->elevator->type;
        list_add(&qe->node, head);

        mutex_lock(&q->sysfs_lock);
        /*
         * After elevator_switch_mq(), the previous elevator_queue will be
         * released by elevator_release(), which also drops the reference
         * on the io scheduler module taken by elevator_get(). Take an
         * extra reference on the module here so that it cannot be
         * unloaded while we still need it.
         */
        __module_get(qe->type->elevator_owner);
        elevator_switch_mq(q, NULL);
        mutex_unlock(&q->sysfs_lock);

        return true;
}

static void blk_mq_elv_switch_back(struct list_head *head,
                                   struct request_queue *q)
{
        struct blk_mq_qe_pair *qe;
        struct elevator_type *t = NULL;

        list_for_each_entry(qe, head, node)
                if (qe->q == q) {
                        t = qe->type;
                        break;
                }

        if (!t)
                return;

        list_del(&qe->node);
        kfree(qe);

        mutex_lock(&q->sysfs_lock);
        elevator_switch_mq(q, t);
        mutex_unlock(&q->sysfs_lock);
}
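
/*
 * Core of blk_mq_update_nr_hw_queues(): with all queues in the tag set
 * frozen, switch their elevators to 'none', unregister sysfs/debugfs
 * entries, reallocate the tag and hctx arrays for the new count, rebuild
 * the CPU-to-queue mapping, and then undo the temporary state in reverse
 * order. If growing the queue count fails part way, we fall back to the
 * previous nr_hw_queues.
 */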
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                                         int nr_hw_queues)
{
        struct request_queue *q;
        LIST_HEAD(head);
        int prev_nr_hw_queues;

        lockdep_assert_held(&set->tag_list_lock);

        if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
                nr_hw_queues = nr_cpu_ids;
        if (nr_hw_queues < 1)
                return;
        if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
                return;

        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_freeze_queue(q);
        /*
         * Switch IO scheduler to 'none', cleaning up the data associated
         * with the previous scheduler. We will switch back once we are done
         * updating the new sw to hw queue mappings.
         */
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                if (!blk_mq_elv_switch_none(&head, q))
                        goto switch_back;

        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_debugfs_unregister_hctxs(q);
                blk_mq_sysfs_unregister(q);
        }

        prev_nr_hw_queues = set->nr_hw_queues;
        if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues,
                                        nr_hw_queues) < 0)
                goto reregister;

        set->nr_hw_queues = nr_hw_queues;
fallback:
        blk_mq_update_queue_map(set);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_realloc_hw_ctxs(set, q);
                if (q->nr_hw_queues != set->nr_hw_queues) {
                        pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
                                nr_hw_queues, prev_nr_hw_queues);
                        set->nr_hw_queues = prev_nr_hw_queues;
                        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
                        goto fallback;
                }
                blk_mq_map_swqueue(q);
        }

reregister:
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_sysfs_register(q);
                blk_mq_debugfs_register_hctxs(q);
        }

switch_back:
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_elv_switch_back(&head, q);

        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
        mutex_lock(&set->tag_list_lock);
        __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
        mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
            blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
                return true;
        blk_stat_add_callback(q, q->poll_cb);
        return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
        /*
         * We don't arm the callback if polling stats are not enabled or the
         * callback is already active.
         */
        if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
            blk_stat_is_active(q->poll_cb))
                return;

        blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
        struct request_queue *q = cb->data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
                if (cb->stat[bucket].nr_samples)
                        q->poll_stat[bucket] = cb->stat[bucket];
        }
}
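
/*
 * Estimate how long a hybrid-polling sleep should last for @rq: half of
 * the mean completion time recorded for requests in the same size bucket,
 * or 0 if no statistics are available yet.
 */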
static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
                                       struct request *rq)
{
        unsigned long ret = 0;
        int bucket;

        /*
         * If stats collection isn't on, don't sleep but turn it on for
         * future users.
         */
        if (!blk_poll_stats_enable(q))
                return 0;

        /*
         * As an optimistic guess, use half of the mean service time
         * for this type of request. We can (and should) make this smarter.
         * For instance, if the completion latencies are tight, we can
         * get closer than just half the mean. This is especially
         * important on devices where the completion latencies are longer
         * than ~10 usec. We do use the stats for the relevant IO size
         * if available, which does lead to better estimates.
         */
        bucket = blk_mq_poll_stats_bkt(rq);
        if (bucket < 0)
                return ret;

        if (q->poll_stat[bucket].nr_samples)
                ret = (q->poll_stat[bucket].mean + 1) / 2;

        return ret;
}
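
/*
 * Hybrid polling: instead of busy-polling for the whole expected latency,
 * sleep on an hrtimer for the estimated (or user-configured) time first
 * and only then fall back to the busy-poll loop. Returns true if we
 * actually slept, so the caller can restart its poll loop.
 */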
static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
                                     struct request *rq)
{
        struct hrtimer_sleeper hs;
        enum hrtimer_mode mode;
        unsigned int nsecs;
        ktime_t kt;

        if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
                return false;

        /*
         * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
         *
         *  0:  use half of prev avg
         *  >0: use this specific value
         */
        if (q->poll_nsec > 0)
                nsecs = q->poll_nsec;
        else
                nsecs = blk_mq_poll_nsecs(q, rq);

        if (!nsecs)
                return false;

        rq->rq_flags |= RQF_MQ_POLL_SLEPT;

        /*
         * This will be replaced with the stats tracking code, using
         * 'avg_completion_time / 2' as the pre-sleep target.
         */
        kt = nsecs;

        mode = HRTIMER_MODE_REL;
        hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
        hrtimer_set_expires(&hs.timer, kt);

        do {
                if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                hrtimer_sleeper_start_expires(&hs, mode);
                if (hs.task)
                        io_schedule();
                hrtimer_cancel(&hs.timer);
                mode = HRTIMER_MODE_ABS;
        } while (hs.task && !signal_pending(current));

        __set_current_state(TASK_RUNNING);
        destroy_hrtimer_on_stack(&hs.timer);
        return true;
}

static bool blk_mq_poll_hybrid(struct request_queue *q,
                               struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
{
        struct request *rq;

        if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                return false;

        if (!blk_qc_t_is_internal(cookie))
                rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
        else {
                rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
                /*
                 * With scheduling, if the request has completed, we'll
                 * get a NULL return here, as we clear the sched tag when
                 * that happens. The request still remains valid, like always,
                 * so we should be safe with just the NULL check.
                 */
                if (!rq)
                        return false;
        }

        return blk_mq_poll_hybrid_sleep(q, rq);
}

/**
 * blk_poll - poll for IO completions
 * @q:  the queue
 * @cookie: cookie passed back at IO submission time
 * @spin: whether to spin for completions
 *
 * Description:
 *    Poll for completions on the passed in queue. Returns number of
 *    completed entries found. If @spin is true, then blk_poll will continue
 *    looping until at least one completion is found, unless the task is
 *    otherwise marked running (or we need to reschedule).
 */
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
        struct blk_mq_hw_ctx *hctx;
        long state;

        if (!blk_qc_t_valid(cookie) ||
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return 0;

        if (current->plug)
                blk_flush_plug_list(current->plug, false);

        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];

        /*
         * If we sleep, have the caller restart the poll loop to reset
         * the state. Like for the other success return cases, the
         * caller is responsible for checking if the IO completed. If
         * the IO isn't complete, we'll get called again and will go
         * straight to the busy poll loop.
         */
        if (blk_mq_poll_hybrid(q, hctx, cookie))
                return 1;

        hctx->poll_considered++;

        state = current->state;
        do {
                int ret;

                hctx->poll_invoked++;

                ret = q->mq_ops->poll(hctx);
                if (ret > 0) {
                        hctx->poll_success++;
                        __set_current_state(TASK_RUNNING);
                        return ret;
                }

                if (signal_pending_state(state, current))
                        __set_current_state(TASK_RUNNING);

                if (current->state == TASK_RUNNING)
                        return 1;
                if (ret < 0 || !spin)
                        break;
                cpu_relax();
        } while (!need_resched());

        __set_current_state(TASK_RUNNING);
        return 0;
}
EXPORT_SYMBOL_GPL(blk_poll);

unsigned int blk_mq_rq_cpu(struct request *rq)
{
        return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);
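
/*
 * Boot-time initialization: set up the per-CPU completion lists and the
 * block softirq, and register the CPU hotplug callbacks that blk-mq uses
 * to drain and remap hardware contexts when CPUs go offline or come back
 * online.
 */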
static int __init blk_mq_init(void)
{
        int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

        cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
                                  "block/softirq:dead", NULL,
                                  blk_softirq_cpu_dead);
        cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
                                blk_mq_hctx_notify_dead);
        cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
                                blk_mq_hctx_notify_online,
                                blk_mq_hctx_notify_offline);
        return 0;
}
subsys_initcall(blk_mq_init);