blk-iocost.c 96 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436
  1. /* SPDX-License-Identifier: GPL-2.0
  2. *
  3. * IO cost model based controller.
  4. *
  5. * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
  6. * Copyright (C) 2019 Andy Newell <newella@fb.com>
  7. * Copyright (C) 2019 Facebook
  8. *
  9. * One challenge of controlling IO resources is the lack of trivially
  10. * observable cost metric. This is distinguished from CPU and memory where
  11. * wallclock time and the number of bytes can serve as accurate enough
  12. * approximations.
  13. *
  14. * Bandwidth and iops are the most commonly used metrics for IO devices but
  15. * depending on the type and specifics of the device, different IO patterns
  16. * easily lead to multiple orders of magnitude variations rendering them
  17. * useless for the purpose of IO capacity distribution. While on-device
  18. * time, with a lot of clutches, could serve as a useful approximation for
  19. * non-queued rotational devices, this is no longer viable with modern
  20. * devices, even the rotational ones.
  21. *
  22. * While there is no cost metric we can trivially observe, it isn't a
  23. * complete mystery. For example, on a rotational device, seek cost
  24. * dominates while a contiguous transfer contributes a smaller amount
  25. * proportional to the size. If we can characterize at least the relative
  26. * costs of these different types of IOs, it should be possible to
  27. * implement a reasonable work-conserving proportional IO resource
  28. * distribution.
  29. *
  30. * 1. IO Cost Model
  31. *
  32. * IO cost model estimates the cost of an IO given its basic parameters and
  33. * history (e.g. the end sector of the last IO). The cost is measured in
  34. * device time. If a given IO is estimated to cost 10ms, the device should
  35. * be able to process ~100 of those IOs in a second.
  36. *
  37. * Currently, there's only one builtin cost model - linear. Each IO is
  38. * classified as sequential or random and given a base cost accordingly.
  39. * On top of that, a size cost proportional to the length of the IO is
  40. * added. While simple, this model captures the operational
  41. * characteristics of a wide varienty of devices well enough. Default
  42. * paramters for several different classes of devices are provided and the
  43. * parameters can be configured from userspace via
  44. * /sys/fs/cgroup/io.cost.model.
  45. *
  46. * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
  47. * device-specific coefficients.
  48. *
  49. * 2. Control Strategy
  50. *
  51. * The device virtual time (vtime) is used as the primary control metric.
  52. * The control strategy is composed of the following three parts.
  53. *
  54. * 2-1. Vtime Distribution
  55. *
  56. * When a cgroup becomes active in terms of IOs, its hierarchical share is
  57. * calculated. Please consider the following hierarchy where the numbers
  58. * inside parentheses denote the configured weights.
  59. *
  60. * root
  61. * / \
  62. * A (w:100) B (w:300)
  63. * / \
  64. * A0 (w:100) A1 (w:100)
  65. *
  66. * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
  67. * of equal weight, each gets 50% share. If then B starts issuing IOs, B
  68. * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
  69. * 12.5% each. The distribution mechanism only cares about these flattened
  70. * shares. They're called hweights (hierarchical weights) and always add
  71. * upto 1 (WEIGHT_ONE).
  72. *
  73. * A given cgroup's vtime runs slower in inverse proportion to its hweight.
  74. * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
  75. * against the device vtime - an IO which takes 10ms on the underlying
  76. * device is considered to take 80ms on A0.
  77. *
  78. * This constitutes the basis of IO capacity distribution. Each cgroup's
  79. * vtime is running at a rate determined by its hweight. A cgroup tracks
  80. * the vtime consumed by past IOs and can issue a new IO iff doing so
  81. * wouldn't outrun the current device vtime. Otherwise, the IO is
  82. * suspended until the vtime has progressed enough to cover it.
  83. *
  84. * 2-2. Vrate Adjustment
  85. *
  86. * It's unrealistic to expect the cost model to be perfect. There are too
  87. * many devices and even on the same device the overall performance
  88. * fluctuates depending on numerous factors such as IO mixture and device
  89. * internal garbage collection. The controller needs to adapt dynamically.
  90. *
  91. * This is achieved by adjusting the overall IO rate according to how busy
  92. * the device is. If the device becomes overloaded, we're sending down too
  93. * many IOs and should generally slow down. If there are waiting issuers
  94. * but the device isn't saturated, we're issuing too few and should
  95. * generally speed up.
  96. *
  97. * To slow down, we lower the vrate - the rate at which the device vtime
  98. * passes compared to the wall clock. For example, if the vtime is running
  99. * at the vrate of 75%, all cgroups added up would only be able to issue
  100. * 750ms worth of IOs per second, and vice-versa for speeding up.
  101. *
  102. * Device business is determined using two criteria - rq wait and
  103. * completion latencies.
  104. *
  105. * When a device gets saturated, the on-device and then the request queues
  106. * fill up and a bio which is ready to be issued has to wait for a request
  107. * to become available. When this delay becomes noticeable, it's a clear
  108. * indication that the device is saturated and we lower the vrate. This
  109. * saturation signal is fairly conservative as it only triggers when both
  110. * hardware and software queues are filled up, and is used as the default
  111. * busy signal.
  112. *
  113. * As devices can have deep queues and be unfair in how the queued commands
  114. * are executed, soley depending on rq wait may not result in satisfactory
  115. * control quality. For a better control quality, completion latency QoS
  116. * parameters can be configured so that the device is considered saturated
  117. * if N'th percentile completion latency rises above the set point.
  118. *
  119. * The completion latency requirements are a function of both the
  120. * underlying device characteristics and the desired IO latency quality of
  121. * service. There is an inherent trade-off - the tighter the latency QoS,
  122. * the higher the bandwidth lossage. Latency QoS is disabled by default
  123. * and can be set through /sys/fs/cgroup/io.cost.qos.
  124. *
  125. * 2-3. Work Conservation
  126. *
  127. * Imagine two cgroups A and B with equal weights. A is issuing a small IO
  128. * periodically while B is sending out enough parallel IOs to saturate the
  129. * device on its own. Let's say A's usage amounts to 100ms worth of IO
  130. * cost per second, i.e., 10% of the device capacity. The naive
  131. * distribution of half and half would lead to 60% utilization of the
  132. * device, a significant reduction in the total amount of work done
  133. * compared to free-for-all competition. This is too high a cost to pay
  134. * for IO control.
  135. *
  136. * To conserve the total amount of work done, we keep track of how much
  137. * each active cgroup is actually using and yield part of its weight if
  138. * there are other cgroups which can make use of it. In the above case,
  139. * A's weight will be lowered so that it hovers above the actual usage and
  140. * B would be able to use the rest.
  141. *
  142. * As we don't want to penalize a cgroup for donating its weight, the
  143. * surplus weight adjustment factors in a margin and has an immediate
  144. * snapback mechanism in case the cgroup needs more IO vtime for itself.
  145. *
  146. * Note that adjusting down surplus weights has the same effects as
  147. * accelerating vtime for other cgroups and work conservation can also be
  148. * implemented by adjusting vrate dynamically. However, squaring who can
  149. * donate and should take back how much requires hweight propagations
  150. * anyway making it easier to implement and understand as a separate
  151. * mechanism.
  152. *
  153. * 3. Monitoring
  154. *
  155. * Instead of debugfs or other clumsy monitoring mechanisms, this
  156. * controller uses a drgn based monitoring script -
  157. * tools/cgroup/iocost_monitor.py. For details on drgn, please see
  158. * https://github.com/osandov/drgn. The ouput looks like the following.
  159. *
  160. * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
  161. * active weight hweight% inflt% dbt delay usages%
  162. * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
  163. * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
  164. *
  165. * - per : Timer period
  166. * - cur_per : Internal wall and device vtime clock
  167. * - vrate : Device virtual time rate against wall clock
  168. * - weight : Surplus-adjusted and configured weights
  169. * - hweight : Surplus-adjusted and configured hierarchical weights
  170. * - inflt : The percentage of in-flight IO cost at the end of last period
  171. * - del_ms : Deferred issuer delay induction level and duration
  172. * - usages : Usage history
  173. */
  174. #include <linux/kernel.h>
  175. #include <linux/module.h>
  176. #include <linux/timer.h>
  177. #include <linux/time64.h>
  178. #include <linux/parser.h>
  179. #include <linux/sched/signal.h>
  180. #include <linux/blk-cgroup.h>
  181. #include <asm/local.h>
  182. #include <asm/local64.h>
  183. #include "blk-rq-qos.h"
  184. #include "blk-stat.h"
  185. #include "blk-wbt.h"
  186. #ifdef CONFIG_TRACEPOINTS
  187. /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
  188. #define TRACE_IOCG_PATH_LEN 1024
  189. static DEFINE_SPINLOCK(trace_iocg_path_lock);
  190. static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
  191. #define TRACE_IOCG_PATH(type, iocg, ...) \
  192. do { \
  193. unsigned long flags; \
  194. if (trace_iocost_##type##_enabled()) { \
  195. spin_lock_irqsave(&trace_iocg_path_lock, flags); \
  196. cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
  197. trace_iocg_path, TRACE_IOCG_PATH_LEN); \
  198. trace_iocost_##type(iocg, trace_iocg_path, \
  199. ##__VA_ARGS__); \
  200. spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
  201. } \
  202. } while (0)
  203. #else /* CONFIG_TRACE_POINTS */
  204. #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
  205. #endif /* CONFIG_TRACE_POINTS */
  206. enum {
  207. MILLION = 1000000,
  208. /* timer period is calculated from latency requirements, bound it */
  209. MIN_PERIOD = USEC_PER_MSEC,
  210. MAX_PERIOD = USEC_PER_SEC,
  211. /*
  212. * iocg->vtime is targeted at 50% behind the device vtime, which
  213. * serves as its IO credit buffer. Surplus weight adjustment is
  214. * immediately canceled if the vtime margin runs below 10%.
  215. */
  216. MARGIN_MIN_PCT = 10,
  217. MARGIN_LOW_PCT = 20,
  218. MARGIN_TARGET_PCT = 50,
  219. INUSE_ADJ_STEP_PCT = 25,
  220. /* Have some play in timer operations */
  221. TIMER_SLACK_PCT = 1,
  222. /* 1/64k is granular enough and can easily be handled w/ u32 */
  223. WEIGHT_ONE = 1 << 16,
  224. /*
  225. * As vtime is used to calculate the cost of each IO, it needs to
  226. * be fairly high precision. For example, it should be able to
  227. * represent the cost of a single page worth of discard with
  228. * suffificient accuracy. At the same time, it should be able to
  229. * represent reasonably long enough durations to be useful and
  230. * convenient during operation.
  231. *
  232. * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
  233. * granularity and days of wrap-around time even at extreme vrates.
  234. */
  235. VTIME_PER_SEC_SHIFT = 37,
  236. VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
  237. VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
  238. VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
  239. /* bound vrate adjustments within two orders of magnitude */
  240. VRATE_MIN_PPM = 10000, /* 1% */
  241. VRATE_MAX_PPM = 100000000, /* 10000% */
  242. VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
  243. VRATE_CLAMP_ADJ_PCT = 4,
  244. /* if IOs end up waiting for requests, issue less */
  245. RQ_WAIT_BUSY_PCT = 5,
  246. /* unbusy hysterisis */
  247. UNBUSY_THR_PCT = 75,
  248. /*
  249. * The effect of delay is indirect and non-linear and a huge amount of
  250. * future debt can accumulate abruptly while unthrottled. Linearly scale
  251. * up delay as debt is going up and then let it decay exponentially.
  252. * This gives us quick ramp ups while delay is accumulating and long
  253. * tails which can help reducing the frequency of debt explosions on
  254. * unthrottle. The parameters are experimentally determined.
  255. *
  256. * The delay mechanism provides adequate protection and behavior in many
  257. * cases. However, this is far from ideal and falls shorts on both
  258. * fronts. The debtors are often throttled too harshly costing a
  259. * significant level of fairness and possibly total work while the
  260. * protection against their impacts on the system can be choppy and
  261. * unreliable.
  262. *
  263. * The shortcoming primarily stems from the fact that, unlike for page
  264. * cache, the kernel doesn't have well-defined back-pressure propagation
  265. * mechanism and policies for anonymous memory. Fully addressing this
  266. * issue will likely require substantial improvements in the area.
  267. */
  268. MIN_DELAY_THR_PCT = 500,
  269. MAX_DELAY_THR_PCT = 25000,
  270. MIN_DELAY = 250,
  271. MAX_DELAY = 250 * USEC_PER_MSEC,
  272. /* halve debts if avg usage over 100ms is under 50% */
  273. DFGV_USAGE_PCT = 50,
  274. DFGV_PERIOD = 100 * USEC_PER_MSEC,
  275. /* don't let cmds which take a very long time pin lagging for too long */
  276. MAX_LAGGING_PERIODS = 10,
  277. /* switch iff the conditions are met for longer than this */
  278. AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
  279. /*
  280. * Count IO size in 4k pages. The 12bit shift helps keeping
  281. * size-proportional components of cost calculation in closer
  282. * numbers of digits to per-IO cost components.
  283. */
  284. IOC_PAGE_SHIFT = 12,
  285. IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
  286. IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
  287. /* if apart further than 16M, consider randio for linear model */
  288. LCOEF_RANDIO_PAGES = 4096,
  289. };
  290. enum ioc_running {
  291. IOC_IDLE,
  292. IOC_RUNNING,
  293. IOC_STOP,
  294. };
  295. /* io.cost.qos controls including per-dev enable of the whole controller */
  296. enum {
  297. QOS_ENABLE,
  298. QOS_CTRL,
  299. NR_QOS_CTRL_PARAMS,
  300. };
  301. /* io.cost.qos params */
  302. enum {
  303. QOS_RPPM,
  304. QOS_RLAT,
  305. QOS_WPPM,
  306. QOS_WLAT,
  307. QOS_MIN,
  308. QOS_MAX,
  309. NR_QOS_PARAMS,
  310. };
  311. /* io.cost.model controls */
  312. enum {
  313. COST_CTRL,
  314. COST_MODEL,
  315. NR_COST_CTRL_PARAMS,
  316. };
  317. /* builtin linear cost model coefficients */
  318. enum {
  319. I_LCOEF_RBPS,
  320. I_LCOEF_RSEQIOPS,
  321. I_LCOEF_RRANDIOPS,
  322. I_LCOEF_WBPS,
  323. I_LCOEF_WSEQIOPS,
  324. I_LCOEF_WRANDIOPS,
  325. NR_I_LCOEFS,
  326. };
  327. enum {
  328. LCOEF_RPAGE,
  329. LCOEF_RSEQIO,
  330. LCOEF_RRANDIO,
  331. LCOEF_WPAGE,
  332. LCOEF_WSEQIO,
  333. LCOEF_WRANDIO,
  334. NR_LCOEFS,
  335. };
  336. enum {
  337. AUTOP_INVALID,
  338. AUTOP_HDD,
  339. AUTOP_SSD_QD1,
  340. AUTOP_SSD_DFL,
  341. AUTOP_SSD_FAST,
  342. };
  343. struct ioc_gq;
  344. struct ioc_params {
  345. u32 qos[NR_QOS_PARAMS];
  346. u64 i_lcoefs[NR_I_LCOEFS];
  347. u64 lcoefs[NR_LCOEFS];
  348. u32 too_fast_vrate_pct;
  349. u32 too_slow_vrate_pct;
  350. };
  351. struct ioc_margins {
  352. s64 min;
  353. s64 low;
  354. s64 target;
  355. };
  356. struct ioc_missed {
  357. local_t nr_met;
  358. local_t nr_missed;
  359. u32 last_met;
  360. u32 last_missed;
  361. };
  362. struct ioc_pcpu_stat {
  363. struct ioc_missed missed[2];
  364. local64_t rq_wait_ns;
  365. u64 last_rq_wait_ns;
  366. };
  367. /* per device */
  368. struct ioc {
  369. struct rq_qos rqos;
  370. bool enabled;
  371. struct ioc_params params;
  372. struct ioc_margins margins;
  373. u32 period_us;
  374. u32 timer_slack_ns;
  375. u64 vrate_min;
  376. u64 vrate_max;
  377. spinlock_t lock;
  378. struct timer_list timer;
  379. struct list_head active_iocgs; /* active cgroups */
  380. struct ioc_pcpu_stat __percpu *pcpu_stat;
  381. enum ioc_running running;
  382. atomic64_t vtime_rate;
  383. u64 vtime_base_rate;
  384. s64 vtime_err;
  385. seqcount_spinlock_t period_seqcount;
  386. u64 period_at; /* wallclock starttime */
  387. u64 period_at_vtime; /* vtime starttime */
  388. atomic64_t cur_period; /* inc'd each period */
  389. int busy_level; /* saturation history */
  390. bool weights_updated;
  391. atomic_t hweight_gen; /* for lazy hweights */
  392. /* debt forgivness */
  393. u64 dfgv_period_at;
  394. u64 dfgv_period_rem;
  395. u64 dfgv_usage_us_sum;
  396. u64 autop_too_fast_at;
  397. u64 autop_too_slow_at;
  398. int autop_idx;
  399. bool user_qos_params:1;
  400. bool user_cost_model:1;
  401. };
  402. struct iocg_pcpu_stat {
  403. local64_t abs_vusage;
  404. };
  405. struct iocg_stat {
  406. u64 usage_us;
  407. u64 wait_us;
  408. u64 indebt_us;
  409. u64 indelay_us;
  410. };
  411. /* per device-cgroup pair */
  412. struct ioc_gq {
  413. struct blkg_policy_data pd;
  414. struct ioc *ioc;
  415. /*
  416. * A iocg can get its weight from two sources - an explicit
  417. * per-device-cgroup configuration or the default weight of the
  418. * cgroup. `cfg_weight` is the explicit per-device-cgroup
  419. * configuration. `weight` is the effective considering both
  420. * sources.
  421. *
  422. * When an idle cgroup becomes active its `active` goes from 0 to
  423. * `weight`. `inuse` is the surplus adjusted active weight.
  424. * `active` and `inuse` are used to calculate `hweight_active` and
  425. * `hweight_inuse`.
  426. *
  427. * `last_inuse` remembers `inuse` while an iocg is idle to persist
  428. * surplus adjustments.
  429. *
  430. * `inuse` may be adjusted dynamically during period. `saved_*` are used
  431. * to determine and track adjustments.
  432. */
  433. u32 cfg_weight;
  434. u32 weight;
  435. u32 active;
  436. u32 inuse;
  437. u32 last_inuse;
  438. s64 saved_margin;
  439. sector_t cursor; /* to detect randio */
  440. /*
  441. * `vtime` is this iocg's vtime cursor which progresses as IOs are
  442. * issued. If lagging behind device vtime, the delta represents
  443. * the currently available IO budget. If runnning ahead, the
  444. * overage.
  445. *
  446. * `vtime_done` is the same but progressed on completion rather
  447. * than issue. The delta behind `vtime` represents the cost of
  448. * currently in-flight IOs.
  449. */
  450. atomic64_t vtime;
  451. atomic64_t done_vtime;
  452. u64 abs_vdebt;
  453. /* current delay in effect and when it started */
  454. u64 delay;
  455. u64 delay_at;
  456. /*
  457. * The period this iocg was last active in. Used for deactivation
  458. * and invalidating `vtime`.
  459. */
  460. atomic64_t active_period;
  461. struct list_head active_list;
  462. /* see __propagate_weights() and current_hweight() for details */
  463. u64 child_active_sum;
  464. u64 child_inuse_sum;
  465. u64 child_adjusted_sum;
  466. int hweight_gen;
  467. u32 hweight_active;
  468. u32 hweight_inuse;
  469. u32 hweight_donating;
  470. u32 hweight_after_donation;
  471. struct list_head walk_list;
  472. struct list_head surplus_list;
  473. struct wait_queue_head waitq;
  474. struct hrtimer waitq_timer;
  475. /* timestamp at the latest activation */
  476. u64 activated_at;
  477. /* statistics */
  478. struct iocg_pcpu_stat __percpu *pcpu_stat;
  479. struct iocg_stat local_stat;
  480. struct iocg_stat desc_stat;
  481. struct iocg_stat last_stat;
  482. u64 last_stat_abs_vusage;
  483. u64 usage_delta_us;
  484. u64 wait_since;
  485. u64 indebt_since;
  486. u64 indelay_since;
  487. /* this iocg's depth in the hierarchy and ancestors including self */
  488. int level;
  489. struct ioc_gq *ancestors[];
  490. };
  491. /* per cgroup */
  492. struct ioc_cgrp {
  493. struct blkcg_policy_data cpd;
  494. unsigned int dfl_weight;
  495. };
  496. struct ioc_now {
  497. u64 now_ns;
  498. u64 now;
  499. u64 vnow;
  500. u64 vrate;
  501. };
  502. struct iocg_wait {
  503. struct wait_queue_entry wait;
  504. struct bio *bio;
  505. u64 abs_cost;
  506. bool committed;
  507. };
  508. struct iocg_wake_ctx {
  509. struct ioc_gq *iocg;
  510. u32 hw_inuse;
  511. s64 vbudget;
  512. };
  513. static const struct ioc_params autop[] = {
  514. [AUTOP_HDD] = {
  515. .qos = {
  516. [QOS_RLAT] = 250000, /* 250ms */
  517. [QOS_WLAT] = 250000,
  518. [QOS_MIN] = VRATE_MIN_PPM,
  519. [QOS_MAX] = VRATE_MAX_PPM,
  520. },
  521. .i_lcoefs = {
  522. [I_LCOEF_RBPS] = 174019176,
  523. [I_LCOEF_RSEQIOPS] = 41708,
  524. [I_LCOEF_RRANDIOPS] = 370,
  525. [I_LCOEF_WBPS] = 178075866,
  526. [I_LCOEF_WSEQIOPS] = 42705,
  527. [I_LCOEF_WRANDIOPS] = 378,
  528. },
  529. },
  530. [AUTOP_SSD_QD1] = {
  531. .qos = {
  532. [QOS_RLAT] = 25000, /* 25ms */
  533. [QOS_WLAT] = 25000,
  534. [QOS_MIN] = VRATE_MIN_PPM,
  535. [QOS_MAX] = VRATE_MAX_PPM,
  536. },
  537. .i_lcoefs = {
  538. [I_LCOEF_RBPS] = 245855193,
  539. [I_LCOEF_RSEQIOPS] = 61575,
  540. [I_LCOEF_RRANDIOPS] = 6946,
  541. [I_LCOEF_WBPS] = 141365009,
  542. [I_LCOEF_WSEQIOPS] = 33716,
  543. [I_LCOEF_WRANDIOPS] = 26796,
  544. },
  545. },
  546. [AUTOP_SSD_DFL] = {
  547. .qos = {
  548. [QOS_RLAT] = 25000, /* 25ms */
  549. [QOS_WLAT] = 25000,
  550. [QOS_MIN] = VRATE_MIN_PPM,
  551. [QOS_MAX] = VRATE_MAX_PPM,
  552. },
  553. .i_lcoefs = {
  554. [I_LCOEF_RBPS] = 488636629,
  555. [I_LCOEF_RSEQIOPS] = 8932,
  556. [I_LCOEF_RRANDIOPS] = 8518,
  557. [I_LCOEF_WBPS] = 427891549,
  558. [I_LCOEF_WSEQIOPS] = 28755,
  559. [I_LCOEF_WRANDIOPS] = 21940,
  560. },
  561. .too_fast_vrate_pct = 500,
  562. },
  563. [AUTOP_SSD_FAST] = {
  564. .qos = {
  565. [QOS_RLAT] = 5000, /* 5ms */
  566. [QOS_WLAT] = 5000,
  567. [QOS_MIN] = VRATE_MIN_PPM,
  568. [QOS_MAX] = VRATE_MAX_PPM,
  569. },
  570. .i_lcoefs = {
  571. [I_LCOEF_RBPS] = 3102524156LLU,
  572. [I_LCOEF_RSEQIOPS] = 724816,
  573. [I_LCOEF_RRANDIOPS] = 778122,
  574. [I_LCOEF_WBPS] = 1742780862LLU,
  575. [I_LCOEF_WSEQIOPS] = 425702,
  576. [I_LCOEF_WRANDIOPS] = 443193,
  577. },
  578. .too_slow_vrate_pct = 10,
  579. },
  580. };
  581. /*
  582. * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
  583. * vtime credit shortage and down on device saturation.
  584. */
  585. static u32 vrate_adj_pct[] =
  586. { 0, 0, 0, 0,
  587. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  588. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  589. 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
  590. static struct blkcg_policy blkcg_policy_iocost;
  591. /* accessors and helpers */
  592. static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
  593. {
  594. return container_of(rqos, struct ioc, rqos);
  595. }
  596. static struct ioc *q_to_ioc(struct request_queue *q)
  597. {
  598. return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
  599. }
  600. static const char *q_name(struct request_queue *q)
  601. {
  602. if (blk_queue_registered(q))
  603. return kobject_name(q->kobj.parent);
  604. else
  605. return "<unknown>";
  606. }
  607. static const char __maybe_unused *ioc_name(struct ioc *ioc)
  608. {
  609. return q_name(ioc->rqos.q);
  610. }
  611. static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
  612. {
  613. return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
  614. }
  615. static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
  616. {
  617. return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
  618. }
  619. static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
  620. {
  621. return pd_to_blkg(&iocg->pd);
  622. }
  623. static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
  624. {
  625. return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
  626. struct ioc_cgrp, cpd);
  627. }
  628. /*
  629. * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
  630. * weight, the more expensive each IO. Must round up.
  631. */
  632. static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
  633. {
  634. return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
  635. }
  636. /*
  637. * The inverse of abs_cost_to_cost(). Must round up.
  638. */
  639. static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
  640. {
  641. return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
  642. }
  643. static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
  644. u64 abs_cost, u64 cost)
  645. {
  646. struct iocg_pcpu_stat *gcs;
  647. bio->bi_iocost_cost = cost;
  648. atomic64_add(cost, &iocg->vtime);
  649. gcs = get_cpu_ptr(iocg->pcpu_stat);
  650. local64_add(abs_cost, &gcs->abs_vusage);
  651. put_cpu_ptr(gcs);
  652. }
  653. static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
  654. {
  655. if (lock_ioc) {
  656. spin_lock_irqsave(&iocg->ioc->lock, *flags);
  657. spin_lock(&iocg->waitq.lock);
  658. } else {
  659. spin_lock_irqsave(&iocg->waitq.lock, *flags);
  660. }
  661. }
  662. static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
  663. {
  664. if (unlock_ioc) {
  665. spin_unlock(&iocg->waitq.lock);
  666. spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
  667. } else {
  668. spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
  669. }
  670. }
  671. #define CREATE_TRACE_POINTS
  672. #include <trace/events/iocost.h>
  673. static void ioc_refresh_margins(struct ioc *ioc)
  674. {
  675. struct ioc_margins *margins = &ioc->margins;
  676. u32 period_us = ioc->period_us;
  677. u64 vrate = ioc->vtime_base_rate;
  678. margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
  679. margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
  680. margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
  681. }
  682. /* latency Qos params changed, update period_us and all the dependent params */
  683. static void ioc_refresh_period_us(struct ioc *ioc)
  684. {
  685. u32 ppm, lat, multi, period_us;
  686. lockdep_assert_held(&ioc->lock);
  687. /* pick the higher latency target */
  688. if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
  689. ppm = ioc->params.qos[QOS_RPPM];
  690. lat = ioc->params.qos[QOS_RLAT];
  691. } else {
  692. ppm = ioc->params.qos[QOS_WPPM];
  693. lat = ioc->params.qos[QOS_WLAT];
  694. }
  695. /*
  696. * We want the period to be long enough to contain a healthy number
  697. * of IOs while short enough for granular control. Define it as a
  698. * multiple of the latency target. Ideally, the multiplier should
  699. * be scaled according to the percentile so that it would nominally
  700. * contain a certain number of requests. Let's be simpler and
  701. * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
  702. */
  703. if (ppm)
  704. multi = max_t(u32, (MILLION - ppm) / 50000, 2);
  705. else
  706. multi = 2;
  707. period_us = multi * lat;
  708. period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
  709. /* calculate dependent params */
  710. ioc->period_us = period_us;
  711. ioc->timer_slack_ns = div64_u64(
  712. (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
  713. 100);
  714. ioc_refresh_margins(ioc);
  715. }
  716. static int ioc_autop_idx(struct ioc *ioc)
  717. {
  718. int idx = ioc->autop_idx;
  719. const struct ioc_params *p = &autop[idx];
  720. u32 vrate_pct;
  721. u64 now_ns;
  722. /* rotational? */
  723. if (!blk_queue_nonrot(ioc->rqos.q))
  724. return AUTOP_HDD;
  725. /* handle SATA SSDs w/ broken NCQ */
  726. if (blk_queue_depth(ioc->rqos.q) == 1)
  727. return AUTOP_SSD_QD1;
  728. /* use one of the normal ssd sets */
  729. if (idx < AUTOP_SSD_DFL)
  730. return AUTOP_SSD_DFL;
  731. /* if user is overriding anything, maintain what was there */
  732. if (ioc->user_qos_params || ioc->user_cost_model)
  733. return idx;
  734. /* step up/down based on the vrate */
  735. vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
  736. now_ns = ktime_get_ns();
  737. if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
  738. if (!ioc->autop_too_fast_at)
  739. ioc->autop_too_fast_at = now_ns;
  740. if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
  741. return idx + 1;
  742. } else {
  743. ioc->autop_too_fast_at = 0;
  744. }
  745. if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
  746. if (!ioc->autop_too_slow_at)
  747. ioc->autop_too_slow_at = now_ns;
  748. if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
  749. return idx - 1;
  750. } else {
  751. ioc->autop_too_slow_at = 0;
  752. }
  753. return idx;
  754. }
  755. /*
  756. * Take the followings as input
  757. *
  758. * @bps maximum sequential throughput
  759. * @seqiops maximum sequential 4k iops
  760. * @randiops maximum random 4k iops
  761. *
  762. * and calculate the linear model cost coefficients.
  763. *
  764. * *@page per-page cost 1s / (@bps / 4096)
  765. * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
  766. * @randiops base cost of a rand IO max((1s / @randiops) - *@page, 0)
  767. */
  768. static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
  769. u64 *page, u64 *seqio, u64 *randio)
  770. {
  771. u64 v;
  772. *page = *seqio = *randio = 0;
  773. if (bps)
  774. *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
  775. DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
  776. if (seqiops) {
  777. v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
  778. if (v > *page)
  779. *seqio = v - *page;
  780. }
  781. if (randiops) {
  782. v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
  783. if (v > *page)
  784. *randio = v - *page;
  785. }
  786. }
  787. static void ioc_refresh_lcoefs(struct ioc *ioc)
  788. {
  789. u64 *u = ioc->params.i_lcoefs;
  790. u64 *c = ioc->params.lcoefs;
  791. calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
  792. &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
  793. calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
  794. &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
  795. }
  796. static bool ioc_refresh_params(struct ioc *ioc, bool force)
  797. {
  798. const struct ioc_params *p;
  799. int idx;
  800. lockdep_assert_held(&ioc->lock);
  801. idx = ioc_autop_idx(ioc);
  802. p = &autop[idx];
  803. if (idx == ioc->autop_idx && !force)
  804. return false;
  805. if (idx != ioc->autop_idx)
  806. atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
  807. ioc->autop_idx = idx;
  808. ioc->autop_too_fast_at = 0;
  809. ioc->autop_too_slow_at = 0;
  810. if (!ioc->user_qos_params)
  811. memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
  812. if (!ioc->user_cost_model)
  813. memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
  814. ioc_refresh_period_us(ioc);
  815. ioc_refresh_lcoefs(ioc);
  816. ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
  817. VTIME_PER_USEC, MILLION);
  818. ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
  819. VTIME_PER_USEC, MILLION);
  820. return true;
  821. }
  822. /*
  823. * When an iocg accumulates too much vtime or gets deactivated, we throw away
  824. * some vtime, which lowers the overall device utilization. As the exact amount
  825. * which is being thrown away is known, we can compensate by accelerating the
  826. * vrate accordingly so that the extra vtime generated in the current period
  827. * matches what got lost.
  828. */
  829. static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
  830. {
  831. s64 pleft = ioc->period_at + ioc->period_us - now->now;
  832. s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
  833. s64 vcomp, vcomp_min, vcomp_max;
  834. lockdep_assert_held(&ioc->lock);
  835. /* we need some time left in this period */
  836. if (pleft <= 0)
  837. goto done;
  838. /*
  839. * Calculate how much vrate should be adjusted to offset the error.
  840. * Limit the amount of adjustment and deduct the adjusted amount from
  841. * the error.
  842. */
  843. vcomp = -div64_s64(ioc->vtime_err, pleft);
  844. vcomp_min = -(ioc->vtime_base_rate >> 1);
  845. vcomp_max = ioc->vtime_base_rate;
  846. vcomp = clamp(vcomp, vcomp_min, vcomp_max);
  847. ioc->vtime_err += vcomp * pleft;
  848. atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
  849. done:
  850. /* bound how much error can accumulate */
  851. ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
  852. }
  853. /* take a snapshot of the current [v]time and vrate */
  854. static void ioc_now(struct ioc *ioc, struct ioc_now *now)
  855. {
  856. unsigned seq;
  857. now->now_ns = ktime_get();
  858. now->now = ktime_to_us(now->now_ns);
  859. now->vrate = atomic64_read(&ioc->vtime_rate);
  860. /*
  861. * The current vtime is
  862. *
  863. * vtime at period start + (wallclock time since the start) * vrate
  864. *
  865. * As a consistent snapshot of `period_at_vtime` and `period_at` is
  866. * needed, they're seqcount protected.
  867. */
  868. do {
  869. seq = read_seqcount_begin(&ioc->period_seqcount);
  870. now->vnow = ioc->period_at_vtime +
  871. (now->now - ioc->period_at) * now->vrate;
  872. } while (read_seqcount_retry(&ioc->period_seqcount, seq));
  873. }
  874. static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
  875. {
  876. WARN_ON_ONCE(ioc->running != IOC_RUNNING);
  877. write_seqcount_begin(&ioc->period_seqcount);
  878. ioc->period_at = now->now;
  879. ioc->period_at_vtime = now->vnow;
  880. write_seqcount_end(&ioc->period_seqcount);
  881. ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
  882. add_timer(&ioc->timer);
  883. }
  884. /*
  885. * Update @iocg's `active` and `inuse` to @active and @inuse, update level
  886. * weight sums and propagate upwards accordingly. If @save, the current margin
  887. * is saved to be used as reference for later inuse in-period adjustments.
  888. */
  889. static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
  890. bool save, struct ioc_now *now)
  891. {
  892. struct ioc *ioc = iocg->ioc;
  893. int lvl;
  894. lockdep_assert_held(&ioc->lock);
  895. /*
  896. * For an active leaf node, its inuse shouldn't be zero or exceed
  897. * @active. An active internal node's inuse is solely determined by the
  898. * inuse to active ratio of its children regardless of @inuse.
  899. */
  900. if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
  901. inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
  902. iocg->child_active_sum);
  903. } else {
  904. inuse = clamp_t(u32, inuse, 1, active);
  905. }
  906. iocg->last_inuse = iocg->inuse;
  907. if (save)
  908. iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
  909. if (active == iocg->active && inuse == iocg->inuse)
  910. return;
  911. for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
  912. struct ioc_gq *parent = iocg->ancestors[lvl];
  913. struct ioc_gq *child = iocg->ancestors[lvl + 1];
  914. u32 parent_active = 0, parent_inuse = 0;
  915. /* update the level sums */
  916. parent->child_active_sum += (s32)(active - child->active);
  917. parent->child_inuse_sum += (s32)(inuse - child->inuse);
  918. /* apply the updates */
  919. child->active = active;
  920. child->inuse = inuse;
  921. /*
  922. * The delta between inuse and active sums indicates that
  923. * that much of weight is being given away. Parent's inuse
  924. * and active should reflect the ratio.
  925. */
  926. if (parent->child_active_sum) {
  927. parent_active = parent->weight;
  928. parent_inuse = DIV64_U64_ROUND_UP(
  929. parent_active * parent->child_inuse_sum,
  930. parent->child_active_sum);
  931. }
  932. /* do we need to keep walking up? */
  933. if (parent_active == parent->active &&
  934. parent_inuse == parent->inuse)
  935. break;
  936. active = parent_active;
  937. inuse = parent_inuse;
  938. }
  939. ioc->weights_updated = true;
  940. }
  941. static void commit_weights(struct ioc *ioc)
  942. {
  943. lockdep_assert_held(&ioc->lock);
  944. if (ioc->weights_updated) {
  945. /* paired with rmb in current_hweight(), see there */
  946. smp_wmb();
  947. atomic_inc(&ioc->hweight_gen);
  948. ioc->weights_updated = false;
  949. }
  950. }
  951. static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
  952. bool save, struct ioc_now *now)
  953. {
  954. __propagate_weights(iocg, active, inuse, save, now);
  955. commit_weights(iocg->ioc);
  956. }
  957. static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
  958. {
  959. struct ioc *ioc = iocg->ioc;
  960. int lvl;
  961. u32 hwa, hwi;
  962. int ioc_gen;
  963. /* hot path - if uptodate, use cached */
  964. ioc_gen = atomic_read(&ioc->hweight_gen);
  965. if (ioc_gen == iocg->hweight_gen)
  966. goto out;
  967. /*
  968. * Paired with wmb in commit_weights(). If we saw the updated
  969. * hweight_gen, all the weight updates from __propagate_weights() are
  970. * visible too.
  971. *
  972. * We can race with weight updates during calculation and get it
  973. * wrong. However, hweight_gen would have changed and a future
  974. * reader will recalculate and we're guaranteed to discard the
  975. * wrong result soon.
  976. */
  977. smp_rmb();
  978. hwa = hwi = WEIGHT_ONE;
  979. for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
  980. struct ioc_gq *parent = iocg->ancestors[lvl];
  981. struct ioc_gq *child = iocg->ancestors[lvl + 1];
  982. u64 active_sum = READ_ONCE(parent->child_active_sum);
  983. u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
  984. u32 active = READ_ONCE(child->active);
  985. u32 inuse = READ_ONCE(child->inuse);
  986. /* we can race with deactivations and either may read as zero */
  987. if (!active_sum || !inuse_sum)
  988. continue;
  989. active_sum = max_t(u64, active, active_sum);
  990. hwa = div64_u64((u64)hwa * active, active_sum);
  991. inuse_sum = max_t(u64, inuse, inuse_sum);
  992. hwi = div64_u64((u64)hwi * inuse, inuse_sum);
  993. }
  994. iocg->hweight_active = max_t(u32, hwa, 1);
  995. iocg->hweight_inuse = max_t(u32, hwi, 1);
  996. iocg->hweight_gen = ioc_gen;
  997. out:
  998. if (hw_activep)
  999. *hw_activep = iocg->hweight_active;
  1000. if (hw_inusep)
  1001. *hw_inusep = iocg->hweight_inuse;
  1002. }
  1003. /*
  1004. * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
  1005. * other weights stay unchanged.
  1006. */
  1007. static u32 current_hweight_max(struct ioc_gq *iocg)
  1008. {
  1009. u32 hwm = WEIGHT_ONE;
  1010. u32 inuse = iocg->active;
  1011. u64 child_inuse_sum;
  1012. int lvl;
  1013. lockdep_assert_held(&iocg->ioc->lock);
  1014. for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
  1015. struct ioc_gq *parent = iocg->ancestors[lvl];
  1016. struct ioc_gq *child = iocg->ancestors[lvl + 1];
  1017. child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
  1018. hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
  1019. inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
  1020. parent->child_active_sum);
  1021. }
  1022. return max_t(u32, hwm, 1);
  1023. }
  1024. static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
  1025. {
  1026. struct ioc *ioc = iocg->ioc;
  1027. struct blkcg_gq *blkg = iocg_to_blkg(iocg);
  1028. struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
  1029. u32 weight;
  1030. lockdep_assert_held(&ioc->lock);
  1031. weight = iocg->cfg_weight ?: iocc->dfl_weight;
  1032. if (weight != iocg->weight && iocg->active)
  1033. propagate_weights(iocg, weight, iocg->inuse, true, now);
  1034. iocg->weight = weight;
  1035. }
  1036. static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
  1037. {
  1038. struct ioc *ioc = iocg->ioc;
  1039. u64 last_period, cur_period;
  1040. u64 vtime, vtarget;
  1041. int i;
  1042. /*
  1043. * If seem to be already active, just update the stamp to tell the
  1044. * timer that we're still active. We don't mind occassional races.
  1045. */
  1046. if (!list_empty(&iocg->active_list)) {
  1047. ioc_now(ioc, now);
  1048. cur_period = atomic64_read(&ioc->cur_period);
  1049. if (atomic64_read(&iocg->active_period) != cur_period)
  1050. atomic64_set(&iocg->active_period, cur_period);
  1051. return true;
  1052. }
  1053. /* racy check on internal node IOs, treat as root level IOs */
  1054. if (iocg->child_active_sum)
  1055. return false;
  1056. spin_lock_irq(&ioc->lock);
  1057. ioc_now(ioc, now);
  1058. /* update period */
  1059. cur_period = atomic64_read(&ioc->cur_period);
  1060. last_period = atomic64_read(&iocg->active_period);
  1061. atomic64_set(&iocg->active_period, cur_period);
  1062. /* already activated or breaking leaf-only constraint? */
  1063. if (!list_empty(&iocg->active_list))
  1064. goto succeed_unlock;
  1065. for (i = iocg->level - 1; i > 0; i--)
  1066. if (!list_empty(&iocg->ancestors[i]->active_list))
  1067. goto fail_unlock;
  1068. if (iocg->child_active_sum)
  1069. goto fail_unlock;
  1070. /*
  1071. * Always start with the target budget. On deactivation, we throw away
  1072. * anything above it.
  1073. */
  1074. vtarget = now->vnow - ioc->margins.target;
  1075. vtime = atomic64_read(&iocg->vtime);
  1076. atomic64_add(vtarget - vtime, &iocg->vtime);
  1077. atomic64_add(vtarget - vtime, &iocg->done_vtime);
  1078. vtime = vtarget;
  1079. /*
  1080. * Activate, propagate weight and start period timer if not
  1081. * running. Reset hweight_gen to avoid accidental match from
  1082. * wrapping.
  1083. */
  1084. iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
  1085. list_add(&iocg->active_list, &ioc->active_iocgs);
  1086. propagate_weights(iocg, iocg->weight,
  1087. iocg->last_inuse ?: iocg->weight, true, now);
  1088. TRACE_IOCG_PATH(iocg_activate, iocg, now,
  1089. last_period, cur_period, vtime);
  1090. iocg->activated_at = now->now;
  1091. if (ioc->running == IOC_IDLE) {
  1092. ioc->running = IOC_RUNNING;
  1093. ioc->dfgv_period_at = now->now;
  1094. ioc->dfgv_period_rem = 0;
  1095. ioc_start_period(ioc, now);
  1096. }
  1097. succeed_unlock:
  1098. spin_unlock_irq(&ioc->lock);
  1099. return true;
  1100. fail_unlock:
  1101. spin_unlock_irq(&ioc->lock);
  1102. return false;
  1103. }
  1104. static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
  1105. {
  1106. struct ioc *ioc = iocg->ioc;
  1107. struct blkcg_gq *blkg = iocg_to_blkg(iocg);
  1108. u64 tdelta, delay, new_delay;
  1109. s64 vover, vover_pct;
  1110. u32 hwa;
  1111. lockdep_assert_held(&iocg->waitq.lock);
  1112. /* calculate the current delay in effect - 1/2 every second */
  1113. tdelta = now->now - iocg->delay_at;
  1114. if (iocg->delay)
  1115. delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
  1116. else
  1117. delay = 0;
  1118. /* calculate the new delay from the debt amount */
  1119. current_hweight(iocg, &hwa, NULL);
  1120. vover = atomic64_read(&iocg->vtime) +
  1121. abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
  1122. vover_pct = div64_s64(100 * vover,
  1123. ioc->period_us * ioc->vtime_base_rate);
  1124. if (vover_pct <= MIN_DELAY_THR_PCT)
  1125. new_delay = 0;
  1126. else if (vover_pct >= MAX_DELAY_THR_PCT)
  1127. new_delay = MAX_DELAY;
  1128. else
  1129. new_delay = MIN_DELAY +
  1130. div_u64((MAX_DELAY - MIN_DELAY) *
  1131. (vover_pct - MIN_DELAY_THR_PCT),
  1132. MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
  1133. /* pick the higher one and apply */
  1134. if (new_delay > delay) {
  1135. iocg->delay = new_delay;
  1136. iocg->delay_at = now->now;
  1137. delay = new_delay;
  1138. }
  1139. if (delay >= MIN_DELAY) {
  1140. if (!iocg->indelay_since)
  1141. iocg->indelay_since = now->now;
  1142. blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
  1143. return true;
  1144. } else {
  1145. if (iocg->indelay_since) {
  1146. iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
  1147. iocg->indelay_since = 0;
  1148. }
  1149. iocg->delay = 0;
  1150. blkcg_clear_delay(blkg);
  1151. return false;
  1152. }
  1153. }
  1154. static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
  1155. struct ioc_now *now)
  1156. {
  1157. struct iocg_pcpu_stat *gcs;
  1158. lockdep_assert_held(&iocg->ioc->lock);
  1159. lockdep_assert_held(&iocg->waitq.lock);
  1160. WARN_ON_ONCE(list_empty(&iocg->active_list));
  1161. /*
  1162. * Once in debt, debt handling owns inuse. @iocg stays at the minimum
  1163. * inuse donating all of it share to others until its debt is paid off.
  1164. */
  1165. if (!iocg->abs_vdebt && abs_cost) {
  1166. iocg->indebt_since = now->now;
  1167. propagate_weights(iocg, iocg->active, 0, false, now);
  1168. }
  1169. iocg->abs_vdebt += abs_cost;
  1170. gcs = get_cpu_ptr(iocg->pcpu_stat);
  1171. local64_add(abs_cost, &gcs->abs_vusage);
  1172. put_cpu_ptr(gcs);
  1173. }
  1174. static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
  1175. struct ioc_now *now)
  1176. {
  1177. lockdep_assert_held(&iocg->ioc->lock);
  1178. lockdep_assert_held(&iocg->waitq.lock);
  1179. /* make sure that nobody messed with @iocg */
  1180. WARN_ON_ONCE(list_empty(&iocg->active_list));
  1181. WARN_ON_ONCE(iocg->inuse > 1);
  1182. iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
  1183. /* if debt is paid in full, restore inuse */
  1184. if (!iocg->abs_vdebt) {
  1185. iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
  1186. iocg->indebt_since = 0;
  1187. propagate_weights(iocg, iocg->active, iocg->last_inuse,
  1188. false, now);
  1189. }
  1190. }
  1191. static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
  1192. int flags, void *key)
  1193. {
  1194. struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
  1195. struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
  1196. u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
  1197. ctx->vbudget -= cost;
  1198. if (ctx->vbudget < 0)
  1199. return -1;
  1200. iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
  1201. wait->committed = true;
  1202. /*
  1203. * autoremove_wake_function() removes the wait entry only when it
  1204. * actually changed the task state. We want the wait always removed.
  1205. * Remove explicitly and use default_wake_function(). Note that the
  1206. * order of operations is important as finish_wait() tests whether
  1207. * @wq_entry is removed without grabbing the lock.
  1208. */
  1209. default_wake_function(wq_entry, mode, flags, key);
  1210. list_del_init_careful(&wq_entry->entry);
  1211. return 0;
  1212. }
  1213. /*
  1214. * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
  1215. * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
  1216. * addition to iocg->waitq.lock.
  1217. */
  1218. static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
  1219. struct ioc_now *now)
  1220. {
  1221. struct ioc *ioc = iocg->ioc;
  1222. struct iocg_wake_ctx ctx = { .iocg = iocg };
  1223. u64 vshortage, expires, oexpires;
  1224. s64 vbudget;
  1225. u32 hwa;
  1226. lockdep_assert_held(&iocg->waitq.lock);
  1227. current_hweight(iocg, &hwa, NULL);
  1228. vbudget = now->vnow - atomic64_read(&iocg->vtime);
  1229. /* pay off debt */
  1230. if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
  1231. u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
  1232. u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
  1233. u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
  1234. lockdep_assert_held(&ioc->lock);
  1235. atomic64_add(vpay, &iocg->vtime);
  1236. atomic64_add(vpay, &iocg->done_vtime);
  1237. iocg_pay_debt(iocg, abs_vpay, now);
  1238. vbudget -= vpay;
  1239. }
  1240. if (iocg->abs_vdebt || iocg->delay)
  1241. iocg_kick_delay(iocg, now);
  1242. /*
  1243. * Debt can still be outstanding if we haven't paid all yet or the
  1244. * caller raced and called without @pay_debt. Shouldn't wake up waiters
  1245. * under debt. Make sure @vbudget reflects the outstanding amount and is
  1246. * not positive.
  1247. */
  1248. if (iocg->abs_vdebt) {
  1249. s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
  1250. vbudget = min_t(s64, 0, vbudget - vdebt);
  1251. }
  1252. /*
  1253. * Wake up the ones which are due and see how much vtime we'll need for
  1254. * the next one. As paying off debt restores hw_inuse, it must be read
  1255. * after the above debt payment.
  1256. */
  1257. ctx.vbudget = vbudget;
  1258. current_hweight(iocg, NULL, &ctx.hw_inuse);
  1259. __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
  1260. if (!waitqueue_active(&iocg->waitq)) {
  1261. if (iocg->wait_since) {
  1262. iocg->local_stat.wait_us += now->now - iocg->wait_since;
  1263. iocg->wait_since = 0;
  1264. }
  1265. return;
  1266. }
  1267. if (!iocg->wait_since)
  1268. iocg->wait_since = now->now;
  1269. if (WARN_ON_ONCE(ctx.vbudget >= 0))
  1270. return;
  1271. /* determine next wakeup, add a timer margin to guarantee chunking */
  1272. vshortage = -ctx.vbudget;
  1273. expires = now->now_ns +
  1274. DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
  1275. NSEC_PER_USEC;
  1276. expires += ioc->timer_slack_ns;
  1277. /* if already active and close enough, don't bother */
  1278. oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
  1279. if (hrtimer_is_queued(&iocg->waitq_timer) &&
  1280. abs(oexpires - expires) <= ioc->timer_slack_ns)
  1281. return;
  1282. hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
  1283. ioc->timer_slack_ns, HRTIMER_MODE_ABS);
  1284. }
  1285. static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
  1286. {
  1287. struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
  1288. bool pay_debt = READ_ONCE(iocg->abs_vdebt);
  1289. struct ioc_now now;
  1290. unsigned long flags;
  1291. ioc_now(iocg->ioc, &now);
  1292. iocg_lock(iocg, pay_debt, &flags);
  1293. iocg_kick_waitq(iocg, pay_debt, &now);
  1294. iocg_unlock(iocg, pay_debt, &flags);
  1295. return HRTIMER_NORESTART;
  1296. }
  1297. static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
  1298. {
  1299. u32 nr_met[2] = { };
  1300. u32 nr_missed[2] = { };
  1301. u64 rq_wait_ns = 0;
  1302. int cpu, rw;
  1303. for_each_online_cpu(cpu) {
  1304. struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
  1305. u64 this_rq_wait_ns;
  1306. for (rw = READ; rw <= WRITE; rw++) {
  1307. u32 this_met = local_read(&stat->missed[rw].nr_met);
  1308. u32 this_missed = local_read(&stat->missed[rw].nr_missed);
  1309. nr_met[rw] += this_met - stat->missed[rw].last_met;
  1310. nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
  1311. stat->missed[rw].last_met = this_met;
  1312. stat->missed[rw].last_missed = this_missed;
  1313. }
  1314. this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
  1315. rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
  1316. stat->last_rq_wait_ns = this_rq_wait_ns;
  1317. }
  1318. for (rw = READ; rw <= WRITE; rw++) {
  1319. if (nr_met[rw] + nr_missed[rw])
  1320. missed_ppm_ar[rw] =
  1321. DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
  1322. nr_met[rw] + nr_missed[rw]);
  1323. else
  1324. missed_ppm_ar[rw] = 0;
  1325. }
  1326. *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
  1327. ioc->period_us * NSEC_PER_USEC);
  1328. }
  1329. /* was iocg idle this period? */
  1330. static bool iocg_is_idle(struct ioc_gq *iocg)
  1331. {
  1332. struct ioc *ioc = iocg->ioc;
  1333. /* did something get issued this period? */
  1334. if (atomic64_read(&iocg->active_period) ==
  1335. atomic64_read(&ioc->cur_period))
  1336. return false;
  1337. /* is something in flight? */
  1338. if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
  1339. return false;
  1340. return true;
  1341. }
  1342. /*
  1343. * Call this function on the target leaf @iocg's to build pre-order traversal
  1344. * list of all the ancestors in @inner_walk. The inner nodes are linked through
  1345. * ->walk_list and the caller is responsible for dissolving the list after use.
  1346. */
  1347. static void iocg_build_inner_walk(struct ioc_gq *iocg,
  1348. struct list_head *inner_walk)
  1349. {
  1350. int lvl;
  1351. WARN_ON_ONCE(!list_empty(&iocg->walk_list));
  1352. /* find the first ancestor which hasn't been visited yet */
  1353. for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
  1354. if (!list_empty(&iocg->ancestors[lvl]->walk_list))
  1355. break;
  1356. }
  1357. /* walk down and visit the inner nodes to get pre-order traversal */
  1358. while (++lvl <= iocg->level - 1) {
  1359. struct ioc_gq *inner = iocg->ancestors[lvl];
  1360. /* record traversal order */
  1361. list_add_tail(&inner->walk_list, inner_walk);
  1362. }
  1363. }
  1364. /* collect per-cpu counters and propagate the deltas to the parent */
  1365. static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
  1366. {
  1367. struct ioc *ioc = iocg->ioc;
  1368. struct iocg_stat new_stat;
  1369. u64 abs_vusage = 0;
  1370. u64 vusage_delta;
  1371. int cpu;
  1372. lockdep_assert_held(&iocg->ioc->lock);
  1373. /* collect per-cpu counters */
  1374. for_each_possible_cpu(cpu) {
  1375. abs_vusage += local64_read(
  1376. per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
  1377. }
  1378. vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
  1379. iocg->last_stat_abs_vusage = abs_vusage;
  1380. iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
  1381. iocg->local_stat.usage_us += iocg->usage_delta_us;
  1382. /* propagate upwards */
  1383. new_stat.usage_us =
  1384. iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
  1385. new_stat.wait_us =
  1386. iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
  1387. new_stat.indebt_us =
  1388. iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
  1389. new_stat.indelay_us =
  1390. iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
  1391. /* propagate the deltas to the parent */
  1392. if (iocg->level > 0) {
  1393. struct iocg_stat *parent_stat =
  1394. &iocg->ancestors[iocg->level - 1]->desc_stat;
  1395. parent_stat->usage_us +=
  1396. new_stat.usage_us - iocg->last_stat.usage_us;
  1397. parent_stat->wait_us +=
  1398. new_stat.wait_us - iocg->last_stat.wait_us;
  1399. parent_stat->indebt_us +=
  1400. new_stat.indebt_us - iocg->last_stat.indebt_us;
  1401. parent_stat->indelay_us +=
  1402. new_stat.indelay_us - iocg->last_stat.indelay_us;
  1403. }
  1404. iocg->last_stat = new_stat;
  1405. }
  1406. /* get stat counters ready for reading on all active iocgs */
  1407. static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
  1408. {
  1409. LIST_HEAD(inner_walk);
  1410. struct ioc_gq *iocg, *tiocg;
  1411. /* flush leaves and build inner node walk list */
  1412. list_for_each_entry(iocg, target_iocgs, active_list) {
  1413. iocg_flush_stat_one(iocg, now);
  1414. iocg_build_inner_walk(iocg, &inner_walk);
  1415. }
  1416. /* keep flushing upwards by walking the inner list backwards */
  1417. list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
  1418. iocg_flush_stat_one(iocg, now);
  1419. list_del_init(&iocg->walk_list);
  1420. }
  1421. }
  1422. /*
  1423. * Determine what @iocg's hweight_inuse should be after donating unused
  1424. * capacity. @hwm is the upper bound and used to signal no donation. This
  1425. * function also throws away @iocg's excess budget.
  1426. */
  1427. static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
  1428. u32 usage, struct ioc_now *now)
  1429. {
  1430. struct ioc *ioc = iocg->ioc;
  1431. u64 vtime = atomic64_read(&iocg->vtime);
  1432. s64 excess, delta, target, new_hwi;
  1433. /* debt handling owns inuse for debtors */
  1434. if (iocg->abs_vdebt)
  1435. return 1;
  1436. /* see whether minimum margin requirement is met */
  1437. if (waitqueue_active(&iocg->waitq) ||
  1438. time_after64(vtime, now->vnow - ioc->margins.min))
  1439. return hwm;
  1440. /* throw away excess above target */
  1441. excess = now->vnow - vtime - ioc->margins.target;
  1442. if (excess > 0) {
  1443. atomic64_add(excess, &iocg->vtime);
  1444. atomic64_add(excess, &iocg->done_vtime);
  1445. vtime += excess;
  1446. ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
  1447. }
  1448. /*
  1449. * Let's say the distance between iocg's and device's vtimes as a
  1450. * fraction of period duration is delta. Assuming that the iocg will
  1451. * consume the usage determined above, we want to determine new_hwi so
  1452. * that delta equals MARGIN_TARGET at the end of the next period.
  1453. *
  1454. * We need to execute usage worth of IOs while spending the sum of the
  1455. * new budget (1 - MARGIN_TARGET) and the leftover from the last period
  1456. * (delta):
  1457. *
  1458. * usage = (1 - MARGIN_TARGET + delta) * new_hwi
  1459. *
  1460. * Therefore, the new_hwi is:
  1461. *
  1462. * new_hwi = usage / (1 - MARGIN_TARGET + delta)
  1463. */
  1464. delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
  1465. now->vnow - ioc->period_at_vtime);
  1466. target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
  1467. new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
  1468. return clamp_t(s64, new_hwi, 1, hwm);
  1469. }
  1470. /*
  1471. * For work-conservation, an iocg which isn't using all of its share should
  1472. * donate the leftover to other iocgs. There are two ways to achieve this - 1.
  1473. * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
  1474. *
  1475. * #1 is mathematically simpler but has the drawback of requiring synchronous
  1476. * global hweight_inuse updates when idle iocg's get activated or inuse weights
  1477. * change due to donation snapbacks as it has the possibility of grossly
  1478. * overshooting what's allowed by the model and vrate.
  1479. *
  1480. * #2 is inherently safe with local operations. The donating iocg can easily
  1481. * snap back to higher weights when needed without worrying about impacts on
  1482. * other nodes as the impacts will be inherently correct. This also makes idle
  1483. * iocg activations safe. The only effect activations have is decreasing
  1484. * hweight_inuse of others, the right solution to which is for those iocgs to
  1485. * snap back to higher weights.
  1486. *
  1487. * So, we go with #2. The challenge is calculating how each donating iocg's
  1488. * inuse should be adjusted to achieve the target donation amounts. This is done
  1489. * using Andy's method described in the following pdf.
  1490. *
  1491. * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
  1492. *
  1493. * Given the weights and target after-donation hweight_inuse values, Andy's
  1494. * method determines how the proportional distribution should look like at each
  1495. * sibling level to maintain the relative relationship between all non-donating
  1496. * pairs. To roughly summarize, it divides the tree into donating and
  1497. * non-donating parts, calculates global donation rate which is used to
  1498. * determine the target hweight_inuse for each node, and then derives per-level
  1499. * proportions.
  1500. *
  1501. * The following pdf shows that global distribution calculated this way can be
  1502. * achieved by scaling inuse weights of donating leaves and propagating the
  1503. * adjustments upwards proportionally.
  1504. *
  1505. * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
  1506. *
  1507. * Combining the above two, we can determine how each leaf iocg's inuse should
  1508. * be adjusted to achieve the target donation.
  1509. *
  1510. * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
  1511. *
  1512. * The inline comments use symbols from the last pdf.
  1513. *
  1514. * b is the sum of the absolute budgets in the subtree. 1 for the root node.
  1515. * f is the sum of the absolute budgets of non-donating nodes in the subtree.
  1516. * t is the sum of the absolute budgets of donating nodes in the subtree.
  1517. * w is the weight of the node. w = w_f + w_t
  1518. * w_f is the non-donating portion of w. w_f = w * f / b
  1519. * w_b is the donating portion of w. w_t = w * t / b
  1520. * s is the sum of all sibling weights. s = Sum(w) for siblings
  1521. * s_f and s_t are the non-donating and donating portions of s.
  1522. *
  1523. * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
  1524. * w_pt is the donating portion of the parent's weight and w'_pt the same value
  1525. * after adjustments. Subscript r denotes the root node's values.
  1526. */
  1527. static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
  1528. {
  1529. LIST_HEAD(over_hwa);
  1530. LIST_HEAD(inner_walk);
  1531. struct ioc_gq *iocg, *tiocg, *root_iocg;
  1532. u32 after_sum, over_sum, over_target, gamma;
  1533. /*
  1534. * It's pretty unlikely but possible for the total sum of
  1535. * hweight_after_donation's to be higher than WEIGHT_ONE, which will
  1536. * confuse the following calculations. If such condition is detected,
  1537. * scale down everyone over its full share equally to keep the sum below
  1538. * WEIGHT_ONE.
  1539. */
  1540. after_sum = 0;
  1541. over_sum = 0;
  1542. list_for_each_entry(iocg, surpluses, surplus_list) {
  1543. u32 hwa;
  1544. current_hweight(iocg, &hwa, NULL);
  1545. after_sum += iocg->hweight_after_donation;
  1546. if (iocg->hweight_after_donation > hwa) {
  1547. over_sum += iocg->hweight_after_donation;
  1548. list_add(&iocg->walk_list, &over_hwa);
  1549. }
  1550. }
  1551. if (after_sum >= WEIGHT_ONE) {
  1552. /*
  1553. * The delta should be deducted from the over_sum, calculate
  1554. * target over_sum value.
  1555. */
  1556. u32 over_delta = after_sum - (WEIGHT_ONE - 1);
  1557. WARN_ON_ONCE(over_sum <= over_delta);
  1558. over_target = over_sum - over_delta;
  1559. } else {
  1560. over_target = 0;
  1561. }
  1562. list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
  1563. if (over_target)
  1564. iocg->hweight_after_donation =
  1565. div_u64((u64)iocg->hweight_after_donation *
  1566. over_target, over_sum);
  1567. list_del_init(&iocg->walk_list);
  1568. }
  1569. /*
  1570. * Build pre-order inner node walk list and prepare for donation
  1571. * adjustment calculations.
  1572. */
  1573. list_for_each_entry(iocg, surpluses, surplus_list) {
  1574. iocg_build_inner_walk(iocg, &inner_walk);
  1575. }
  1576. root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
  1577. WARN_ON_ONCE(root_iocg->level > 0);
  1578. list_for_each_entry(iocg, &inner_walk, walk_list) {
  1579. iocg->child_adjusted_sum = 0;
  1580. iocg->hweight_donating = 0;
  1581. iocg->hweight_after_donation = 0;
  1582. }
  1583. /*
  1584. * Propagate the donating budget (b_t) and after donation budget (b'_t)
  1585. * up the hierarchy.
  1586. */
  1587. list_for_each_entry(iocg, surpluses, surplus_list) {
  1588. struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
  1589. parent->hweight_donating += iocg->hweight_donating;
  1590. parent->hweight_after_donation += iocg->hweight_after_donation;
  1591. }
  1592. list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
  1593. if (iocg->level > 0) {
  1594. struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
  1595. parent->hweight_donating += iocg->hweight_donating;
  1596. parent->hweight_after_donation += iocg->hweight_after_donation;
  1597. }
  1598. }
  1599. /*
  1600. * Calculate inner hwa's (b) and make sure the donation values are
  1601. * within the accepted ranges as we're doing low res calculations with
  1602. * roundups.
  1603. */
  1604. list_for_each_entry(iocg, &inner_walk, walk_list) {
  1605. if (iocg->level) {
  1606. struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
  1607. iocg->hweight_active = DIV64_U64_ROUND_UP(
  1608. (u64)parent->hweight_active * iocg->active,
  1609. parent->child_active_sum);
  1610. }
  1611. iocg->hweight_donating = min(iocg->hweight_donating,
  1612. iocg->hweight_active);
  1613. iocg->hweight_after_donation = min(iocg->hweight_after_donation,
  1614. iocg->hweight_donating - 1);
  1615. if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
  1616. iocg->hweight_donating <= 1 ||
  1617. iocg->hweight_after_donation == 0)) {
  1618. pr_warn("iocg: invalid donation weights in ");
  1619. pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
  1620. pr_cont(": active=%u donating=%u after=%u\n",
  1621. iocg->hweight_active, iocg->hweight_donating,
  1622. iocg->hweight_after_donation);
  1623. }
  1624. }
  1625. /*
  1626. * Calculate the global donation rate (gamma) - the rate to adjust
  1627. * non-donating budgets by.
  1628. *
  1629. * No need to use 64bit multiplication here as the first operand is
  1630. * guaranteed to be smaller than WEIGHT_ONE (1<<16).
  1631. *
  1632. * We know that there are beneficiary nodes and the sum of the donating
  1633. * hweights can't be whole; however, due to the round-ups during hweight
  1634. * calculations, root_iocg->hweight_donating might still end up equal to
  1635. * or greater than whole. Limit the range when calculating the divider.
  1636. *
  1637. * gamma = (1 - t_r') / (1 - t_r)
  1638. */
  1639. gamma = DIV_ROUND_UP(
  1640. (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
  1641. WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
  1642. /*
  1643. * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
  1644. * nodes.
  1645. */
  1646. list_for_each_entry(iocg, &inner_walk, walk_list) {
  1647. struct ioc_gq *parent;
  1648. u32 inuse, wpt, wptp;
  1649. u64 st, sf;
  1650. if (iocg->level == 0) {
  1651. /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
  1652. iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
  1653. iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
  1654. WEIGHT_ONE - iocg->hweight_after_donation);
  1655. continue;
  1656. }
  1657. parent = iocg->ancestors[iocg->level - 1];
  1658. /* b' = gamma * b_f + b_t' */
  1659. iocg->hweight_inuse = DIV64_U64_ROUND_UP(
  1660. (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
  1661. WEIGHT_ONE) + iocg->hweight_after_donation;
  1662. /* w' = s' * b' / b'_p */
  1663. inuse = DIV64_U64_ROUND_UP(
  1664. (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
  1665. parent->hweight_inuse);
  1666. /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
  1667. st = DIV64_U64_ROUND_UP(
  1668. iocg->child_active_sum * iocg->hweight_donating,
  1669. iocg->hweight_active);
  1670. sf = iocg->child_active_sum - st;
  1671. wpt = DIV64_U64_ROUND_UP(
  1672. (u64)iocg->active * iocg->hweight_donating,
  1673. iocg->hweight_active);
  1674. wptp = DIV64_U64_ROUND_UP(
  1675. (u64)inuse * iocg->hweight_after_donation,
  1676. iocg->hweight_inuse);
  1677. iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
  1678. }
  1679. /*
  1680. * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
  1681. * we can finally determine leaf adjustments.
  1682. */
  1683. list_for_each_entry(iocg, surpluses, surplus_list) {
  1684. struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
  1685. u32 inuse;
  1686. /*
  1687. * In-debt iocgs participated in the donation calculation with
  1688. * the minimum target hweight_inuse. Configuring inuse
  1689. * accordingly would work fine but debt handling expects
  1690. * @iocg->inuse stay at the minimum and we don't wanna
  1691. * interfere.
  1692. */
  1693. if (iocg->abs_vdebt) {
  1694. WARN_ON_ONCE(iocg->inuse > 1);
  1695. continue;
  1696. }
  1697. /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
  1698. inuse = DIV64_U64_ROUND_UP(
  1699. parent->child_adjusted_sum * iocg->hweight_after_donation,
  1700. parent->hweight_inuse);
  1701. TRACE_IOCG_PATH(inuse_transfer, iocg, now,
  1702. iocg->inuse, inuse,
  1703. iocg->hweight_inuse,
  1704. iocg->hweight_after_donation);
  1705. __propagate_weights(iocg, iocg->active, inuse, true, now);
  1706. }
  1707. /* walk list should be dissolved after use */
  1708. list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
  1709. list_del_init(&iocg->walk_list);
  1710. }
  1711. /*
  1712. * A low weight iocg can amass a large amount of debt, for example, when
  1713. * anonymous memory gets reclaimed aggressively. If the system has a lot of
  1714. * memory paired with a slow IO device, the debt can span multiple seconds or
  1715. * more. If there are no other subsequent IO issuers, the in-debt iocg may end
  1716. * up blocked paying its debt while the IO device is idle.
  1717. *
  1718. * The following protects against such cases. If the device has been
  1719. * sufficiently idle for a while, the debts are halved and delays are
  1720. * recalculated.
  1721. */
  1722. static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
  1723. struct ioc_now *now)
  1724. {
  1725. struct ioc_gq *iocg;
  1726. u64 dur, usage_pct, nr_cycles;
  1727. /* if no debtor, reset the cycle */
  1728. if (!nr_debtors) {
  1729. ioc->dfgv_period_at = now->now;
  1730. ioc->dfgv_period_rem = 0;
  1731. ioc->dfgv_usage_us_sum = 0;
  1732. return;
  1733. }
  1734. /*
  1735. * Debtors can pass through a lot of writes choking the device and we
  1736. * don't want to be forgiving debts while the device is struggling from
  1737. * write bursts. If we're missing latency targets, consider the device
  1738. * fully utilized.
  1739. */
  1740. if (ioc->busy_level > 0)
  1741. usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
  1742. ioc->dfgv_usage_us_sum += usage_us_sum;
  1743. if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
  1744. return;
  1745. /*
  1746. * At least DFGV_PERIOD has passed since the last period. Calculate the
  1747. * average usage and reset the period counters.
  1748. */
  1749. dur = now->now - ioc->dfgv_period_at;
  1750. usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
  1751. ioc->dfgv_period_at = now->now;
  1752. ioc->dfgv_usage_us_sum = 0;
  1753. /* if was too busy, reset everything */
  1754. if (usage_pct > DFGV_USAGE_PCT) {
  1755. ioc->dfgv_period_rem = 0;
  1756. return;
  1757. }
  1758. /*
  1759. * Usage is lower than threshold. Let's forgive some debts. Debt
  1760. * forgiveness runs off of the usual ioc timer but its period usually
  1761. * doesn't match ioc's. Compensate the difference by performing the
  1762. * reduction as many times as would fit in the duration since the last
  1763. * run and carrying over the left-over duration in @ioc->dfgv_period_rem
  1764. * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
  1765. * reductions is doubled.
  1766. */
  1767. nr_cycles = dur + ioc->dfgv_period_rem;
  1768. ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
  1769. list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
  1770. u64 __maybe_unused old_debt, __maybe_unused old_delay;
  1771. if (!iocg->abs_vdebt && !iocg->delay)
  1772. continue;
  1773. spin_lock(&iocg->waitq.lock);
  1774. old_debt = iocg->abs_vdebt;
  1775. old_delay = iocg->delay;
  1776. if (iocg->abs_vdebt)
  1777. iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
  1778. if (iocg->delay)
  1779. iocg->delay = iocg->delay >> nr_cycles ?: 1;
  1780. iocg_kick_waitq(iocg, true, now);
  1781. TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
  1782. old_debt, iocg->abs_vdebt,
  1783. old_delay, iocg->delay);
  1784. spin_unlock(&iocg->waitq.lock);
  1785. }
  1786. }
  1787. static void ioc_timer_fn(struct timer_list *timer)
  1788. {
  1789. struct ioc *ioc = container_of(timer, struct ioc, timer);
  1790. struct ioc_gq *iocg, *tiocg;
  1791. struct ioc_now now;
  1792. LIST_HEAD(surpluses);
  1793. int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
  1794. u64 usage_us_sum = 0;
  1795. u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
  1796. u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
  1797. u32 missed_ppm[2], rq_wait_pct;
  1798. u64 period_vtime;
  1799. int prev_busy_level;
  1800. /* how were the latencies during the period? */
  1801. ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
  1802. /* take care of active iocgs */
  1803. spin_lock_irq(&ioc->lock);
  1804. ioc_now(ioc, &now);
  1805. period_vtime = now.vnow - ioc->period_at_vtime;
  1806. if (WARN_ON_ONCE(!period_vtime)) {
  1807. spin_unlock_irq(&ioc->lock);
  1808. return;
  1809. }
  1810. /*
  1811. * Waiters determine the sleep durations based on the vrate they
  1812. * saw at the time of sleep. If vrate has increased, some waiters
  1813. * could be sleeping for too long. Wake up tardy waiters which
  1814. * should have woken up in the last period and expire idle iocgs.
  1815. */
  1816. list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
  1817. if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
  1818. !iocg->delay && !iocg_is_idle(iocg))
  1819. continue;
  1820. spin_lock(&iocg->waitq.lock);
  1821. /* flush wait and indebt stat deltas */
  1822. if (iocg->wait_since) {
  1823. iocg->local_stat.wait_us += now.now - iocg->wait_since;
  1824. iocg->wait_since = now.now;
  1825. }
  1826. if (iocg->indebt_since) {
  1827. iocg->local_stat.indebt_us +=
  1828. now.now - iocg->indebt_since;
  1829. iocg->indebt_since = now.now;
  1830. }
  1831. if (iocg->indelay_since) {
  1832. iocg->local_stat.indelay_us +=
  1833. now.now - iocg->indelay_since;
  1834. iocg->indelay_since = now.now;
  1835. }
  1836. if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
  1837. iocg->delay) {
  1838. /* might be oversleeping vtime / hweight changes, kick */
  1839. iocg_kick_waitq(iocg, true, &now);
  1840. if (iocg->abs_vdebt || iocg->delay)
  1841. nr_debtors++;
  1842. } else if (iocg_is_idle(iocg)) {
  1843. /* no waiter and idle, deactivate */
  1844. u64 vtime = atomic64_read(&iocg->vtime);
  1845. s64 excess;
  1846. /*
  1847. * @iocg has been inactive for a full duration and will
  1848. * have a high budget. Account anything above target as
  1849. * error and throw away. On reactivation, it'll start
  1850. * with the target budget.
  1851. */
  1852. excess = now.vnow - vtime - ioc->margins.target;
  1853. if (excess > 0) {
  1854. u32 old_hwi;
  1855. current_hweight(iocg, NULL, &old_hwi);
  1856. ioc->vtime_err -= div64_u64(excess * old_hwi,
  1857. WEIGHT_ONE);
  1858. }
  1859. __propagate_weights(iocg, 0, 0, false, &now);
  1860. list_del_init(&iocg->active_list);
  1861. }
  1862. spin_unlock(&iocg->waitq.lock);
  1863. }
  1864. commit_weights(ioc);
  1865. /*
  1866. * Wait and indebt stat are flushed above and the donation calculation
  1867. * below needs updated usage stat. Let's bring stat up-to-date.
  1868. */
  1869. iocg_flush_stat(&ioc->active_iocgs, &now);
  1870. /* calc usage and see whether some weights need to be moved around */
  1871. list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
  1872. u64 vdone, vtime, usage_us, usage_dur;
  1873. u32 usage, hw_active, hw_inuse;
  1874. /*
  1875. * Collect unused and wind vtime closer to vnow to prevent
  1876. * iocgs from accumulating a large amount of budget.
  1877. */
  1878. vdone = atomic64_read(&iocg->done_vtime);
  1879. vtime = atomic64_read(&iocg->vtime);
  1880. current_hweight(iocg, &hw_active, &hw_inuse);
  1881. /*
  1882. * Latency QoS detection doesn't account for IOs which are
  1883. * in-flight for longer than a period. Detect them by
  1884. * comparing vdone against period start. If lagging behind
  1885. * IOs from past periods, don't increase vrate.
  1886. */
  1887. if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
  1888. !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
  1889. time_after64(vtime, vdone) &&
  1890. time_after64(vtime, now.vnow -
  1891. MAX_LAGGING_PERIODS * period_vtime) &&
  1892. time_before64(vdone, now.vnow - period_vtime))
  1893. nr_lagging++;
  1894. /*
  1895. * Determine absolute usage factoring in in-flight IOs to avoid
  1896. * high-latency completions appearing as idle.
  1897. */
  1898. usage_us = iocg->usage_delta_us;
  1899. usage_us_sum += usage_us;
  1900. if (vdone != vtime) {
  1901. u64 inflight_us = DIV64_U64_ROUND_UP(
  1902. cost_to_abs_cost(vtime - vdone, hw_inuse),
  1903. ioc->vtime_base_rate);
  1904. usage_us = max(usage_us, inflight_us);
  1905. }
  1906. /* convert to hweight based usage ratio */
  1907. if (time_after64(iocg->activated_at, ioc->period_at))
  1908. usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
  1909. else
  1910. usage_dur = max_t(u64, now.now - ioc->period_at, 1);
  1911. usage = clamp_t(u32,
  1912. DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
  1913. usage_dur),
  1914. 1, WEIGHT_ONE);
  1915. /* see whether there's surplus vtime */
  1916. WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
  1917. if (hw_inuse < hw_active ||
  1918. (!waitqueue_active(&iocg->waitq) &&
  1919. time_before64(vtime, now.vnow - ioc->margins.low))) {
  1920. u32 hwa, old_hwi, hwm, new_hwi;
  1921. /*
  1922. * Already donating or accumulated enough to start.
  1923. * Determine the donation amount.
  1924. */
  1925. current_hweight(iocg, &hwa, &old_hwi);
  1926. hwm = current_hweight_max(iocg);
  1927. new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
  1928. usage, &now);
  1929. /*
  1930. * Donation calculation assumes hweight_after_donation
  1931. * to be positive, a condition that a donor w/ hwa < 2
  1932. * can't meet. Don't bother with donation if hwa is
  1933. * below 2. It's not gonna make a meaningful difference
  1934. * anyway.
  1935. */
  1936. if (new_hwi < hwm && hwa >= 2) {
  1937. iocg->hweight_donating = hwa;
  1938. iocg->hweight_after_donation = new_hwi;
  1939. list_add(&iocg->surplus_list, &surpluses);
  1940. } else {
  1941. TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
  1942. iocg->inuse, iocg->active,
  1943. iocg->hweight_inuse, new_hwi);
  1944. __propagate_weights(iocg, iocg->active,
  1945. iocg->active, true, &now);
  1946. nr_shortages++;
  1947. }
  1948. } else {
  1949. /* genuinely short on vtime */
  1950. nr_shortages++;
  1951. }
  1952. }
  1953. if (!list_empty(&surpluses) && nr_shortages)
  1954. transfer_surpluses(&surpluses, &now);
  1955. commit_weights(ioc);
  1956. /* surplus list should be dissolved after use */
  1957. list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
  1958. list_del_init(&iocg->surplus_list);
  1959. /*
  1960. * If q is getting clogged or we're missing too much, we're issuing
  1961. * too much IO and should lower vtime rate. If we're not missing
  1962. * and experiencing shortages but not surpluses, we're too stingy
  1963. * and should increase vtime rate.
  1964. */
  1965. prev_busy_level = ioc->busy_level;
  1966. if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
  1967. missed_ppm[READ] > ppm_rthr ||
  1968. missed_ppm[WRITE] > ppm_wthr) {
  1969. /* clearly missing QoS targets, slow down vrate */
  1970. ioc->busy_level = max(ioc->busy_level, 0);
  1971. ioc->busy_level++;
  1972. } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
  1973. missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
  1974. missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
  1975. /* QoS targets are being met with >25% margin */
  1976. if (nr_shortages) {
  1977. /*
  1978. * We're throttling while the device has spare
  1979. * capacity. If vrate was being slowed down, stop.
  1980. */
  1981. ioc->busy_level = min(ioc->busy_level, 0);
  1982. /*
  1983. * If there are IOs spanning multiple periods, wait
  1984. * them out before pushing the device harder.
  1985. */
  1986. if (!nr_lagging)
  1987. ioc->busy_level--;
  1988. } else {
  1989. /*
  1990. * Nobody is being throttled and the users aren't
  1991. * issuing enough IOs to saturate the device. We
  1992. * simply don't know how close the device is to
  1993. * saturation. Coast.
  1994. */
  1995. ioc->busy_level = 0;
  1996. }
  1997. } else {
  1998. /* inside the hysterisis margin, we're good */
  1999. ioc->busy_level = 0;
  2000. }
  2001. ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
  2002. if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
  2003. u64 vrate = ioc->vtime_base_rate;
  2004. u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
  2005. /* rq_wait signal is always reliable, ignore user vrate_min */
  2006. if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
  2007. vrate_min = VRATE_MIN;
  2008. /*
  2009. * If vrate is out of bounds, apply clamp gradually as the
  2010. * bounds can change abruptly. Otherwise, apply busy_level
  2011. * based adjustment.
  2012. */
  2013. if (vrate < vrate_min) {
  2014. vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
  2015. 100);
  2016. vrate = min(vrate, vrate_min);
  2017. } else if (vrate > vrate_max) {
  2018. vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
  2019. 100);
  2020. vrate = max(vrate, vrate_max);
  2021. } else {
  2022. int idx = min_t(int, abs(ioc->busy_level),
  2023. ARRAY_SIZE(vrate_adj_pct) - 1);
  2024. u32 adj_pct = vrate_adj_pct[idx];
  2025. if (ioc->busy_level > 0)
  2026. adj_pct = 100 - adj_pct;
  2027. else
  2028. adj_pct = 100 + adj_pct;
  2029. vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
  2030. vrate_min, vrate_max);
  2031. }
  2032. trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
  2033. nr_lagging, nr_shortages);
  2034. ioc->vtime_base_rate = vrate;
  2035. ioc_refresh_margins(ioc);
  2036. } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
  2037. trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
  2038. missed_ppm, rq_wait_pct, nr_lagging,
  2039. nr_shortages);
  2040. }
  2041. ioc_refresh_params(ioc, false);
  2042. ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
  2043. /*
  2044. * This period is done. Move onto the next one. If nothing's
  2045. * going on with the device, stop the timer.
  2046. */
  2047. atomic64_inc(&ioc->cur_period);
  2048. if (ioc->running != IOC_STOP) {
  2049. if (!list_empty(&ioc->active_iocgs)) {
  2050. ioc_start_period(ioc, &now);
  2051. } else {
  2052. ioc->busy_level = 0;
  2053. ioc->vtime_err = 0;
  2054. ioc->running = IOC_IDLE;
  2055. }
  2056. ioc_refresh_vrate(ioc, &now);
  2057. }
  2058. spin_unlock_irq(&ioc->lock);
  2059. }
  2060. static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
  2061. u64 abs_cost, struct ioc_now *now)
  2062. {
  2063. struct ioc *ioc = iocg->ioc;
  2064. struct ioc_margins *margins = &ioc->margins;
  2065. u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
  2066. u32 hwi, adj_step;
  2067. s64 margin;
  2068. u64 cost, new_inuse;
  2069. current_hweight(iocg, NULL, &hwi);
  2070. old_hwi = hwi;
  2071. cost = abs_cost_to_cost(abs_cost, hwi);
  2072. margin = now->vnow - vtime - cost;
  2073. /* debt handling owns inuse for debtors */
  2074. if (iocg->abs_vdebt)
  2075. return cost;
  2076. /*
  2077. * We only increase inuse during period and do so iff the margin has
  2078. * deteriorated since the previous adjustment.
  2079. */
  2080. if (margin >= iocg->saved_margin || margin >= margins->low ||
  2081. iocg->inuse == iocg->active)
  2082. return cost;
  2083. spin_lock_irq(&ioc->lock);
  2084. /* we own inuse only when @iocg is in the normal active state */
  2085. if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
  2086. spin_unlock_irq(&ioc->lock);
  2087. return cost;
  2088. }
  2089. /*
  2090. * Bump up inuse till @abs_cost fits in the existing budget.
  2091. * adj_step must be determined after acquiring ioc->lock - we might
  2092. * have raced and lost to another thread for activation and could
  2093. * be reading 0 iocg->active before ioc->lock which will lead to
  2094. * infinite loop.
  2095. */
  2096. new_inuse = iocg->inuse;
  2097. adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
  2098. do {
  2099. new_inuse = new_inuse + adj_step;
  2100. propagate_weights(iocg, iocg->active, new_inuse, true, now);
  2101. current_hweight(iocg, NULL, &hwi);
  2102. cost = abs_cost_to_cost(abs_cost, hwi);
  2103. } while (time_after64(vtime + cost, now->vnow) &&
  2104. iocg->inuse != iocg->active);
  2105. spin_unlock_irq(&ioc->lock);
  2106. TRACE_IOCG_PATH(inuse_adjust, iocg, now,
  2107. old_inuse, iocg->inuse, old_hwi, hwi);
  2108. return cost;
  2109. }
  2110. static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
  2111. bool is_merge, u64 *costp)
  2112. {
  2113. struct ioc *ioc = iocg->ioc;
  2114. u64 coef_seqio, coef_randio, coef_page;
  2115. u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
  2116. u64 seek_pages = 0;
  2117. u64 cost = 0;
  2118. switch (bio_op(bio)) {
  2119. case REQ_OP_READ:
  2120. coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
  2121. coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
  2122. coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
  2123. break;
  2124. case REQ_OP_WRITE:
  2125. coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
  2126. coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
  2127. coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
  2128. break;
  2129. default:
  2130. goto out;
  2131. }
  2132. if (iocg->cursor) {
  2133. seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
  2134. seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
  2135. }
  2136. if (!is_merge) {
  2137. if (seek_pages > LCOEF_RANDIO_PAGES) {
  2138. cost += coef_randio;
  2139. } else {
  2140. cost += coef_seqio;
  2141. }
  2142. }
  2143. cost += pages * coef_page;
  2144. out:
  2145. *costp = cost;
  2146. }
  2147. static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
  2148. {
  2149. u64 cost;
  2150. calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
  2151. return cost;
  2152. }
  2153. static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
  2154. u64 *costp)
  2155. {
  2156. unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
  2157. switch (req_op(rq)) {
  2158. case REQ_OP_READ:
  2159. *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
  2160. break;
  2161. case REQ_OP_WRITE:
  2162. *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
  2163. break;
  2164. default:
  2165. *costp = 0;
  2166. }
  2167. }
  2168. static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
  2169. {
  2170. u64 cost;
  2171. calc_size_vtime_cost_builtin(rq, ioc, &cost);
  2172. return cost;
  2173. }
  2174. static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
  2175. {
  2176. struct blkcg_gq *blkg = bio->bi_blkg;
  2177. struct ioc *ioc = rqos_to_ioc(rqos);
  2178. struct ioc_gq *iocg = blkg_to_iocg(blkg);
  2179. struct ioc_now now;
  2180. struct iocg_wait wait;
  2181. u64 abs_cost, cost, vtime;
  2182. bool use_debt, ioc_locked;
  2183. unsigned long flags;
  2184. /* bypass IOs if disabled, still initializing, or for root cgroup */
  2185. if (!ioc->enabled || !iocg || !iocg->level)
  2186. return;
  2187. /* calculate the absolute vtime cost */
  2188. abs_cost = calc_vtime_cost(bio, iocg, false);
  2189. if (!abs_cost)
  2190. return;
  2191. if (!iocg_activate(iocg, &now))
  2192. return;
  2193. iocg->cursor = bio_end_sector(bio);
  2194. vtime = atomic64_read(&iocg->vtime);
  2195. cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
  2196. /*
  2197. * If no one's waiting and within budget, issue right away. The
  2198. * tests are racy but the races aren't systemic - we only miss once
  2199. * in a while which is fine.
  2200. */
  2201. if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
  2202. time_before_eq64(vtime + cost, now.vnow)) {
  2203. iocg_commit_bio(iocg, bio, abs_cost, cost);
  2204. return;
  2205. }
  2206. /*
  2207. * We're over budget. This can be handled in two ways. IOs which may
  2208. * cause priority inversions are punted to @ioc->aux_iocg and charged as
  2209. * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
  2210. * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
  2211. * whether debt handling is needed and acquire locks accordingly.
  2212. */
  2213. use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
  2214. ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
  2215. retry_lock:
  2216. iocg_lock(iocg, ioc_locked, &flags);
  2217. /*
  2218. * @iocg must stay activated for debt and waitq handling. Deactivation
  2219. * is synchronized against both ioc->lock and waitq.lock and we won't
  2220. * get deactivated as long as we're waiting or has debt, so we're good
  2221. * if we're activated here. In the unlikely cases that we aren't, just
  2222. * issue the IO.
  2223. */
  2224. if (unlikely(list_empty(&iocg->active_list))) {
  2225. iocg_unlock(iocg, ioc_locked, &flags);
  2226. iocg_commit_bio(iocg, bio, abs_cost, cost);
  2227. return;
  2228. }
  2229. /*
  2230. * We're over budget. If @bio has to be issued regardless, remember
  2231. * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
  2232. * off the debt before waking more IOs.
  2233. *
  2234. * This way, the debt is continuously paid off each period with the
  2235. * actual budget available to the cgroup. If we just wound vtime, we
  2236. * would incorrectly use the current hw_inuse for the entire amount
  2237. * which, for example, can lead to the cgroup staying blocked for a
  2238. * long time even with substantially raised hw_inuse.
  2239. *
  2240. * An iocg with vdebt should stay online so that the timer can keep
  2241. * deducting its vdebt and [de]activate use_delay mechanism
  2242. * accordingly. We don't want to race against the timer trying to
  2243. * clear them and leave @iocg inactive w/ dangling use_delay heavily
  2244. * penalizing the cgroup and its descendants.
  2245. */
  2246. if (use_debt) {
  2247. iocg_incur_debt(iocg, abs_cost, &now);
  2248. if (iocg_kick_delay(iocg, &now))
  2249. blkcg_schedule_throttle(rqos->q,
  2250. (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
  2251. iocg_unlock(iocg, ioc_locked, &flags);
  2252. return;
  2253. }
  2254. /* guarantee that iocgs w/ waiters have maximum inuse */
  2255. if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
  2256. if (!ioc_locked) {
  2257. iocg_unlock(iocg, false, &flags);
  2258. ioc_locked = true;
  2259. goto retry_lock;
  2260. }
  2261. propagate_weights(iocg, iocg->active, iocg->active, true,
  2262. &now);
  2263. }
  2264. /*
  2265. * Append self to the waitq and schedule the wakeup timer if we're
  2266. * the first waiter. The timer duration is calculated based on the
  2267. * current vrate. vtime and hweight changes can make it too short
  2268. * or too long. Each wait entry records the absolute cost it's
  2269. * waiting for to allow re-evaluation using a custom wait entry.
  2270. *
  2271. * If too short, the timer simply reschedules itself. If too long,
  2272. * the period timer will notice and trigger wakeups.
  2273. *
  2274. * All waiters are on iocg->waitq and the wait states are
  2275. * synchronized using waitq.lock.
  2276. */
  2277. init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
  2278. wait.wait.private = current;
  2279. wait.bio = bio;
  2280. wait.abs_cost = abs_cost;
  2281. wait.committed = false; /* will be set true by waker */
  2282. __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
  2283. iocg_kick_waitq(iocg, ioc_locked, &now);
  2284. iocg_unlock(iocg, ioc_locked, &flags);
  2285. while (true) {
  2286. set_current_state(TASK_UNINTERRUPTIBLE);
  2287. if (wait.committed)
  2288. break;
  2289. io_schedule();
  2290. }
  2291. /* waker already committed us, proceed */
  2292. finish_wait(&iocg->waitq, &wait.wait);
  2293. }
  2294. static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
  2295. struct bio *bio)
  2296. {
  2297. struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
  2298. struct ioc *ioc = rqos_to_ioc(rqos);
  2299. sector_t bio_end = bio_end_sector(bio);
  2300. struct ioc_now now;
  2301. u64 vtime, abs_cost, cost;
  2302. unsigned long flags;
  2303. /* bypass if disabled, still initializing, or for root cgroup */
  2304. if (!ioc->enabled || !iocg || !iocg->level)
  2305. return;
  2306. abs_cost = calc_vtime_cost(bio, iocg, true);
  2307. if (!abs_cost)
  2308. return;
  2309. ioc_now(ioc, &now);
  2310. vtime = atomic64_read(&iocg->vtime);
  2311. cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
  2312. /* update cursor if backmerging into the request at the cursor */
  2313. if (blk_rq_pos(rq) < bio_end &&
  2314. blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
  2315. iocg->cursor = bio_end;
  2316. /*
  2317. * Charge if there's enough vtime budget and the existing request has
  2318. * cost assigned.
  2319. */
  2320. if (rq->bio && rq->bio->bi_iocost_cost &&
  2321. time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
  2322. iocg_commit_bio(iocg, bio, abs_cost, cost);
  2323. return;
  2324. }
  2325. /*
  2326. * Otherwise, account it as debt if @iocg is online, which it should
  2327. * be for the vast majority of cases. See debt handling in
  2328. * ioc_rqos_throttle() for details.
  2329. */
  2330. spin_lock_irqsave(&ioc->lock, flags);
  2331. spin_lock(&iocg->waitq.lock);
  2332. if (likely(!list_empty(&iocg->active_list))) {
  2333. iocg_incur_debt(iocg, abs_cost, &now);
  2334. if (iocg_kick_delay(iocg, &now))
  2335. blkcg_schedule_throttle(rqos->q,
  2336. (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
  2337. } else {
  2338. iocg_commit_bio(iocg, bio, abs_cost, cost);
  2339. }
  2340. spin_unlock(&iocg->waitq.lock);
  2341. spin_unlock_irqrestore(&ioc->lock, flags);
  2342. }
  2343. static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
  2344. {
  2345. struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
  2346. if (iocg && bio->bi_iocost_cost)
  2347. atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
  2348. }
  2349. static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
  2350. {
  2351. struct ioc *ioc = rqos_to_ioc(rqos);
  2352. struct ioc_pcpu_stat *ccs;
  2353. u64 on_q_ns, rq_wait_ns, size_nsec;
  2354. int pidx, rw;
  2355. if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
  2356. return;
  2357. switch (req_op(rq) & REQ_OP_MASK) {
  2358. case REQ_OP_READ:
  2359. pidx = QOS_RLAT;
  2360. rw = READ;
  2361. break;
  2362. case REQ_OP_WRITE:
  2363. pidx = QOS_WLAT;
  2364. rw = WRITE;
  2365. break;
  2366. default:
  2367. return;
  2368. }
  2369. on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
  2370. rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
  2371. size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
  2372. ccs = get_cpu_ptr(ioc->pcpu_stat);
  2373. if (on_q_ns <= size_nsec ||
  2374. on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
  2375. local_inc(&ccs->missed[rw].nr_met);
  2376. else
  2377. local_inc(&ccs->missed[rw].nr_missed);
  2378. local64_add(rq_wait_ns, &ccs->rq_wait_ns);
  2379. put_cpu_ptr(ccs);
  2380. }
  2381. static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
  2382. {
  2383. struct ioc *ioc = rqos_to_ioc(rqos);
  2384. spin_lock_irq(&ioc->lock);
  2385. ioc_refresh_params(ioc, false);
  2386. spin_unlock_irq(&ioc->lock);
  2387. }
  2388. static void ioc_rqos_exit(struct rq_qos *rqos)
  2389. {
  2390. struct ioc *ioc = rqos_to_ioc(rqos);
  2391. blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
  2392. spin_lock_irq(&ioc->lock);
  2393. ioc->running = IOC_STOP;
  2394. spin_unlock_irq(&ioc->lock);
  2395. del_timer_sync(&ioc->timer);
  2396. free_percpu(ioc->pcpu_stat);
  2397. kfree(ioc);
  2398. }
  2399. static struct rq_qos_ops ioc_rqos_ops = {
  2400. .throttle = ioc_rqos_throttle,
  2401. .merge = ioc_rqos_merge,
  2402. .done_bio = ioc_rqos_done_bio,
  2403. .done = ioc_rqos_done,
  2404. .queue_depth_changed = ioc_rqos_queue_depth_changed,
  2405. .exit = ioc_rqos_exit,
  2406. };
  2407. static int blk_iocost_init(struct request_queue *q)
  2408. {
  2409. struct ioc *ioc;
  2410. struct rq_qos *rqos;
  2411. int i, cpu, ret;
  2412. ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
  2413. if (!ioc)
  2414. return -ENOMEM;
  2415. ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
  2416. if (!ioc->pcpu_stat) {
  2417. kfree(ioc);
  2418. return -ENOMEM;
  2419. }
  2420. for_each_possible_cpu(cpu) {
  2421. struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
  2422. for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
  2423. local_set(&ccs->missed[i].nr_met, 0);
  2424. local_set(&ccs->missed[i].nr_missed, 0);
  2425. }
  2426. local64_set(&ccs->rq_wait_ns, 0);
  2427. }
  2428. rqos = &ioc->rqos;
  2429. rqos->id = RQ_QOS_COST;
  2430. rqos->ops = &ioc_rqos_ops;
  2431. rqos->q = q;
  2432. spin_lock_init(&ioc->lock);
  2433. timer_setup(&ioc->timer, ioc_timer_fn, 0);
  2434. INIT_LIST_HEAD(&ioc->active_iocgs);
  2435. ioc->running = IOC_IDLE;
  2436. ioc->vtime_base_rate = VTIME_PER_USEC;
  2437. atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
  2438. seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
  2439. ioc->period_at = ktime_to_us(ktime_get());
  2440. atomic64_set(&ioc->cur_period, 0);
  2441. atomic_set(&ioc->hweight_gen, 0);
  2442. spin_lock_irq(&ioc->lock);
  2443. ioc->autop_idx = AUTOP_INVALID;
  2444. ioc_refresh_params(ioc, true);
  2445. spin_unlock_irq(&ioc->lock);
  2446. /*
  2447. * rqos must be added before activation to allow iocg_pd_init() to
  2448. * lookup the ioc from q. This means that the rqos methods may get
  2449. * called before policy activation completion, can't assume that the
  2450. * target bio has an iocg associated and need to test for NULL iocg.
  2451. */
  2452. rq_qos_add(q, rqos);
  2453. ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
  2454. if (ret) {
  2455. rq_qos_del(q, rqos);
  2456. free_percpu(ioc->pcpu_stat);
  2457. kfree(ioc);
  2458. return ret;
  2459. }
  2460. return 0;
  2461. }
  2462. static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
  2463. {
  2464. struct ioc_cgrp *iocc;
  2465. iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
  2466. if (!iocc)
  2467. return NULL;
  2468. iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
  2469. return &iocc->cpd;
  2470. }
  2471. static void ioc_cpd_free(struct blkcg_policy_data *cpd)
  2472. {
  2473. kfree(container_of(cpd, struct ioc_cgrp, cpd));
  2474. }
  2475. static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
  2476. struct blkcg *blkcg)
  2477. {
  2478. int levels = blkcg->css.cgroup->level + 1;
  2479. struct ioc_gq *iocg;
  2480. iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
  2481. if (!iocg)
  2482. return NULL;
  2483. iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
  2484. if (!iocg->pcpu_stat) {
  2485. kfree(iocg);
  2486. return NULL;
  2487. }
  2488. return &iocg->pd;
  2489. }
  2490. static void ioc_pd_init(struct blkg_policy_data *pd)
  2491. {
  2492. struct ioc_gq *iocg = pd_to_iocg(pd);
  2493. struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
  2494. struct ioc *ioc = q_to_ioc(blkg->q);
  2495. struct ioc_now now;
  2496. struct blkcg_gq *tblkg;
  2497. unsigned long flags;
  2498. ioc_now(ioc, &now);
  2499. iocg->ioc = ioc;
  2500. atomic64_set(&iocg->vtime, now.vnow);
  2501. atomic64_set(&iocg->done_vtime, now.vnow);
  2502. atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
  2503. INIT_LIST_HEAD(&iocg->active_list);
  2504. INIT_LIST_HEAD(&iocg->walk_list);
  2505. INIT_LIST_HEAD(&iocg->surplus_list);
  2506. iocg->hweight_active = WEIGHT_ONE;
  2507. iocg->hweight_inuse = WEIGHT_ONE;
  2508. init_waitqueue_head(&iocg->waitq);
  2509. hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  2510. iocg->waitq_timer.function = iocg_waitq_timer_fn;
  2511. iocg->level = blkg->blkcg->css.cgroup->level;
  2512. for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
  2513. struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
  2514. iocg->ancestors[tiocg->level] = tiocg;
  2515. }
  2516. spin_lock_irqsave(&ioc->lock, flags);
  2517. weight_updated(iocg, &now);
  2518. spin_unlock_irqrestore(&ioc->lock, flags);
  2519. }
  2520. static void ioc_pd_free(struct blkg_policy_data *pd)
  2521. {
  2522. struct ioc_gq *iocg = pd_to_iocg(pd);
  2523. struct ioc *ioc = iocg->ioc;
  2524. unsigned long flags;
  2525. if (ioc) {
  2526. spin_lock_irqsave(&ioc->lock, flags);
  2527. if (!list_empty(&iocg->active_list)) {
  2528. struct ioc_now now;
  2529. ioc_now(ioc, &now);
  2530. propagate_weights(iocg, 0, 0, false, &now);
  2531. list_del_init(&iocg->active_list);
  2532. }
  2533. WARN_ON_ONCE(!list_empty(&iocg->walk_list));
  2534. WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
  2535. spin_unlock_irqrestore(&ioc->lock, flags);
  2536. hrtimer_cancel(&iocg->waitq_timer);
  2537. }
  2538. free_percpu(iocg->pcpu_stat);
  2539. kfree(iocg);
  2540. }
  2541. static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
  2542. {
  2543. struct ioc_gq *iocg = pd_to_iocg(pd);
  2544. struct ioc *ioc = iocg->ioc;
  2545. size_t pos = 0;
  2546. if (!ioc->enabled)
  2547. return 0;
  2548. if (iocg->level == 0) {
  2549. unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
  2550. ioc->vtime_base_rate * 10000,
  2551. VTIME_PER_USEC);
  2552. pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
  2553. vp10k / 100, vp10k % 100);
  2554. }
  2555. pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
  2556. iocg->last_stat.usage_us);
  2557. if (blkcg_debug_stats)
  2558. pos += scnprintf(buf + pos, size - pos,
  2559. " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
  2560. iocg->last_stat.wait_us,
  2561. iocg->last_stat.indebt_us,
  2562. iocg->last_stat.indelay_us);
  2563. return pos;
  2564. }
  2565. static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
  2566. int off)
  2567. {
  2568. const char *dname = blkg_dev_name(pd->blkg);
  2569. struct ioc_gq *iocg = pd_to_iocg(pd);
  2570. if (dname && iocg->cfg_weight)
  2571. seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
  2572. return 0;
  2573. }
  2574. static int ioc_weight_show(struct seq_file *sf, void *v)
  2575. {
  2576. struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
  2577. struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
  2578. seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
  2579. blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
  2580. &blkcg_policy_iocost, seq_cft(sf)->private, false);
  2581. return 0;
  2582. }
  2583. static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
  2584. size_t nbytes, loff_t off)
  2585. {
  2586. struct blkcg *blkcg = css_to_blkcg(of_css(of));
  2587. struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
  2588. struct blkg_conf_ctx ctx;
  2589. struct ioc_now now;
  2590. struct ioc_gq *iocg;
  2591. u32 v;
  2592. int ret;
  2593. if (!strchr(buf, ':')) {
  2594. struct blkcg_gq *blkg;
  2595. if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
  2596. return -EINVAL;
  2597. if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
  2598. return -EINVAL;
  2599. spin_lock_irq(&blkcg->lock);
  2600. iocc->dfl_weight = v * WEIGHT_ONE;
  2601. hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
  2602. struct ioc_gq *iocg = blkg_to_iocg(blkg);
  2603. if (iocg) {
  2604. spin_lock(&iocg->ioc->lock);
  2605. ioc_now(iocg->ioc, &now);
  2606. weight_updated(iocg, &now);
  2607. spin_unlock(&iocg->ioc->lock);
  2608. }
  2609. }
  2610. spin_unlock_irq(&blkcg->lock);
  2611. return nbytes;
  2612. }
  2613. ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
  2614. if (ret)
  2615. return ret;
  2616. iocg = blkg_to_iocg(ctx.blkg);
  2617. if (!strncmp(ctx.body, "default", 7)) {
  2618. v = 0;
  2619. } else {
  2620. if (!sscanf(ctx.body, "%u", &v))
  2621. goto einval;
  2622. if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
  2623. goto einval;
  2624. }
  2625. spin_lock(&iocg->ioc->lock);
  2626. iocg->cfg_weight = v * WEIGHT_ONE;
  2627. ioc_now(iocg->ioc, &now);
  2628. weight_updated(iocg, &now);
  2629. spin_unlock(&iocg->ioc->lock);
  2630. blkg_conf_finish(&ctx);
  2631. return nbytes;
  2632. einval:
  2633. blkg_conf_finish(&ctx);
  2634. return -EINVAL;
  2635. }
  2636. static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
  2637. int off)
  2638. {
  2639. const char *dname = blkg_dev_name(pd->blkg);
  2640. struct ioc *ioc = pd_to_iocg(pd)->ioc;
  2641. if (!dname)
  2642. return 0;
  2643. seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
  2644. dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
  2645. ioc->params.qos[QOS_RPPM] / 10000,
  2646. ioc->params.qos[QOS_RPPM] % 10000 / 100,
  2647. ioc->params.qos[QOS_RLAT],
  2648. ioc->params.qos[QOS_WPPM] / 10000,
  2649. ioc->params.qos[QOS_WPPM] % 10000 / 100,
  2650. ioc->params.qos[QOS_WLAT],
  2651. ioc->params.qos[QOS_MIN] / 10000,
  2652. ioc->params.qos[QOS_MIN] % 10000 / 100,
  2653. ioc->params.qos[QOS_MAX] / 10000,
  2654. ioc->params.qos[QOS_MAX] % 10000 / 100);
  2655. return 0;
  2656. }
  2657. static int ioc_qos_show(struct seq_file *sf, void *v)
  2658. {
  2659. struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
  2660. blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
  2661. &blkcg_policy_iocost, seq_cft(sf)->private, false);
  2662. return 0;
  2663. }
  2664. static const match_table_t qos_ctrl_tokens = {
  2665. { QOS_ENABLE, "enable=%u" },
  2666. { QOS_CTRL, "ctrl=%s" },
  2667. { NR_QOS_CTRL_PARAMS, NULL },
  2668. };
  2669. static const match_table_t qos_tokens = {
  2670. { QOS_RPPM, "rpct=%s" },
  2671. { QOS_RLAT, "rlat=%u" },
  2672. { QOS_WPPM, "wpct=%s" },
  2673. { QOS_WLAT, "wlat=%u" },
  2674. { QOS_MIN, "min=%s" },
  2675. { QOS_MAX, "max=%s" },
  2676. { NR_QOS_PARAMS, NULL },
  2677. };
  2678. static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
  2679. size_t nbytes, loff_t off)
  2680. {
  2681. struct gendisk *disk;
  2682. struct ioc *ioc;
  2683. u32 qos[NR_QOS_PARAMS];
  2684. bool enable, user;
  2685. char *p;
  2686. int ret;
  2687. disk = blkcg_conf_get_disk(&input);
  2688. if (IS_ERR(disk))
  2689. return PTR_ERR(disk);
  2690. ioc = q_to_ioc(disk->queue);
  2691. if (!ioc) {
  2692. ret = blk_iocost_init(disk->queue);
  2693. if (ret)
  2694. goto err;
  2695. ioc = q_to_ioc(disk->queue);
  2696. }
  2697. spin_lock_irq(&ioc->lock);
  2698. memcpy(qos, ioc->params.qos, sizeof(qos));
  2699. enable = ioc->enabled;
  2700. user = ioc->user_qos_params;
  2701. spin_unlock_irq(&ioc->lock);
  2702. while ((p = strsep(&input, " \t\n"))) {
  2703. substring_t args[MAX_OPT_ARGS];
  2704. char buf[32];
  2705. int tok;
  2706. s64 v;
  2707. if (!*p)
  2708. continue;
  2709. switch (match_token(p, qos_ctrl_tokens, args)) {
  2710. case QOS_ENABLE:
  2711. match_u64(&args[0], &v);
  2712. enable = v;
  2713. continue;
  2714. case QOS_CTRL:
  2715. match_strlcpy(buf, &args[0], sizeof(buf));
  2716. if (!strcmp(buf, "auto"))
  2717. user = false;
  2718. else if (!strcmp(buf, "user"))
  2719. user = true;
  2720. else
  2721. goto einval;
  2722. continue;
  2723. }
  2724. tok = match_token(p, qos_tokens, args);
  2725. switch (tok) {
  2726. case QOS_RPPM:
  2727. case QOS_WPPM:
  2728. if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
  2729. sizeof(buf))
  2730. goto einval;
  2731. if (cgroup_parse_float(buf, 2, &v))
  2732. goto einval;
  2733. if (v < 0 || v > 10000)
  2734. goto einval;
  2735. qos[tok] = v * 100;
  2736. break;
  2737. case QOS_RLAT:
  2738. case QOS_WLAT:
  2739. if (match_u64(&args[0], &v))
  2740. goto einval;
  2741. qos[tok] = v;
  2742. break;
  2743. case QOS_MIN:
  2744. case QOS_MAX:
  2745. if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
  2746. sizeof(buf))
  2747. goto einval;
  2748. if (cgroup_parse_float(buf, 2, &v))
  2749. goto einval;
  2750. if (v < 0)
  2751. goto einval;
  2752. qos[tok] = clamp_t(s64, v * 100,
  2753. VRATE_MIN_PPM, VRATE_MAX_PPM);
  2754. break;
  2755. default:
  2756. goto einval;
  2757. }
  2758. user = true;
  2759. }
  2760. if (qos[QOS_MIN] > qos[QOS_MAX])
  2761. goto einval;
  2762. spin_lock_irq(&ioc->lock);
  2763. if (enable) {
  2764. blk_stat_enable_accounting(ioc->rqos.q);
  2765. blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
  2766. ioc->enabled = true;
  2767. } else {
  2768. blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
  2769. ioc->enabled = false;
  2770. }
  2771. if (user) {
  2772. memcpy(ioc->params.qos, qos, sizeof(qos));
  2773. ioc->user_qos_params = true;
  2774. } else {
  2775. ioc->user_qos_params = false;
  2776. }
  2777. ioc_refresh_params(ioc, true);
  2778. spin_unlock_irq(&ioc->lock);
  2779. put_disk_and_module(disk);
  2780. return nbytes;
  2781. einval:
  2782. ret = -EINVAL;
  2783. err:
  2784. put_disk_and_module(disk);
  2785. return ret;
  2786. }
  2787. static u64 ioc_cost_model_prfill(struct seq_file *sf,
  2788. struct blkg_policy_data *pd, int off)
  2789. {
  2790. const char *dname = blkg_dev_name(pd->blkg);
  2791. struct ioc *ioc = pd_to_iocg(pd)->ioc;
  2792. u64 *u = ioc->params.i_lcoefs;
  2793. if (!dname)
  2794. return 0;
  2795. seq_printf(sf, "%s ctrl=%s model=linear "
  2796. "rbps=%llu rseqiops=%llu rrandiops=%llu "
  2797. "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
  2798. dname, ioc->user_cost_model ? "user" : "auto",
  2799. u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
  2800. u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
  2801. return 0;
  2802. }
  2803. static int ioc_cost_model_show(struct seq_file *sf, void *v)
  2804. {
  2805. struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
  2806. blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
  2807. &blkcg_policy_iocost, seq_cft(sf)->private, false);
  2808. return 0;
  2809. }
  2810. static const match_table_t cost_ctrl_tokens = {
  2811. { COST_CTRL, "ctrl=%s" },
  2812. { COST_MODEL, "model=%s" },
  2813. { NR_COST_CTRL_PARAMS, NULL },
  2814. };
  2815. static const match_table_t i_lcoef_tokens = {
  2816. { I_LCOEF_RBPS, "rbps=%u" },
  2817. { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
  2818. { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
  2819. { I_LCOEF_WBPS, "wbps=%u" },
  2820. { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
  2821. { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
  2822. { NR_I_LCOEFS, NULL },
  2823. };
  2824. static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
  2825. size_t nbytes, loff_t off)
  2826. {
  2827. struct gendisk *disk;
  2828. struct ioc *ioc;
  2829. u64 u[NR_I_LCOEFS];
  2830. bool user;
  2831. char *p;
  2832. int ret;
  2833. disk = blkcg_conf_get_disk(&input);
  2834. if (IS_ERR(disk))
  2835. return PTR_ERR(disk);
  2836. ioc = q_to_ioc(disk->queue);
  2837. if (!ioc) {
  2838. ret = blk_iocost_init(disk->queue);
  2839. if (ret)
  2840. goto err;
  2841. ioc = q_to_ioc(disk->queue);
  2842. }
  2843. spin_lock_irq(&ioc->lock);
  2844. memcpy(u, ioc->params.i_lcoefs, sizeof(u));
  2845. user = ioc->user_cost_model;
  2846. spin_unlock_irq(&ioc->lock);
  2847. while ((p = strsep(&input, " \t\n"))) {
  2848. substring_t args[MAX_OPT_ARGS];
  2849. char buf[32];
  2850. int tok;
  2851. u64 v;
  2852. if (!*p)
  2853. continue;
  2854. switch (match_token(p, cost_ctrl_tokens, args)) {
  2855. case COST_CTRL:
  2856. match_strlcpy(buf, &args[0], sizeof(buf));
  2857. if (!strcmp(buf, "auto"))
  2858. user = false;
  2859. else if (!strcmp(buf, "user"))
  2860. user = true;
  2861. else
  2862. goto einval;
  2863. continue;
  2864. case COST_MODEL:
  2865. match_strlcpy(buf, &args[0], sizeof(buf));
  2866. if (strcmp(buf, "linear"))
  2867. goto einval;
  2868. continue;
  2869. }
  2870. tok = match_token(p, i_lcoef_tokens, args);
  2871. if (tok == NR_I_LCOEFS)
  2872. goto einval;
  2873. if (match_u64(&args[0], &v))
  2874. goto einval;
  2875. u[tok] = v;
  2876. user = true;
  2877. }
  2878. spin_lock_irq(&ioc->lock);
  2879. if (user) {
  2880. memcpy(ioc->params.i_lcoefs, u, sizeof(u));
  2881. ioc->user_cost_model = true;
  2882. } else {
  2883. ioc->user_cost_model = false;
  2884. }
  2885. ioc_refresh_params(ioc, true);
  2886. spin_unlock_irq(&ioc->lock);
  2887. put_disk_and_module(disk);
  2888. return nbytes;
  2889. einval:
  2890. ret = -EINVAL;
  2891. err:
  2892. put_disk_and_module(disk);
  2893. return ret;
  2894. }
  2895. static struct cftype ioc_files[] = {
  2896. {
  2897. .name = "weight",
  2898. .flags = CFTYPE_NOT_ON_ROOT,
  2899. .seq_show = ioc_weight_show,
  2900. .write = ioc_weight_write,
  2901. },
  2902. {
  2903. .name = "cost.qos",
  2904. .flags = CFTYPE_ONLY_ON_ROOT,
  2905. .seq_show = ioc_qos_show,
  2906. .write = ioc_qos_write,
  2907. },
  2908. {
  2909. .name = "cost.model",
  2910. .flags = CFTYPE_ONLY_ON_ROOT,
  2911. .seq_show = ioc_cost_model_show,
  2912. .write = ioc_cost_model_write,
  2913. },
  2914. {}
  2915. };
  2916. static struct blkcg_policy blkcg_policy_iocost = {
  2917. .dfl_cftypes = ioc_files,
  2918. .cpd_alloc_fn = ioc_cpd_alloc,
  2919. .cpd_free_fn = ioc_cpd_free,
  2920. .pd_alloc_fn = ioc_pd_alloc,
  2921. .pd_init_fn = ioc_pd_init,
  2922. .pd_free_fn = ioc_pd_free,
  2923. .pd_stat_fn = ioc_pd_stat,
  2924. };
  2925. static int __init ioc_init(void)
  2926. {
  2927. return blkcg_policy_register(&blkcg_policy_iocost);
  2928. }
  2929. static void __exit ioc_exit(void)
  2930. {
  2931. blkcg_policy_unregister(&blkcg_policy_iocost);
  2932. }
  2933. module_init(ioc_init);
  2934. module_exit(ioc_exit);