super.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * bcache setup/teardown code, and some metadata io - read a superblock and
  4. * figure out what to do with it.
  5. *
  6. * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
  7. * Copyright 2012 Google, Inc.
  8. */
  9. #include "bcache.h"
  10. #include "btree.h"
  11. #include "debug.h"
  12. #include "extents.h"
  13. #include "request.h"
  14. #include "writeback.h"
  15. #include "features.h"
  16. #include <linux/blkdev.h>
  17. #include <linux/debugfs.h>
  18. #include <linux/genhd.h>
  19. #include <linux/idr.h>
  20. #include <linux/kthread.h>
  21. #include <linux/workqueue.h>
  22. #include <linux/module.h>
  23. #include <linux/random.h>
  24. #include <linux/reboot.h>
  25. #include <linux/sysfs.h>
  26. unsigned int bch_cutoff_writeback;
  27. unsigned int bch_cutoff_writeback_sync;
  28. static const char bcache_magic[] = {
  29. 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
  30. 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
  31. };
  32. static const char invalid_uuid[] = {
  33. 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
  34. 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
  35. };
  36. static struct kobject *bcache_kobj;
  37. struct mutex bch_register_lock;
  38. bool bcache_is_reboot;
  39. LIST_HEAD(bch_cache_sets);
  40. static LIST_HEAD(uncached_devices);
  41. static int bcache_major;
  42. static DEFINE_IDA(bcache_device_idx);
  43. static wait_queue_head_t unregister_wait;
  44. struct workqueue_struct *bcache_wq;
  45. struct workqueue_struct *bch_flush_wq;
  46. struct workqueue_struct *bch_journal_wq;
  47. #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
  48. /* limitation of partitions number on single bcache device */
  49. #define BCACHE_MINORS 128
  50. /* limitation of bcache devices number on single system */
  51. #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
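/*
 * For example: with MINORBITS == 20 this works out to
 * (1 << 20) / 128 = 8192 bcache devices per system, each of which may
 * use up to BCACHE_MINORS (128) minor numbers for its partitions.
 */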
  52. /* Superblock */
  53. static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
  54. {
  55. unsigned int bucket_size = le16_to_cpu(s->bucket_size);
  56. if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
  57. if (bch_has_feature_large_bucket(sb)) {
  58. unsigned int max, order;
  59. max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
  60. order = le16_to_cpu(s->bucket_size);
61. /*
62. * The bcache tool ensures this overflow cannot
63. * happen; an error message here is enough.
64. */
  65. if (order > max)
  66. pr_err("Bucket size (1 << %u) overflows\n",
  67. order);
  68. bucket_size = 1 << order;
  69. } else if (bch_has_feature_obso_large_bucket(sb)) {
  70. bucket_size +=
  71. le16_to_cpu(s->obso_bucket_size_hi) << 16;
  72. }
  73. }
  74. return bucket_size;
  75. }
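/*
 * Worked example of the two encodings handled above: with the
 * large_bucket feature, s->bucket_size stores the log2 of the bucket
 * size in sectors, so a stored value of 13 decodes to 1 << 13 = 8192
 * sectors (4 MiB with 512-byte sectors). Without it, s->bucket_size
 * (optionally extended by obso_bucket_size_hi for the obsolete layout)
 * is the bucket size in sectors directly.
 */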
  76. static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
  77. struct cache_sb_disk *s)
  78. {
  79. const char *err;
  80. unsigned int i;
  81. sb->first_bucket= le16_to_cpu(s->first_bucket);
  82. sb->nbuckets = le64_to_cpu(s->nbuckets);
  83. sb->bucket_size = get_bucket_size(sb, s);
  84. sb->nr_in_set = le16_to_cpu(s->nr_in_set);
  85. sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
  86. err = "Too many journal buckets";
  87. if (sb->keys > SB_JOURNAL_BUCKETS)
  88. goto err;
  89. err = "Too many buckets";
  90. if (sb->nbuckets > LONG_MAX)
  91. goto err;
  92. err = "Not enough buckets";
  93. if (sb->nbuckets < 1 << 7)
  94. goto err;
  95. err = "Bad block size (not power of 2)";
  96. if (!is_power_of_2(sb->block_size))
  97. goto err;
  98. err = "Bad block size (larger than page size)";
  99. if (sb->block_size > PAGE_SECTORS)
  100. goto err;
  101. err = "Bad bucket size (not power of 2)";
  102. if (!is_power_of_2(sb->bucket_size))
  103. goto err;
  104. err = "Bad bucket size (smaller than page size)";
  105. if (sb->bucket_size < PAGE_SECTORS)
  106. goto err;
  107. err = "Invalid superblock: device too small";
  108. if (get_capacity(bdev->bd_disk) <
  109. sb->bucket_size * sb->nbuckets)
  110. goto err;
  111. err = "Bad UUID";
  112. if (bch_is_zero(sb->set_uuid, 16))
  113. goto err;
  114. err = "Bad cache device number in set";
  115. if (!sb->nr_in_set ||
  116. sb->nr_in_set <= sb->nr_this_dev ||
  117. sb->nr_in_set > MAX_CACHES_PER_SET)
  118. goto err;
  119. err = "Journal buckets not sequential";
  120. for (i = 0; i < sb->keys; i++)
  121. if (sb->d[i] != sb->first_bucket + i)
  122. goto err;
  123. err = "Too many journal buckets";
  124. if (sb->first_bucket + sb->keys > sb->nbuckets)
  125. goto err;
  126. err = "Invalid superblock: first bucket comes before end of super";
  127. if (sb->first_bucket * sb->bucket_size < 16)
  128. goto err;
  129. err = NULL;
  130. err:
  131. return err;
  132. }
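/*
 * The final check above enforces the on-disk layout: the superblock
 * occupies sectors 8..15 (SB_SECTOR is 8 and SB_SIZE is 4096 bytes in
 * the bcache headers), so the first bucket, which starts at sector
 * first_bucket * bucket_size, must begin at sector 16 or later, i.e.
 * after the end of the superblock.
 */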
  133. static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
  134. struct cache_sb_disk **res)
  135. {
  136. const char *err;
  137. struct cache_sb_disk *s;
  138. struct page *page;
  139. unsigned int i;
  140. page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
  141. SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
  142. if (IS_ERR(page))
  143. return "IO error";
  144. s = page_address(page) + offset_in_page(SB_OFFSET);
  145. sb->offset = le64_to_cpu(s->offset);
  146. sb->version = le64_to_cpu(s->version);
  147. memcpy(sb->magic, s->magic, 16);
  148. memcpy(sb->uuid, s->uuid, 16);
  149. memcpy(sb->set_uuid, s->set_uuid, 16);
  150. memcpy(sb->label, s->label, SB_LABEL_SIZE);
  151. sb->flags = le64_to_cpu(s->flags);
  152. sb->seq = le64_to_cpu(s->seq);
  153. sb->last_mount = le32_to_cpu(s->last_mount);
  154. sb->keys = le16_to_cpu(s->keys);
  155. for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
  156. sb->d[i] = le64_to_cpu(s->d[i]);
  157. pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
  158. sb->version, sb->flags, sb->seq, sb->keys);
  159. err = "Not a bcache superblock (bad offset)";
  160. if (sb->offset != SB_SECTOR)
  161. goto err;
  162. err = "Not a bcache superblock (bad magic)";
  163. if (memcmp(sb->magic, bcache_magic, 16))
  164. goto err;
  165. err = "Bad checksum";
  166. if (s->csum != csum_set(s))
  167. goto err;
  168. err = "Bad UUID";
  169. if (bch_is_zero(sb->uuid, 16))
  170. goto err;
  171. sb->block_size = le16_to_cpu(s->block_size);
  172. err = "Superblock block size smaller than device block size";
  173. if (sb->block_size << 9 < bdev_logical_block_size(bdev))
  174. goto err;
  175. switch (sb->version) {
  176. case BCACHE_SB_VERSION_BDEV:
  177. sb->data_offset = BDEV_DATA_START_DEFAULT;
  178. break;
  179. case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
  180. case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
  181. sb->data_offset = le64_to_cpu(s->data_offset);
  182. err = "Bad data offset";
  183. if (sb->data_offset < BDEV_DATA_START_DEFAULT)
  184. goto err;
  185. break;
  186. case BCACHE_SB_VERSION_CDEV:
  187. case BCACHE_SB_VERSION_CDEV_WITH_UUID:
  188. err = read_super_common(sb, bdev, s);
  189. if (err)
  190. goto err;
  191. break;
  192. case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
193. /*
194. * Feature bits are needed in read_super_common(),
195. * so convert them first.
196. */
  197. sb->feature_compat = le64_to_cpu(s->feature_compat);
  198. sb->feature_incompat = le64_to_cpu(s->feature_incompat);
  199. sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
200. /* Check for unknown feature bits */
  201. err = "Unsupported compatible feature found";
  202. if (bch_has_unknown_compat_features(sb))
  203. goto err;
  204. err = "Unsupported read-only compatible feature found";
  205. if (bch_has_unknown_ro_compat_features(sb))
  206. goto err;
  207. err = "Unsupported incompatible feature found";
  208. if (bch_has_unknown_incompat_features(sb))
  209. goto err;
  210. err = read_super_common(sb, bdev, s);
  211. if (err)
  212. goto err;
  213. break;
  214. default:
  215. err = "Unsupported superblock version";
  216. goto err;
  217. }
  218. sb->last_mount = (u32)ktime_get_real_seconds();
  219. *res = s;
  220. return NULL;
  221. err:
  222. put_page(page);
  223. return err;
  224. }
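/*
 * Note on the page lifetime above: on success the page obtained from
 * read_cache_page_gfp() is intentionally kept pinned and handed back
 * through *res, so callers can reuse the same struct cache_sb_disk for
 * later superblock writes; it is released with put_page() on the saved
 * sb_disk pointer when the device is freed. Only the error path drops
 * the page here.
 */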
  225. static void write_bdev_super_endio(struct bio *bio)
  226. {
  227. struct cached_dev *dc = bio->bi_private;
  228. if (bio->bi_status)
  229. bch_count_backing_io_errors(dc, bio);
  230. closure_put(&dc->sb_write);
  231. }
  232. static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
  233. struct bio *bio)
  234. {
  235. unsigned int i;
  236. bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
  237. bio->bi_iter.bi_sector = SB_SECTOR;
  238. __bio_add_page(bio, virt_to_page(out), SB_SIZE,
  239. offset_in_page(out));
  240. out->offset = cpu_to_le64(sb->offset);
  241. memcpy(out->uuid, sb->uuid, 16);
  242. memcpy(out->set_uuid, sb->set_uuid, 16);
  243. memcpy(out->label, sb->label, SB_LABEL_SIZE);
  244. out->flags = cpu_to_le64(sb->flags);
  245. out->seq = cpu_to_le64(sb->seq);
  246. out->last_mount = cpu_to_le32(sb->last_mount);
  247. out->first_bucket = cpu_to_le16(sb->first_bucket);
  248. out->keys = cpu_to_le16(sb->keys);
  249. for (i = 0; i < sb->keys; i++)
  250. out->d[i] = cpu_to_le64(sb->d[i]);
  251. if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
  252. out->feature_compat = cpu_to_le64(sb->feature_compat);
  253. out->feature_incompat = cpu_to_le64(sb->feature_incompat);
  254. out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
  255. }
  256. out->version = cpu_to_le64(sb->version);
  257. out->csum = csum_set(out);
  258. pr_debug("ver %llu, flags %llu, seq %llu\n",
  259. sb->version, sb->flags, sb->seq);
  260. submit_bio(bio);
  261. }
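/*
 * __write_super() fills out->csum last, after every other field has
 * been byte-swapped into place, so the on-disk checksum covers the
 * final encoded contents of the superblock.
 */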
  262. static void bch_write_bdev_super_unlock(struct closure *cl)
  263. {
  264. struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
  265. up(&dc->sb_write_mutex);
  266. }
  267. void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
  268. {
  269. struct closure *cl = &dc->sb_write;
  270. struct bio *bio = &dc->sb_bio;
  271. down(&dc->sb_write_mutex);
  272. closure_init(cl, parent);
  273. bio_init(bio, dc->sb_bv, 1);
  274. bio_set_dev(bio, dc->bdev);
  275. bio->bi_end_io = write_bdev_super_endio;
  276. bio->bi_private = dc;
  277. closure_get(cl);
  278. /* I/O request sent to backing device */
  279. __write_super(&dc->sb, dc->sb_disk, bio);
  280. closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
  281. }
  282. static void write_super_endio(struct bio *bio)
  283. {
  284. struct cache *ca = bio->bi_private;
  285. /* is_read = 0 */
  286. bch_count_io_errors(ca, bio->bi_status, 0,
  287. "writing superblock");
  288. closure_put(&ca->set->sb_write);
  289. }
  290. static void bcache_write_super_unlock(struct closure *cl)
  291. {
  292. struct cache_set *c = container_of(cl, struct cache_set, sb_write);
  293. up(&c->sb_write_mutex);
  294. }
  295. void bcache_write_super(struct cache_set *c)
  296. {
  297. struct closure *cl = &c->sb_write;
  298. struct cache *ca = c->cache;
  299. struct bio *bio = &ca->sb_bio;
  300. unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
  301. down(&c->sb_write_mutex);
  302. closure_init(cl, &c->cl);
  303. ca->sb.seq++;
  304. if (ca->sb.version < version)
  305. ca->sb.version = version;
  306. bio_init(bio, ca->sb_bv, 1);
  307. bio_set_dev(bio, ca->bdev);
  308. bio->bi_end_io = write_super_endio;
  309. bio->bi_private = ca;
  310. closure_get(cl);
  311. __write_super(&ca->sb, ca->sb_disk, bio);
  312. closure_return_with_destructor(cl, bcache_write_super_unlock);
  313. }
  314. /* UUID io */
  315. static void uuid_endio(struct bio *bio)
  316. {
  317. struct closure *cl = bio->bi_private;
  318. struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
  319. cache_set_err_on(bio->bi_status, c, "accessing uuids");
  320. bch_bbio_free(bio, c);
  321. closure_put(cl);
  322. }
  323. static void uuid_io_unlock(struct closure *cl)
  324. {
  325. struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
  326. up(&c->uuid_write_mutex);
  327. }
  328. static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
  329. struct bkey *k, struct closure *parent)
  330. {
  331. struct closure *cl = &c->uuid_write;
  332. struct uuid_entry *u;
  333. unsigned int i;
  334. char buf[80];
  335. BUG_ON(!parent);
  336. down(&c->uuid_write_mutex);
  337. closure_init(cl, parent);
  338. for (i = 0; i < KEY_PTRS(k); i++) {
  339. struct bio *bio = bch_bbio_alloc(c);
  340. bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
  341. bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
  342. bio->bi_end_io = uuid_endio;
  343. bio->bi_private = cl;
  344. bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
  345. bch_bio_map(bio, c->uuids);
  346. bch_submit_bbio(bio, c, k, i);
  347. if (op != REQ_OP_WRITE)
  348. break;
  349. }
  350. bch_extent_to_text(buf, sizeof(buf), k);
  351. pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
  352. for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
  353. if (!bch_is_zero(u->uuid, 16))
  354. pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
  355. u - c->uuids, u->uuid, u->label,
  356. u->first_reg, u->last_reg, u->invalidated);
  357. closure_return_with_destructor(cl, uuid_io_unlock);
  358. }
  359. static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
  360. {
  361. struct bkey *k = &j->uuid_bucket;
  362. if (__bch_btree_ptr_invalid(c, k))
  363. return "bad uuid pointer";
  364. bkey_copy(&c->uuid_bucket, k);
  365. uuid_io(c, REQ_OP_READ, 0, k, cl);
  366. if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
  367. struct uuid_entry_v0 *u0 = (void *) c->uuids;
  368. struct uuid_entry *u1 = (void *) c->uuids;
  369. int i;
  370. closure_sync(cl);
  371. /*
  372. * Since the new uuid entry is bigger than the old, we have to
  373. * convert starting at the highest memory address and work down
  374. * in order to do it in place
  375. */
  376. for (i = c->nr_uuids - 1;
  377. i >= 0;
  378. --i) {
  379. memcpy(u1[i].uuid, u0[i].uuid, 16);
  380. memcpy(u1[i].label, u0[i].label, 32);
  381. u1[i].first_reg = u0[i].first_reg;
  382. u1[i].last_reg = u0[i].last_reg;
  383. u1[i].invalidated = u0[i].invalidated;
  384. u1[i].flags = 0;
  385. u1[i].sectors = 0;
  386. }
  387. }
  388. return NULL;
  389. }
  390. static int __uuid_write(struct cache_set *c)
  391. {
  392. BKEY_PADDED(key) k;
  393. struct closure cl;
  394. struct cache *ca = c->cache;
  395. unsigned int size;
  396. closure_init_stack(&cl);
  397. lockdep_assert_held(&bch_register_lock);
  398. if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
  399. return 1;
  400. size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
  401. SET_KEY_SIZE(&k.key, size);
  402. uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
  403. closure_sync(&cl);
  404. /* Only one bucket used for uuid write */
  405. atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
  406. bkey_copy(&c->uuid_bucket, &k.key);
  407. bkey_put(c, &k.key);
  408. return 0;
  409. }
  410. int bch_uuid_write(struct cache_set *c)
  411. {
  412. int ret = __uuid_write(c);
  413. if (!ret)
  414. bch_journal_meta(c, NULL);
  415. return ret;
  416. }
  417. static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
  418. {
  419. struct uuid_entry *u;
  420. for (u = c->uuids;
  421. u < c->uuids + c->nr_uuids; u++)
  422. if (!memcmp(u->uuid, uuid, 16))
  423. return u;
  424. return NULL;
  425. }
  426. static struct uuid_entry *uuid_find_empty(struct cache_set *c)
  427. {
  428. static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
  429. return uuid_find(c, zero_uuid);
  430. }
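/*
 * An all-zero uuid marks an unused slot in c->uuids, so
 * uuid_find_empty() simply reuses uuid_find() with a zeroed key to
 * locate the first free slot (used when attaching a new backing device
 * or creating a flash-only volume).
 */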
  431. /*
  432. * Bucket priorities/gens:
  433. *
  434. * For each bucket, we store on disk its
  435. * 8 bit gen
  436. * 16 bit priority
  437. *
  438. * See alloc.c for an explanation of the gen. The priority is used to implement
  439. * lru (and in the future other) cache replacement policies; for most purposes
  440. * it's just an opaque integer.
  441. *
  442. * The gens and the priorities don't have a whole lot to do with each other, and
  443. * it's actually the gens that must be written out at specific times - it's no
  444. * big deal if the priorities don't get written, if we lose them we just reuse
  445. * buckets in suboptimal order.
  446. *
447. * On disk they're stored in a packed array, and in as many buckets as are required
  448. * to fit them all. The buckets we use to store them form a list; the journal
  449. * header points to the first bucket, the first bucket points to the second
  450. * bucket, et cetera.
  451. *
  452. * This code is used by the allocation code; periodically (whenever it runs out
  453. * of buckets to allocate from) the allocation code will invalidate some
  454. * buckets, but it can't use those buckets until their new gens are safely on
  455. * disk.
  456. */
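/*
 * A rough sketch of the resulting on-disk chain, one struct prio_set
 * per bucket, each holding prios_per_bucket(ca) {prio, gen} pairs:
 *
 *   journal header -> prio bucket 0 -> prio bucket 1 -> ...
 *                      (next_bucket)    (next_bucket)
 *
 * prio_read() below walks this chain front to back; bch_prio_write()
 * writes it back to front, so each next_bucket always refers to a
 * bucket that has already been written.
 */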
  457. static void prio_endio(struct bio *bio)
  458. {
  459. struct cache *ca = bio->bi_private;
  460. cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
  461. bch_bbio_free(bio, ca->set);
  462. closure_put(&ca->prio);
  463. }
  464. static void prio_io(struct cache *ca, uint64_t bucket, int op,
  465. unsigned long op_flags)
  466. {
  467. struct closure *cl = &ca->prio;
  468. struct bio *bio = bch_bbio_alloc(ca->set);
  469. closure_init_stack(cl);
  470. bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
  471. bio_set_dev(bio, ca->bdev);
  472. bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);
  473. bio->bi_end_io = prio_endio;
  474. bio->bi_private = ca;
  475. bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
  476. bch_bio_map(bio, ca->disk_buckets);
  477. closure_bio_submit(ca->set, bio, &ca->prio);
  478. closure_sync(cl);
  479. }
  480. int bch_prio_write(struct cache *ca, bool wait)
  481. {
  482. int i;
  483. struct bucket *b;
  484. struct closure cl;
  485. pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
  486. fifo_used(&ca->free[RESERVE_PRIO]),
  487. fifo_used(&ca->free[RESERVE_NONE]),
  488. fifo_used(&ca->free_inc));
489. /*
490. * Pre-check whether there are enough free buckets. In the non-blocking
491. * case it's better to fail early than to start allocating buckets and
492. * have to clean up later on failure.
493. */
  494. if (!wait) {
  495. size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
  496. fifo_used(&ca->free[RESERVE_NONE]);
  497. if (prio_buckets(ca) > avail)
  498. return -ENOMEM;
  499. }
  500. closure_init_stack(&cl);
  501. lockdep_assert_held(&ca->set->bucket_lock);
  502. ca->disk_buckets->seq++;
  503. atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
  504. &ca->meta_sectors_written);
  505. for (i = prio_buckets(ca) - 1; i >= 0; --i) {
  506. long bucket;
  507. struct prio_set *p = ca->disk_buckets;
  508. struct bucket_disk *d = p->data;
  509. struct bucket_disk *end = d + prios_per_bucket(ca);
  510. for (b = ca->buckets + i * prios_per_bucket(ca);
  511. b < ca->buckets + ca->sb.nbuckets && d < end;
  512. b++, d++) {
  513. d->prio = cpu_to_le16(b->prio);
  514. d->gen = b->gen;
  515. }
  516. p->next_bucket = ca->prio_buckets[i + 1];
  517. p->magic = pset_magic(&ca->sb);
  518. p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);
  519. bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
  520. BUG_ON(bucket == -1);
  521. mutex_unlock(&ca->set->bucket_lock);
  522. prio_io(ca, bucket, REQ_OP_WRITE, 0);
  523. mutex_lock(&ca->set->bucket_lock);
  524. ca->prio_buckets[i] = bucket;
  525. atomic_dec_bug(&ca->buckets[bucket].pin);
  526. }
  527. mutex_unlock(&ca->set->bucket_lock);
  528. bch_journal_meta(ca->set, &cl);
  529. closure_sync(&cl);
  530. mutex_lock(&ca->set->bucket_lock);
531. /*
532. * Don't let the old priorities get garbage collected until after we
533. * finish writing the new ones and they have been journalled.
534. */
  535. for (i = 0; i < prio_buckets(ca); i++) {
  536. if (ca->prio_last_buckets[i])
  537. __bch_bucket_free(ca,
  538. &ca->buckets[ca->prio_last_buckets[i]]);
  539. ca->prio_last_buckets[i] = ca->prio_buckets[i];
  540. }
  541. return 0;
  542. }
  543. static int prio_read(struct cache *ca, uint64_t bucket)
  544. {
  545. struct prio_set *p = ca->disk_buckets;
  546. struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
  547. struct bucket *b;
  548. unsigned int bucket_nr = 0;
  549. int ret = -EIO;
  550. for (b = ca->buckets;
  551. b < ca->buckets + ca->sb.nbuckets;
  552. b++, d++) {
  553. if (d == end) {
  554. ca->prio_buckets[bucket_nr] = bucket;
  555. ca->prio_last_buckets[bucket_nr] = bucket;
  556. bucket_nr++;
  557. prio_io(ca, bucket, REQ_OP_READ, 0);
  558. if (p->csum !=
  559. bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
  560. pr_warn("bad csum reading priorities\n");
  561. goto out;
  562. }
  563. if (p->magic != pset_magic(&ca->sb)) {
  564. pr_warn("bad magic reading priorities\n");
  565. goto out;
  566. }
  567. bucket = p->next_bucket;
  568. d = p->data;
  569. }
  570. b->prio = le16_to_cpu(d->prio);
  571. b->gen = b->last_gc = d->gen;
  572. }
  573. ret = 0;
  574. out:
  575. return ret;
  576. }
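/*
 * Note: prio_read() starts with d == end (d is initialised one past the
 * last entry of an as-yet-unread buffer), so the very first loop
 * iteration immediately takes the "d == end" branch and reads the first
 * prio bucket handed in from the journal before decoding any entries.
 */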
  577. /* Bcache device */
  578. static int open_dev(struct block_device *b, fmode_t mode)
  579. {
  580. struct bcache_device *d = b->bd_disk->private_data;
  581. if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
  582. return -ENXIO;
  583. closure_get(&d->cl);
  584. return 0;
  585. }
  586. static void release_dev(struct gendisk *b, fmode_t mode)
  587. {
  588. struct bcache_device *d = b->private_data;
  589. closure_put(&d->cl);
  590. }
  591. static int ioctl_dev(struct block_device *b, fmode_t mode,
  592. unsigned int cmd, unsigned long arg)
  593. {
  594. struct bcache_device *d = b->bd_disk->private_data;
  595. return d->ioctl(d, mode, cmd, arg);
  596. }
  597. static const struct block_device_operations bcache_cached_ops = {
  598. .submit_bio = cached_dev_submit_bio,
  599. .open = open_dev,
  600. .release = release_dev,
  601. .ioctl = ioctl_dev,
  602. .owner = THIS_MODULE,
  603. };
  604. static const struct block_device_operations bcache_flash_ops = {
  605. .submit_bio = flash_dev_submit_bio,
  606. .open = open_dev,
  607. .release = release_dev,
  608. .ioctl = ioctl_dev,
  609. .owner = THIS_MODULE,
  610. };
  611. void bcache_device_stop(struct bcache_device *d)
  612. {
  613. if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
  614. /*
  615. * closure_fn set to
  616. * - cached device: cached_dev_flush()
  617. * - flash dev: flash_dev_flush()
  618. */
  619. closure_queue(&d->cl);
  620. }
  621. static void bcache_device_unlink(struct bcache_device *d)
  622. {
  623. lockdep_assert_held(&bch_register_lock);
  624. if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
  625. struct cache *ca = d->c->cache;
  626. sysfs_remove_link(&d->c->kobj, d->name);
  627. sysfs_remove_link(&d->kobj, "cache");
  628. bd_unlink_disk_holder(ca->bdev, d->disk);
  629. }
  630. }
  631. static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
  632. const char *name)
  633. {
  634. struct cache *ca = c->cache;
  635. int ret;
  636. bd_link_disk_holder(ca->bdev, d->disk);
  637. snprintf(d->name, BCACHEDEVNAME_SIZE,
  638. "%s%u", name, d->id);
  639. ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
  640. if (ret < 0)
  641. pr_err("Couldn't create device -> cache set symlink\n");
  642. ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
  643. if (ret < 0)
  644. pr_err("Couldn't create cache set -> device symlink\n");
  645. clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
  646. }
  647. static void bcache_device_detach(struct bcache_device *d)
  648. {
  649. lockdep_assert_held(&bch_register_lock);
  650. atomic_dec(&d->c->attached_dev_nr);
  651. if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
  652. struct uuid_entry *u = d->c->uuids + d->id;
  653. SET_UUID_FLASH_ONLY(u, 0);
  654. memcpy(u->uuid, invalid_uuid, 16);
  655. u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
  656. bch_uuid_write(d->c);
  657. }
  658. bcache_device_unlink(d);
  659. d->c->devices[d->id] = NULL;
  660. closure_put(&d->c->caching);
  661. d->c = NULL;
  662. }
  663. static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
  664. unsigned int id)
  665. {
  666. d->id = id;
  667. d->c = c;
  668. c->devices[id] = d;
  669. if (id >= c->devices_max_used)
  670. c->devices_max_used = id + 1;
  671. closure_get(&c->caching);
  672. }
  673. static inline int first_minor_to_idx(int first_minor)
  674. {
  675. return (first_minor/BCACHE_MINORS);
  676. }
  677. static inline int idx_to_first_minor(int idx)
  678. {
  679. return (idx * BCACHE_MINORS);
  680. }
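/*
 * Example of the idx <-> minor mapping above: with BCACHE_MINORS == 128,
 * device index 2 owns first_minor 256, i.e. minor numbers 256..383 for
 * /dev/bcache2 and its partitions; first_minor_to_idx(256) maps back to 2.
 */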
  681. static void bcache_device_free(struct bcache_device *d)
  682. {
  683. struct gendisk *disk = d->disk;
  684. lockdep_assert_held(&bch_register_lock);
  685. if (disk)
  686. pr_info("%s stopped\n", disk->disk_name);
  687. else
  688. pr_err("bcache device (NULL gendisk) stopped\n");
  689. if (d->c)
  690. bcache_device_detach(d);
  691. if (disk) {
  692. bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
  693. if (disk_added)
  694. del_gendisk(disk);
  695. if (disk->queue)
  696. blk_cleanup_queue(disk->queue);
  697. ida_simple_remove(&bcache_device_idx,
  698. first_minor_to_idx(disk->first_minor));
  699. if (disk_added)
  700. put_disk(disk);
  701. }
  702. bioset_exit(&d->bio_split);
  703. kvfree(d->full_dirty_stripes);
  704. kvfree(d->stripe_sectors_dirty);
  705. closure_debug_destroy(&d->cl);
  706. }
  707. static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
  708. sector_t sectors, struct block_device *cached_bdev,
  709. const struct block_device_operations *ops)
  710. {
  711. struct request_queue *q;
  712. const size_t max_stripes = min_t(size_t, INT_MAX,
  713. SIZE_MAX / sizeof(atomic_t));
  714. uint64_t n;
  715. int idx;
  716. if (!d->stripe_size)
  717. d->stripe_size = 1 << 31;
  718. n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
  719. if (!n || n > max_stripes) {
  720. pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
  721. n);
  722. return -ENOMEM;
  723. }
  724. d->nr_stripes = n;
  725. n = d->nr_stripes * sizeof(atomic_t);
  726. d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
  727. if (!d->stripe_sectors_dirty)
  728. return -ENOMEM;
  729. n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
  730. d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
  731. if (!d->full_dirty_stripes)
  732. goto out_free_stripe_sectors_dirty;
  733. idx = ida_simple_get(&bcache_device_idx, 0,
  734. BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
  735. if (idx < 0)
  736. goto out_free_full_dirty_stripes;
  737. if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
  738. BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
  739. goto out_ida_remove;
  740. d->disk = alloc_disk(BCACHE_MINORS);
  741. if (!d->disk)
  742. goto out_bioset_exit;
  743. set_capacity(d->disk, sectors);
  744. snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
  745. d->disk->major = bcache_major;
  746. d->disk->first_minor = idx_to_first_minor(idx);
  747. d->disk->fops = ops;
  748. d->disk->private_data = d;
  749. q = blk_alloc_queue(NUMA_NO_NODE);
  750. if (!q)
  751. return -ENOMEM;
  752. d->disk->queue = q;
  753. q->limits.max_hw_sectors = UINT_MAX;
  754. q->limits.max_sectors = UINT_MAX;
  755. q->limits.max_segment_size = UINT_MAX;
  756. q->limits.max_segments = BIO_MAX_PAGES;
  757. blk_queue_max_discard_sectors(q, UINT_MAX);
  758. q->limits.discard_granularity = 512;
  759. q->limits.io_min = block_size;
  760. q->limits.logical_block_size = block_size;
  761. q->limits.physical_block_size = block_size;
  762. if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
  763. /*
  764. * This should only happen with BCACHE_SB_VERSION_BDEV.
  765. * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
  766. */
  767. pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
  768. d->disk->disk_name, q->limits.logical_block_size,
  769. PAGE_SIZE, bdev_logical_block_size(cached_bdev));
  770. /* This also adjusts physical block size/min io size if needed */
  771. blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
  772. }
  773. blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
  774. blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
  775. blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
  776. blk_queue_write_cache(q, true, true);
  777. return 0;
  778. out_bioset_exit:
  779. bioset_exit(&d->bio_split);
  780. out_ida_remove:
  781. ida_simple_remove(&bcache_device_idx, idx);
  782. out_free_full_dirty_stripes:
  783. kvfree(d->full_dirty_stripes);
  784. out_free_stripe_sectors_dirty:
  785. kvfree(d->stripe_sectors_dirty);
  786. return -ENOMEM;
  787. }
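/*
 * Sizing example for the stripe bookkeeping above: the default
 * d->stripe_size of 1 << 31 sectors is 1 TiB, so e.g. a 4 TiB backing
 * device without a reported io_opt gets nr_stripes = 4, with one
 * atomic_t of dirty-sector count and one full_dirty_stripes bit per
 * stripe.
 */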
  788. /* Cached device */
  789. static void calc_cached_dev_sectors(struct cache_set *c)
  790. {
  791. uint64_t sectors = 0;
  792. struct cached_dev *dc;
  793. list_for_each_entry(dc, &c->cached_devs, list)
  794. sectors += bdev_sectors(dc->bdev);
  795. c->cached_dev_sectors = sectors;
  796. }
  797. #define BACKING_DEV_OFFLINE_TIMEOUT 5
  798. static int cached_dev_status_update(void *arg)
  799. {
  800. struct cached_dev *dc = arg;
  801. struct request_queue *q;
802. /*
803. * If this kthread is being stopped from outside, quit directly.
804. * dc->io_disable might also be set via the sysfs interface, so
805. * check it here too.
806. */
  807. while (!kthread_should_stop() && !dc->io_disable) {
  808. q = bdev_get_queue(dc->bdev);
  809. if (blk_queue_dying(q))
  810. dc->offline_seconds++;
  811. else
  812. dc->offline_seconds = 0;
  813. if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
  814. pr_err("%s: device offline for %d seconds\n",
  815. dc->backing_dev_name,
  816. BACKING_DEV_OFFLINE_TIMEOUT);
  817. pr_err("%s: disable I/O request due to backing device offline\n",
  818. dc->disk.name);
  819. dc->io_disable = true;
  820. /* let others know earlier that io_disable is true */
  821. smp_mb();
  822. bcache_device_stop(&dc->disk);
  823. break;
  824. }
  825. schedule_timeout_interruptible(HZ);
  826. }
  827. wait_for_kthread_stop();
  828. return 0;
  829. }
  830. int bch_cached_dev_run(struct cached_dev *dc)
  831. {
  832. struct bcache_device *d = &dc->disk;
  833. char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
  834. char *env[] = {
  835. "DRIVER=bcache",
  836. kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
  837. kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
  838. NULL,
  839. };
  840. if (dc->io_disable) {
  841. pr_err("I/O disabled on cached dev %s\n",
  842. dc->backing_dev_name);
  843. kfree(env[1]);
  844. kfree(env[2]);
  845. kfree(buf);
  846. return -EIO;
  847. }
  848. if (atomic_xchg(&dc->running, 1)) {
  849. kfree(env[1]);
  850. kfree(env[2]);
  851. kfree(buf);
  852. pr_info("cached dev %s is running already\n",
  853. dc->backing_dev_name);
  854. return -EBUSY;
  855. }
  856. if (!d->c &&
  857. BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
  858. struct closure cl;
  859. closure_init_stack(&cl);
  860. SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
  861. bch_write_bdev_super(dc, &cl);
  862. closure_sync(&cl);
  863. }
  864. add_disk(d->disk);
  865. bd_link_disk_holder(dc->bdev, dc->disk.disk);
866. /*
867. * These env variables won't show up in the uevent file; only class /
868. * kset properties are persistent. Use "udevadm monitor -e" to see them.
869. */
  870. kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
  871. kfree(env[1]);
  872. kfree(env[2]);
  873. kfree(buf);
  874. if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
  875. sysfs_create_link(&disk_to_dev(d->disk)->kobj,
  876. &d->kobj, "bcache")) {
  877. pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
  878. return -ENOMEM;
  879. }
  880. dc->status_update_thread = kthread_run(cached_dev_status_update,
  881. dc, "bcache_status_update");
  882. if (IS_ERR(dc->status_update_thread)) {
  883. pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
  884. }
  885. return 0;
  886. }
  887. /*
  888. * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
  889. * work dc->writeback_rate_update is running. Wait until the routine
  890. * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
  891. * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
  892. * seconds, give up waiting here and continue to cancel it too.
  893. */
  894. static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
  895. {
  896. int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
  897. do {
  898. if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
  899. &dc->disk.flags))
  900. break;
  901. time_out--;
  902. schedule_timeout_interruptible(1);
  903. } while (time_out > 0);
  904. if (time_out == 0)
  905. pr_warn("give up waiting for dc->writeback_write_update to quit\n");
  906. cancel_delayed_work_sync(&dc->writeback_rate_update);
  907. }
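/*
 * Timing note: time_out starts at WRITEBACK_RATE_UPDATE_SECS_MAX * HZ
 * and each loop iteration sleeps for one jiffy, so the wait above is
 * bounded at roughly WRITEBACK_RATE_UPDATE_SECS_MAX seconds before we
 * give up and cancel the delayed work anyway.
 */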
  908. static void cached_dev_detach_finish(struct work_struct *w)
  909. {
  910. struct cached_dev *dc = container_of(w, struct cached_dev, detach);
  911. struct closure cl;
  912. closure_init_stack(&cl);
  913. BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
  914. BUG_ON(refcount_read(&dc->count));
  915. if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
  916. cancel_writeback_rate_update_dwork(dc);
  917. if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
  918. kthread_stop(dc->writeback_thread);
  919. dc->writeback_thread = NULL;
  920. }
  921. memset(&dc->sb.set_uuid, 0, 16);
  922. SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
  923. bch_write_bdev_super(dc, &cl);
  924. closure_sync(&cl);
  925. mutex_lock(&bch_register_lock);
  926. calc_cached_dev_sectors(dc->disk.c);
  927. bcache_device_detach(&dc->disk);
  928. list_move(&dc->list, &uncached_devices);
  929. clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
  930. clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
  931. mutex_unlock(&bch_register_lock);
  932. pr_info("Caching disabled for %s\n", dc->backing_dev_name);
  933. /* Drop ref we took in cached_dev_detach() */
  934. closure_put(&dc->disk.cl);
  935. }
  936. void bch_cached_dev_detach(struct cached_dev *dc)
  937. {
  938. lockdep_assert_held(&bch_register_lock);
  939. if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
  940. return;
  941. if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
  942. return;
  943. /*
  944. * Block the device from being closed and freed until we're finished
  945. * detaching
  946. */
  947. closure_get(&dc->disk.cl);
  948. bch_writeback_queue(dc);
  949. cached_dev_put(dc);
  950. }
  951. int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
  952. uint8_t *set_uuid)
  953. {
  954. uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
  955. struct uuid_entry *u;
  956. struct cached_dev *exist_dc, *t;
  957. int ret = 0;
  958. if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
  959. (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
  960. return -ENOENT;
  961. if (dc->disk.c) {
  962. pr_err("Can't attach %s: already attached\n",
  963. dc->backing_dev_name);
  964. return -EINVAL;
  965. }
  966. if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
  967. pr_err("Can't attach %s: shutting down\n",
  968. dc->backing_dev_name);
  969. return -EINVAL;
  970. }
  971. if (dc->sb.block_size < c->cache->sb.block_size) {
  972. /* Will die */
  973. pr_err("Couldn't attach %s: block size less than set's block size\n",
  974. dc->backing_dev_name);
  975. return -EINVAL;
  976. }
  977. /* Check whether already attached */
  978. list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
  979. if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
  980. pr_err("Tried to attach %s but duplicate UUID already attached\n",
  981. dc->backing_dev_name);
  982. return -EINVAL;
  983. }
  984. }
  985. u = uuid_find(c, dc->sb.uuid);
  986. if (u &&
  987. (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
  988. BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
  989. memcpy(u->uuid, invalid_uuid, 16);
  990. u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
  991. u = NULL;
  992. }
  993. if (!u) {
  994. if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
  995. pr_err("Couldn't find uuid for %s in set\n",
  996. dc->backing_dev_name);
  997. return -ENOENT;
  998. }
  999. u = uuid_find_empty(c);
  1000. if (!u) {
  1001. pr_err("Not caching %s, no room for UUID\n",
  1002. dc->backing_dev_name);
  1003. return -EINVAL;
  1004. }
  1005. }
  1006. /*
  1007. * Deadlocks since we're called via sysfs...
  1008. * sysfs_remove_file(&dc->kobj, &sysfs_attach);
  1009. */
  1010. if (bch_is_zero(u->uuid, 16)) {
  1011. struct closure cl;
  1012. closure_init_stack(&cl);
  1013. memcpy(u->uuid, dc->sb.uuid, 16);
  1014. memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
  1015. u->first_reg = u->last_reg = rtime;
  1016. bch_uuid_write(c);
  1017. memcpy(dc->sb.set_uuid, c->set_uuid, 16);
  1018. SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
  1019. bch_write_bdev_super(dc, &cl);
  1020. closure_sync(&cl);
  1021. } else {
  1022. u->last_reg = rtime;
  1023. bch_uuid_write(c);
  1024. }
  1025. bcache_device_attach(&dc->disk, c, u - c->uuids);
  1026. list_move(&dc->list, &c->cached_devs);
  1027. calc_cached_dev_sectors(c);
  1028. /*
  1029. * dc->c must be set before dc->count != 0 - paired with the mb in
  1030. * cached_dev_get()
  1031. */
  1032. smp_wmb();
  1033. refcount_set(&dc->count, 1);
  1034. /* Block writeback thread, but spawn it */
  1035. down_write(&dc->writeback_lock);
  1036. if (bch_cached_dev_writeback_start(dc)) {
  1037. up_write(&dc->writeback_lock);
  1038. pr_err("Couldn't start writeback facilities for %s\n",
  1039. dc->disk.disk->disk_name);
  1040. return -ENOMEM;
  1041. }
  1042. if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
  1043. atomic_set(&dc->has_dirty, 1);
  1044. bch_writeback_queue(dc);
  1045. }
  1046. bch_sectors_dirty_init(&dc->disk);
  1047. ret = bch_cached_dev_run(dc);
  1048. if (ret && (ret != -EBUSY)) {
  1049. up_write(&dc->writeback_lock);
  1050. /*
  1051. * bch_register_lock is held, bcache_device_stop() is not
  1052. * able to be directly called. The kthread and kworker
  1053. * created previously in bch_cached_dev_writeback_start()
  1054. * have to be stopped manually here.
  1055. */
  1056. kthread_stop(dc->writeback_thread);
  1057. cancel_writeback_rate_update_dwork(dc);
  1058. pr_err("Couldn't run cached device %s\n",
  1059. dc->backing_dev_name);
  1060. return ret;
  1061. }
  1062. bcache_device_link(&dc->disk, c, "bdev");
  1063. atomic_inc(&c->attached_dev_nr);
  1064. if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
  1065. pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
  1066. pr_err("Please update to the latest bcache-tools to create the cache device\n");
  1067. set_disk_ro(dc->disk.disk, 1);
  1068. }
  1069. /* Allow the writeback thread to proceed */
  1070. up_write(&dc->writeback_lock);
  1071. pr_info("Caching %s as %s on set %pU\n",
  1072. dc->backing_dev_name,
  1073. dc->disk.disk->disk_name,
  1074. dc->disk.c->set_uuid);
  1075. return 0;
  1076. }
  1077. /* when dc->disk.kobj released */
  1078. void bch_cached_dev_release(struct kobject *kobj)
  1079. {
  1080. struct cached_dev *dc = container_of(kobj, struct cached_dev,
  1081. disk.kobj);
  1082. kfree(dc);
  1083. module_put(THIS_MODULE);
  1084. }
  1085. static void cached_dev_free(struct closure *cl)
  1086. {
  1087. struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
  1088. if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
  1089. cancel_writeback_rate_update_dwork(dc);
  1090. if (!IS_ERR_OR_NULL(dc->writeback_thread))
  1091. kthread_stop(dc->writeback_thread);
  1092. if (!IS_ERR_OR_NULL(dc->status_update_thread))
  1093. kthread_stop(dc->status_update_thread);
  1094. mutex_lock(&bch_register_lock);
  1095. if (atomic_read(&dc->running))
  1096. bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
  1097. bcache_device_free(&dc->disk);
  1098. list_del(&dc->list);
  1099. mutex_unlock(&bch_register_lock);
  1100. if (dc->sb_disk)
  1101. put_page(virt_to_page(dc->sb_disk));
  1102. if (!IS_ERR_OR_NULL(dc->bdev))
  1103. blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
  1104. wake_up(&unregister_wait);
  1105. kobject_put(&dc->disk.kobj);
  1106. }
  1107. static void cached_dev_flush(struct closure *cl)
  1108. {
  1109. struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
  1110. struct bcache_device *d = &dc->disk;
  1111. mutex_lock(&bch_register_lock);
  1112. bcache_device_unlink(d);
  1113. mutex_unlock(&bch_register_lock);
  1114. bch_cache_accounting_destroy(&dc->accounting);
  1115. kobject_del(&d->kobj);
  1116. continue_at(cl, cached_dev_free, system_wq);
  1117. }
  1118. static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
  1119. {
  1120. int ret;
  1121. struct io *io;
  1122. struct request_queue *q = bdev_get_queue(dc->bdev);
  1123. __module_get(THIS_MODULE);
  1124. INIT_LIST_HEAD(&dc->list);
  1125. closure_init(&dc->disk.cl, NULL);
  1126. set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
  1127. kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
  1128. INIT_WORK(&dc->detach, cached_dev_detach_finish);
  1129. sema_init(&dc->sb_write_mutex, 1);
  1130. INIT_LIST_HEAD(&dc->io_lru);
  1131. spin_lock_init(&dc->io_lock);
  1132. bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
  1133. dc->sequential_cutoff = 4 << 20;
  1134. for (io = dc->io; io < dc->io + RECENT_IO; io++) {
  1135. list_add(&io->lru, &dc->io_lru);
  1136. hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
  1137. }
  1138. dc->disk.stripe_size = q->limits.io_opt >> 9;
  1139. if (dc->disk.stripe_size)
  1140. dc->partial_stripes_expensive =
  1141. q->limits.raid_partial_stripes_expensive;
  1142. ret = bcache_device_init(&dc->disk, block_size,
  1143. dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
  1144. dc->bdev, &bcache_cached_ops);
  1145. if (ret)
  1146. return ret;
  1147. blk_queue_io_opt(dc->disk.disk->queue,
  1148. max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
  1149. atomic_set(&dc->io_errors, 0);
  1150. dc->io_disable = false;
  1151. dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
  1152. /* default to auto */
  1153. dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
  1154. bch_cached_dev_request_init(dc);
  1155. bch_cached_dev_writeback_init(dc);
  1156. return 0;
  1157. }
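/*
 * Defaults picked above: dc->sequential_cutoff is 4 << 20 bytes (4 MiB),
 * and the stripe size is taken from the backing queue's io_opt hint,
 * converted from bytes to sectors by the >> 9 shift.
 */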
  1158. /* Cached device - bcache superblock */
  1159. static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
  1160. struct block_device *bdev,
  1161. struct cached_dev *dc)
  1162. {
  1163. const char *err = "cannot allocate memory";
  1164. struct cache_set *c;
  1165. int ret = -ENOMEM;
  1166. bdevname(bdev, dc->backing_dev_name);
  1167. memcpy(&dc->sb, sb, sizeof(struct cache_sb));
  1168. dc->bdev = bdev;
  1169. dc->bdev->bd_holder = dc;
  1170. dc->sb_disk = sb_disk;
  1171. if (cached_dev_init(dc, sb->block_size << 9))
  1172. goto err;
  1173. err = "error creating kobject";
  1174. if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
  1175. "bcache"))
  1176. goto err;
  1177. if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
  1178. goto err;
  1179. pr_info("registered backing device %s\n", dc->backing_dev_name);
  1180. list_add(&dc->list, &uncached_devices);
  1181. /* attach to a matched cache set if it exists */
  1182. list_for_each_entry(c, &bch_cache_sets, list)
  1183. bch_cached_dev_attach(dc, c, NULL);
  1184. if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
  1185. BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
  1186. err = "failed to run cached device";
  1187. ret = bch_cached_dev_run(dc);
  1188. if (ret)
  1189. goto err;
  1190. }
  1191. return 0;
  1192. err:
  1193. pr_notice("error %s: %s\n", dc->backing_dev_name, err);
  1194. bcache_device_stop(&dc->disk);
  1195. return ret;
  1196. }
  1197. /* Flash only volumes */
  1198. /* When d->kobj released */
  1199. void bch_flash_dev_release(struct kobject *kobj)
  1200. {
  1201. struct bcache_device *d = container_of(kobj, struct bcache_device,
  1202. kobj);
  1203. kfree(d);
  1204. }
  1205. static void flash_dev_free(struct closure *cl)
  1206. {
  1207. struct bcache_device *d = container_of(cl, struct bcache_device, cl);
  1208. mutex_lock(&bch_register_lock);
  1209. atomic_long_sub(bcache_dev_sectors_dirty(d),
  1210. &d->c->flash_dev_dirty_sectors);
  1211. bcache_device_free(d);
  1212. mutex_unlock(&bch_register_lock);
  1213. kobject_put(&d->kobj);
  1214. }
  1215. static void flash_dev_flush(struct closure *cl)
  1216. {
  1217. struct bcache_device *d = container_of(cl, struct bcache_device, cl);
  1218. mutex_lock(&bch_register_lock);
  1219. bcache_device_unlink(d);
  1220. mutex_unlock(&bch_register_lock);
  1221. kobject_del(&d->kobj);
  1222. continue_at(cl, flash_dev_free, system_wq);
  1223. }

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
			NULL, &bcache_flash_ops))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
		pr_err("The obsoleted large bucket layout is unsupported, setting the bcache device read-only\n");
		pr_err("Please update to the latest bcache-tools to create the cache device\n");
		set_disk_ro(d->disk, 1);
	}

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}
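
/*
 * Create a new flash-only volume on an already running cache set.  This is
 * reached from the cache set's sysfs directory; the store hook lives in
 * sysfs.c (attribute name assumed here), roughly:
 *
 *	echo 128M > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *
 * The size handed to us is in bytes and converted to sectors below.
 */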
int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID\n");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}
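
/*
 * Called (typically from the I/O error accounting paths) once the number of
 * errors on a backing device exceeds dc->error_limit: mark the device's I/O
 * as disabled and shut the bcache device down.
 */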
bool bch_cached_dev_error(struct cached_dev *dc)
{
	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return false;

	dc->io_disable = true;
	/* make others know io_disable is true earlier */
	smp_mb();

	pr_err("stop %s: too many IO errors on backing device %s\n",
	       dc->disk.disk->disk_name, dc->backing_dev_name);

	bcache_device_stop(&dc->disk);
	return true;
}

/* Cache set */
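
/*
 * Report a fatal error on a cache set and start tearing it down, unless the
 * set is already stopping.  A direct call looks like:
 *
 *	bch_cache_set_error(c, "IO error reading bucket %zu", bucket);
 *
 * Returns true if the error was acted upon.
 */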
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
		pr_info("CACHE_SET_IO_DISABLE already set\n");

	/*
	 * XXX: we can be called from atomic context
	 * acquire_console_sem();
	 */

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("error on %pU: %pV, disabling caching\n",
	       c->set_uuid, &vaf);

	va_end(args);

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

/* When c->kobj released */
void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;

	debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	mutex_lock(&bch_register_lock);
	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));

	ca = c->cache;
	if (ca) {
		ca->set = NULL;
		c->cache = NULL;
		kobject_put(&ca->kobj);
	}

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	bioset_exit(&c->bio_split);
	mempool_exit(&c->fill_iter);
	mempool_exit(&c->bio_meta);
	mempool_exit(&c->search);
	kfree(c->devices);

	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered\n", c->set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca = c->cache;
	struct btree *b;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->gc_thread))
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/*
	 * Avoid flushing cached nodes if cache set is retiring
	 * due to too many I/O errors detected.
	 */
	if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
		list_for_each_entry(b, &c->btree_cache, list) {
			mutex_lock(&b->write_lock);
			if (btree_node_dirty(b))
				__bch_btree_node_write(b, NULL);
			mutex_unlock(&b->write_lock);
		}

	if (ca->alloc_thread)
		kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * the cache set is unregistering due to too many I/O errors.  In this
 * condition, the bcache device might be stopped; it depends on the
 * stop_when_cache_set_failed value and whether the broken cache has dirty
 * data:
 *
 * dc->stop_when_cache_set_failed	dc->has_dirty	stop bcache device
 *  BCH_CACHED_DEV_STOP_AUTO		     0		    NO
 *  BCH_CACHED_DEV_STOP_AUTO		     1		   YES
 *  BCH_CACHED_DEV_STOP_ALWAYS		     0		   YES
 *  BCH_CACHED_DEV_STOP_ALWAYS		     1		   YES
 *
 * The expected behavior is, if stop_when_cache_set_failed is configured to
 * "auto" via the sysfs interface, the bcache device will not be stopped if
 * the backing device is clean on the broken cache device.
 */
static void conditional_stop_bcache_device(struct cache_set *c,
					   struct bcache_device *d,
					   struct cached_dev *dc)
{
	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
		pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
			d->disk->disk_name, c->set_uuid);
		bcache_device_stop(d);
	} else if (atomic_read(&dc->has_dirty)) {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
		 * and dc->has_dirty == 1
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
			d->disk->disk_name);
		/*
		 * There is a small window in which the cache set has been
		 * released but the bcache device has not.  During this window,
		 * regular I/O requests go directly to the backing device
		 * because no cache set is attached.  In writeback mode with a
		 * dirty cache this may leave inconsistent data behind.
		 * Therefore, before calling bcache_device_stop() due to a
		 * broken cache device, dc->io_disable should be explicitly
		 * set to true.
		 */
		dc->io_disable = true;
		/* make others know io_disable is true earlier */
		smp_mb();
		bcache_device_stop(d);
	} else {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
		 * and dc->has_dirty == 0
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
			d->disk->disk_name);
	}
}
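
/*
 * Note: stop_when_cache_set_failed is exposed per backing device through
 * sysfs (something like
 * /sys/block/bcache<N>/bcache/stop_when_cache_set_failed, accepting "auto"
 * or "always"); the default set in cached_dev_init() above is
 * BCH_CACHED_DEV_STOP_AUTO.
 */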

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	struct bcache_device *d;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		d = c->devices[i];
		if (!d)
			continue;

		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
		    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
			dc = container_of(d, struct cached_dev, disk);
			bch_cached_dev_detach(dc);
			if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
				conditional_stop_bcache_device(c, d, dc);
		} else {
			bcache_device_stop(d);
		}
	}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		/* closure_fn set to __cache_set_unregister() */
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_meta_bucket_pages(gfp, sb)		\
	((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
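
/*
 * Allocate and minimally initialize a cache_set.  Teardown is driven by two
 * stacked closures set up here: c->caching runs __cache_set_unregister()
 * (detach or stop the attached devices), and its parent c->cl runs
 * cache_set_free() once everything else has let go.  bch_cache_set_stop()
 * and bch_cache_set_unregister() above only kick that machinery.
 */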
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache *ca = container_of(sb, struct cache, sb);
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);

	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->set_uuid, sb->set_uuid, 16);

	c->cache = ca;
	c->cache->set = c;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
	c->devices_max_used = 0;
	atomic_set(&c->attached_dev_nr, 0);
	c->btree_pages = meta_bucket_pages(sb);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	spin_lock_init(&c->btree_cannibalize_lock);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
	if (!c->devices)
		goto err;

	if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
		goto err;

	if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
			sizeof(struct bbio) +
			sizeof(struct bio_vec) * meta_bucket_pages(sb)))
		goto err;

	if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
		goto err;

	if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
		goto err;

	c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
	if (!c->uuids)
		goto err;

	c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
	if (!c->moving_gc_wq)
		goto err;

	if (bch_journal_alloc(c))
		goto err;

	if (bch_btree_cache_alloc(c))
		goto err;

	if (bch_open_buckets_alloc(c))
		goto err;

	if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= DEFAULT_IO_ERROR_LIMIT;
	c->idle_max_writeback_rate_enabled = 1;
	WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}
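
/*
 * Bring a cache set online.  If the superblock says the cache was shut down
 * cleanly (CACHE_SYNC), read the journal, priorities, UUIDs and btree root
 * and replay the journal; otherwise invalidate whatever is on the device and
 * initialize a fresh btree root and UUID bucket before the first journal
 * entry is written.  Any failure funnels into bch_cache_set_error().
 */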
static int run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca = c->cache;
	struct closure cl;
	LIST_HEAD(journal);
	struct journal_replay *l;

	closure_init_stack(&cl);

	c->nbuckets = ca->sb.nbuckets;
	set_gc_sectors(c);

	if (CACHE_SYNC(&c->cache->sb)) {
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done\n");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
			goto err;

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k,
					     j->btree_level,
					     true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done\n");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		if (bch_cache_allocator_start(ca))
			goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be
		 * called first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		err = "bcache: replay journal failed";
		if (bch_journal_replay(c, &journal))
			goto err;
	} else {
		unsigned int j;

		pr_notice("invalidating existing data\n");
		ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					2, SB_JOURNAL_BUCKETS);

		for (j = 0; j < ca->sb.keys; j++)
			ca->sb.d[j] = ca->sb.first_bucket + j;

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		if (bch_cache_allocator_start(ca))
			goto err;

		mutex_lock(&c->bucket_lock);
		bch_prio_write(ca, true);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->cache->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
	bcache_write_super(c);

	if (bch_has_feature_obso_large_bucket(&c->cache->sb))
		pr_err("Detected obsoleted large bucket layout, all attached bcache devices will be read-only\n");

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c, NULL);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return 0;
err:
	while (!list_empty(&journal)) {
		l = list_first_entry(&journal, struct journal_replay, list);
		list_del(&l->list);
		kfree(l);
	}

	closure_sync(&cl);

	bch_cache_set_error(c, "%s", err);

	return -EIO;
}
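
/*
 * Look up (or allocate) the cache_set this cache belongs to, wire the two
 * together, and try to run the set.  Returns NULL on success or a static
 * error string for the caller to print; on failure the cache set is
 * unregistered again.
 */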
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache)
				return "duplicate cache set member";

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache = ca;

	err = "failed to run cache set";
	if (run_cache_set(c) < 0)
		goto err;

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

/* When ca->kobj released */
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned int i;

	if (ca->set) {
		BUG_ON(ca->set->cache != ca);
		ca->set->cache = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_disk)
		put_page(virt_to_page(ca->sb_disk));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}
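
/*
 * Allocate the per-cache in-memory structures.  The base reserve size is
 * roughly nbuckets / 1024 (after rounding nbuckets up to a power of two);
 * the movinggc/none free lists, free_inc and the heap are scaled from it
 * below, and the btree reserve is sized to match the journal buckets for
 * the worst-case replay described in the comment inside.
 */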
static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;
	int ret = -ENOMEM;
	const char *err = NULL;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is not zero a journal exists, and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * the RESERVE_BTREE type are needed.  The worst case is that every
	 * journal bucket holds valid journal entries and all of the keys
	 * need to be replayed, so there should be as many RESERVE_BTREE
	 * buckets as journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
	if (!free) {
		ret = -EPERM;
		err = "ca->sb.nbuckets is too small";
		goto err_free;
	}

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
						GFP_KERNEL)) {
		err = "ca->free[RESERVE_BTREE] alloc failed";
		goto err_btree_alloc;
	}

	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
							GFP_KERNEL)) {
		err = "ca->free[RESERVE_PRIO] alloc failed";
		goto err_prio_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_MOVINGGC] alloc failed";
		goto err_movinggc_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_NONE] alloc failed";
		goto err_none_alloc;
	}

	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
		err = "ca->free_inc alloc failed";
		goto err_free_inc_alloc;
	}

	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
		err = "ca->heap alloc failed";
		goto err_heap_alloc;
	}

	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
			      ca->sb.nbuckets));
	if (!ca->buckets) {
		err = "ca->buckets alloc failed";
		goto err_buckets_alloc;
	}

	ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
				   prio_buckets(ca), 2),
				   GFP_KERNEL);
	if (!ca->prio_buckets) {
		err = "ca->prio_buckets alloc failed";
		goto err_prio_buckets_alloc;
	}

	ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
	if (!ca->disk_buckets) {
		err = "ca->disk_buckets alloc failed";
		goto err_disk_buckets_alloc;
	}

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);
	return 0;

err_disk_buckets_alloc:
	kfree(ca->prio_buckets);
err_prio_buckets_alloc:
	vfree(ca->buckets);
err_buckets_alloc:
	free_heap(&ca->heap);
err_heap_alloc:
	free_fifo(&ca->free_inc);
err_free_inc_alloc:
	free_fifo(&ca->free[RESERVE_NONE]);
err_none_alloc:
	free_fifo(&ca->free[RESERVE_MOVINGGC]);
err_movinggc_alloc:
	free_fifo(&ca->free[RESERVE_PRIO]);
err_prio_alloc:
	free_fifo(&ca->free[RESERVE_BTREE]);
err_btree_alloc:
err_free:
	module_put(THIS_MODULE);
	if (err)
		pr_notice("error %s: %s\n", ca->cache_dev_name, err);
	return ret;
}

static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;
	ca->sb_disk = sb_disk;

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		/*
		 * If we failed here, it means ca->kobj is not initialized yet,
		 * kobject_put() won't be called and there is no chance to
		 * call blkdev_put() to bdev in bch_cache_release(). So we
		 * explicitly call blkdev_put() here.
		 */
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else if (ret == -EPERM)
			err = "cache_alloc(): cache device is too small";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s\n", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);
err:
	if (err)
		pr_notice("error %s: %s\n", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */
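
/*
 * Devices are registered from user space by writing a device path to the
 * sysfs attributes declared below, e.g. (illustrative):
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *
 * register_quiet behaves the same but returns quietly when the device is
 * already registered or busy (see register_bcache() below); writing to
 * pendings_cleanup stops backing devices that registered but never found
 * their cache set.
 */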
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer, size_t size);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);
kobj_attribute_write(pendings_cleanup,	bch_pending_bdevs_cleanup);

static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
		struct cache *ca = c->cache;

		if (ca->bdev == bdev)
			return true;
	}

	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

struct async_reg_args {
	struct delayed_work reg_work;
	char *path;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
};
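
/*
 * With CONFIG_BCACHE_ASYNC_REGISTRATION the register write returns
 * immediately and the actual registration runs from one of the workers
 * below, so a slow startup (e.g. a long journal replay) does not block the
 * writer of the sysfs file.  The worker owns and frees args, sb and path,
 * and drops the module reference taken in register_bcache().
 */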

static void register_bdev_worker(struct work_struct *work)
{
	bool fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cached_dev *dc;

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	mutex_lock(&bch_register_lock);
	if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
		fail = true;
	mutex_unlock(&bch_register_lock);

out:
	if (fail)
		pr_info("error %s: failed to register backing device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}

static void register_cache_worker(struct work_struct *work)
{
	bool fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cache *ca;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	/* blkdev_put() will be called in bch_cache_release() */
	if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
		fail = true;

out:
	if (fail)
		pr_info("error %s: failed to register cache device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}

static void register_device_async(struct async_reg_args *args)
{
	if (SB_IS_BDEV(args->sb))
		INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
	else
		INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);

	/* 10 jiffies is enough for a delay */
	queue_delayed_work(system_wq, &args->reg_work, 10);
}

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	const char *err;
	char *path = NULL;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
	ssize_t ret;
	bool async_registration = false;

#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
	async_registration = true;
#endif

	ret = -EBUSY;
	err = "failed to reference bcache module";
	if (!try_module_get(THIS_MODULE))
		goto out;

	/* For latest state of bcache_is_reboot */
	smp_mb();
	err = "bcache is in reboot";
	if (bcache_is_reboot)
		goto out_module_put;

	ret = -ENOMEM;
	err = "cannot allocate memory";
	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto out_module_put;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto out_free_path;

	ret = -EINVAL;
	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto done;
		}
		goto out_free_sb;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto out_blkdev_put;

	err = read_super(sb, bdev, &sb_disk);
	if (err)
		goto out_blkdev_put;

	err = "failed to register device";

	if (async_registration) {
		/* register in asynchronous way */
		struct async_reg_args *args =
			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);

		if (!args) {
			ret = -ENOMEM;
			err = "cannot allocate memory";
			goto out_put_sb_page;
		}

		args->path = path;
		args->sb = sb;
		args->sb_disk = sb_disk;
		args->bdev = bdev;
		register_device_async(args);
		/* No wait and returns to user space */
		goto async_done;
	}

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto out_put_sb_page;

		mutex_lock(&bch_register_lock);
		ret = register_bdev(sb, sb_disk, bdev, dc);
		mutex_unlock(&bch_register_lock);
		/* blkdev_put() will be called in cached_dev_free() */
		if (ret < 0)
			goto out_free_sb;
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto out_put_sb_page;

		/* blkdev_put() will be called in bch_cache_release() */
		if (register_cache(sb, sb_disk, bdev, ca) != 0)
			goto out_free_sb;
	}

done:
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
async_done:
	return size;

out_put_sb_page:
	put_page(virt_to_page(sb_disk));
out_blkdev_put:
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
out_free_sb:
	kfree(sb);
out_free_path:
	kfree(path);
	path = NULL;
out_module_put:
	module_put(THIS_MODULE);
out:
	pr_info("error %s: %s\n", path ? path : "", err);
	return ret;
}

struct pdev {
	struct list_head list;
	struct cached_dev *dc;
};
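
/*
 * Stop backing devices that were registered but whose cache set never showed
 * up (they are still on uncached_devices).  Triggered from user space by
 * writing anything to the pendings_cleanup attribute, e.g. (illustrative):
 *
 *	echo 1 > /sys/fs/bcache/pendings_cleanup
 */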
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer,
					 size_t size)
{
	LIST_HEAD(pending_devs);
	ssize_t ret = size;
	struct cached_dev *dc, *tdc;
	struct pdev *pdev, *tpdev;
	struct cache_set *c, *tc;

	mutex_lock(&bch_register_lock);
	list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
		if (!pdev)
			break;
		pdev->dc = dc;
		list_add(&pdev->list, &pending_devs);
	}

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
			char *set_uuid = c->set_uuid;

			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
				list_del(&pdev->list);
				kfree(pdev);
				break;
			}
		}
	}
	mutex_unlock(&bch_register_lock);

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		pr_info("delete pdev %p\n", pdev);
		list_del(&pdev->list);
		bcache_device_stop(&pdev->dc->disk);
		kfree(pdev);
	}

	return ret;
}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (bcache_is_reboot)
		return NOTIFY_DONE;

	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (bcache_is_reboot)
			goto out;

		/* New registration is rejected since now */
		bcache_is_reboot = true;
		/*
		 * Make a registering caller (if there is one) on another CPU
		 * core see bcache_is_reboot set to true earlier
		 */
		smp_mb();

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		mutex_unlock(&bch_register_lock);

		pr_info("Stopping all devices:\n");

		/*
		 * The reason bch_register_lock is not held to call
		 * bch_cache_set_stop() and bcache_device_stop() is to
		 * avoid potential deadlock during reboot, because the cache
		 * set or bcache device stopping process will acquire
		 * bch_register_lock too.
		 *
		 * We are safe here because bcache_is_reboot is already set
		 * to true, so register_bcache() will reject new
		 * registrations now.  bcache_is_reboot also makes sure
		 * bcache_reboot() won't be re-entered by another thread,
		 * so there is no race in the following list iteration by
		 * list_for_each_entry_safe().
		 */
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/*
		 * Give an early chance for other kthreads and
		 * kworkers to stop themselves
		 */
		schedule();

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 10 * HZ - jiffies;

			mutex_lock(&bch_register_lock);
			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped\n");
		else
			pr_notice("Timeout waiting for devices to be closed\n");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);
	if (bch_flush_wq)
		destroy_workqueue(bch_flush_wq);
	bch_btree_exit();

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

/* Check and fixup module parameters */
static void check_module_parameters(void)
{
	if (bch_cutoff_writeback_sync == 0)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
	}

	if (bch_cutoff_writeback == 0)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
		pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
	}

	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
		pr_warn("set bch_cutoff_writeback (%u) to %u\n",
			bch_cutoff_writeback, bch_cutoff_writeback_sync);
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
	}
}
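
/*
 * Both cutoffs are load-time module parameters (see the module_param()
 * declarations at the end of this file; permission 0, so they are not
 * writable via sysfs).  Out-of-range values are clamped by
 * check_module_parameters() above, e.g. (illustrative):
 *
 *	modprobe bcache bch_cutoff_writeback=50 bch_cutoff_writeback_sync=80
 */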

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		&ksysfs_pendings_cleanup.attr,
		NULL
	};

	check_module_parameters();

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	if (bch_btree_init())
		goto err;

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	/*
	 * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
	 *
	 * 1. It used `system_wq` before, which also does no memory reclaim.
	 * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
	 *    reduced throughput can be observed.
	 *
	 * We still want to use our own queue to not congest the `system_wq`.
	 */
	bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
	if (!bch_flush_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init();
	closure_debug_init();

	bcache_is_reboot = false;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

/*
 * Module hooks
 */
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_LICENSE("GPL");