// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>
#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
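
/*
 * Per-socket state. tx_lock serializes transmission on the socket,
 * pending/sent remember a partially transmitted request so it can be
 * resumed, and cookie is bumped whenever the socket is replaced so stale
 * requests and replies can be detected.
 */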
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT 0
#define NBD_RT_DISCONNECT_REQUESTED 1
#define NBD_RT_DISCONNECTED 2
#define NBD_RT_HAS_PID_FILE 3
#define NBD_RT_HAS_CONFIG_REF 4
#define NBD_RT_BOUND 5
#define NBD_RT_DISCONNECT_ON_CLOSE 6

#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;
	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;
	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;
	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
	struct completion *destroy_complete;
	unsigned long flags;
};

#define NBD_CMD_REQUEUED 1
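
/*
 * Per-request state, stored in the blk-mq PDU. cmd->lock protects these
 * fields against concurrent completion, timeout and requeue; cmd_cookie is
 * folded into the wire handle so stale replies can be rejected.
 */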
struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}
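
/*
 * The 64-bit handle sent on the wire packs the per-command cookie into the
 * upper 32 bits and the blk-mq unique tag into the lower 32 bits, so a reply
 * can be matched back to its request and stale replies can be detected.
 */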
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}

	/*
	 * Do this last, just before the nbd is freed, to make sure that the
	 * disk and its kobject have been completely removed and the same
	 * device cannot be created a second time.
	 */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
		complete(nbd->destroy_complete);

	kfree(nbd);
}
static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}
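
/*
 * Mark a socket dead: shut it down, drop it from the live connection count
 * and clear any partially sent request. If @notify is set and no disconnect
 * was requested, nbd_dead_link_work is queued to report the dead link.
 */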
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd, bool start)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);
	sector_t nr_sectors = config->bytesize >> 9;

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, nr_sectors);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_nr_sectors(bdev, nr_sectors);
			if (start)
				set_blocksize(bdev, config->blksize);
		} else
			set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd, false);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}
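
/*
 * blk-mq timeout handler. With more than one connection (or a single
 * connection and an explicit timeout) the request is requeued so the submit
 * path can retry it on a live socket; with timeout=0 the timer is simply
 * reset after a warning; otherwise the request is failed and all sockets
 * are shut down.
 */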
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is configured or until the dead connection
		 * timeout expires.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
/* An ERR_PTR return means something went wrong; the caller marks the socket
 * dead and userspace is informed.
 */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}
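
/*
 * Per-socket receive worker: read replies off the socket and complete the
 * matching requests until the socket errors out, then mark that socket dead
 * and wake anyone waiting on recv_wq.
 */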
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}
	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
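
/*
 * Pick another live socket to retry a request on: reuse the cached
 * fallback_index while that connection is still alive, otherwise scan for
 * any other connection that is not dead. Returns -1 if nothing is usable.
 */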
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}
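
/*
 * Submit one command on the socket chosen by the hardware queue. If that
 * socket is dead, fall back to another live one or wait dead_conn_timeout
 * for a reconnect; a send failure with -EAGAIN marks the socket dead and
 * requeues the request.
 */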
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could have signals pending, so our sendmsg may fail. In this case we
	 * need to return that we are busy, otherwise error out as appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}
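
/*
 * Attach a new socket (passed in as a file descriptor) to the device. The
 * queue is frozen while the ->socks array is reallocated so no request can
 * observe it half-updated.
 */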
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	/*
	 * We need to make sure we don't get any errant requests while we're
	 * reallocating the ->socks array.
	 */
	blk_mq_freeze_queue(nbd->disk->queue);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return 0;

put_socket:
	blk_mq_unfreeze_queue(nbd->disk->queue);
	sockfd_put(sock);
	return err;
}
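
/*
 * Replace the first dead connection with a freshly passed-in socket, bump
 * its cookie so stale requests can be detected, and start a new receive
 * worker for it.
 */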
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_nr_sectors(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
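
/*
 * Drop a reference on the current configuration. When the last reference
 * goes away, tear everything down: sockets, the sysfs pid file, debugfs
 * entries, the receive workqueue and the queue limits set up for this
 * config.
 */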
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		if (nbd->recv_workq)
			destroy_workqueue(nbd->recv_workq);
		nbd->recv_workq = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		nbd->disk->queue->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
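
/*
 * Bring the device up: validate the socket configuration, create the
 * receive workqueue, export the receiver pid via sysfs and start one
 * receive worker per connection.
 */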
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		return -ENOMEM;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If we have m connections (m > 1) and the first n
			 * (1 <= n < m) kzallocs succeeded but allocation
			 * n + 1 failed, n receive workers are already running.
			 * Flush the workqueue here so those workers cannot
			 * drop the last config_refs and end up destroying
			 * the workqueue from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	nbd_size_update(nbd, true);
	return error;
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
					 atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	flush_workqueue(nbd->recv_workq);

	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(bdev);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static bool nbd_is_valid_blksize(unsigned long blksize)
{
	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
	    blksize > PAGE_SIZE)
		return false;
	return true;
}

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		if (!arg)
			arg = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(arg))
			return -EINVAL;
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;
	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't, so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = NBD_DEF_BLKSIZE;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}
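
/*
 * Opening the block device takes a reference on the nbd device and, if no
 * configuration exists yet, lazily allocates one under config_lock so the
 * first opener sets it up exactly once.
 */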
  1306. static int nbd_open(struct block_device *bdev, fmode_t mode)
  1307. {
  1308. struct nbd_device *nbd;
  1309. int ret = 0;
  1310. mutex_lock(&nbd_index_mutex);
  1311. nbd = bdev->bd_disk->private_data;
  1312. if (!nbd) {
  1313. ret = -ENXIO;
  1314. goto out;
  1315. }
  1316. if (!refcount_inc_not_zero(&nbd->refs)) {
  1317. ret = -ENXIO;
  1318. goto out;
  1319. }
  1320. if (!refcount_inc_not_zero(&nbd->config_refs)) {
  1321. struct nbd_config *config;
  1322. mutex_lock(&nbd->config_lock);
  1323. if (refcount_inc_not_zero(&nbd->config_refs)) {
  1324. mutex_unlock(&nbd->config_lock);
  1325. goto out;
  1326. }
  1327. config = nbd->config = nbd_alloc_config();
  1328. if (!config) {
  1329. ret = -ENOMEM;
  1330. mutex_unlock(&nbd->config_lock);
  1331. goto out;
  1332. }
  1333. refcount_set(&nbd->config_refs, 1);
  1334. refcount_inc(&nbd->refs);
  1335. mutex_unlock(&nbd->config_lock);
  1336. set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
  1337. } else if (nbd_disconnected(nbd->config)) {
  1338. set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
  1339. }
  1340. out:
  1341. mutex_unlock(&nbd_index_mutex);
  1342. return ret;
  1343. }
  1344. static void nbd_release(struct gendisk *disk, fmode_t mode)
  1345. {
  1346. struct nbd_device *nbd = disk->private_data;
  1347. struct block_device *bdev = bdget_disk(disk, 0);
  1348. if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
  1349. bdev->bd_openers == 0)
  1350. nbd_disconnect_and_put(nbd);
  1351. bdput(bdev);
  1352. nbd_config_put(nbd);
  1353. nbd_put(nbd);
  1354. }
  1355. static const struct block_device_operations nbd_fops =
  1356. {
  1357. .owner = THIS_MODULE,
  1358. .open = nbd_open,
  1359. .release = nbd_release,
  1360. .ioctl = nbd_ioctl,
  1361. .compat_ioctl = nbd_ioctl,
  1362. };
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq = nbd_queue_rq,
	.complete = nbd_complete_rq,
	.init_request = nbd_init_request,
	.timeout = nbd_xmit_timeout,
};

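/*
 * Create one nbd device: allocate the gendisk, reserve a slot in
 * nbd_index_idr (the requested index, or the first free one when index is
 * negative), set up the blk-mq tag set and request queue, and register the
 * disk as nbd<index>. Returns the index on success or a negative errno.
 */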
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	nbd->destroy_complete = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

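/*
 * idr_for_each() callback used by the netlink connect path to pick the
 * first device that has no configuration attached yet.
 */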
static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX] = { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS] = { .type = NLA_NESTED },
	[NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED },
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD] = { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX] = { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
};

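/*
 * Apply NBD_ATTR_SIZE_BYTES / NBD_ATTR_BLOCK_SIZE_BYTES from a netlink
 * request, keeping the current config values for any attribute that is
 * absent and rejecting block sizes that nbd_is_valid_blksize() does not
 * accept.
 */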
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = config->blksize;
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		if (!bsize)
			bsize = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(bsize)) {
			printk(KERN_ERR "Invalid block size %llu\n", bsize);
			return -EINVAL;
		}
	}

	if (bytes != config->bytesize || bsize != config->blksize)
		nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
	return 0;
}

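/*
 * NBD_CMD_CONNECT handler: find (or create) the requested device, allocate
 * its config, apply the size/timeout/flag attributes and the
 * NBD_ATTR_SOCKETS list, then start the device via nbd_start_device().
 *
 * Purely as an illustration (not part of this driver, and assuming a
 * libnl-3 userspace with a socket fd that has already completed the NBD
 * handshake), a client could send roughly the following:
 *
 *	struct nl_sock *nl = nl_socket_alloc();
 *	genl_connect(nl);
 *	int fam = genl_ctrl_resolve(nl, NBD_GENL_FAMILY_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NBD_CMD_CONNECT, NBD_GENL_VERSION);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, dev_size_bytes);
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_auto(nl, msg);
 *
 * "dev_size_bytes" and "sock_fd" above are placeholders supplied by the
 * client.
 */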
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	DECLARE_COMPLETION_ONSTACK(destroy_complete);
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}

	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
	    test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
		nbd->destroy_complete = &destroy_complete;
		mutex_unlock(&nbd_index_mutex);
		/* Wait until the nbd stuff is totally destroyed */
		wait_for_completion(&destroy_complete);
		goto again;
	}

	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_RT_BOUND, &config->runtime_flags);

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We have 1 ref to keep the device around, and then 1
			 * ref for our current operation here, which will be
			 * inherited by the config. If we already have
			 * DESTROY_ON_DISCONNECT set then we know we don't have
			 * that extra ref already held so we don't need the
			 * put_dev.
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX + 1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

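/*
 * Shared teardown used by NBD_CMD_DISCONNECT and by DISCONNECT_ON_CLOSE:
 * request the disconnect, shut the sockets down, let the receive work
 * drain, clear the queue, and drop the config reference that was taken
 * when the device was connected over netlink.
 */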
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	/*
	 * Make sure the recv thread has finished, so it does not drop the
	 * last config ref and try to destroy the workqueue from inside the
	 * work queue. This also ensures that we can safely call
	 * nbd_clear_que() to cancel the inflight I/Os.
	 */
	if (nbd->recv_workq)
		flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

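/*
 * NBD_CMD_DISCONNECT handler: look the device up by index and tear it
 * down. A device that has no active config is treated as already
 * disconnected and the call succeeds.
 */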
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}

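/*
 * NBD_CMD_RECONFIGURE handler: adjust size, timeouts and client flags on a
 * live, netlink-bound device and optionally hand in replacement sockets via
 * nbd_reconnect_socket(). An -ENOSPC from the reconnect is deliberately
 * ignored below; a best-effort reading is that every connection slot was
 * still live, so no replacement was needed.
 */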
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX + 1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd = NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_connect,
	},
	{
		.cmd = NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_disconnect,
	},
	{
		.cmd = NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_reconfigure,
	},
	{
		.cmd = NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NBD_GENL_FAMILY_NAME,
	.version = NBD_GENL_VERSION,
	.module = THIS_MODULE,
	.small_ops = nbd_connect_genl_ops,
	.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr = NBD_ATTR_MAX,
	.policy = nbd_attr_policy,
	.mcgrps = nbd_mcast_grps,
	.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};

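/*
 * NBD_CMD_STATUS support: emit one NBD_DEVICE_ITEM (index plus a connected
 * flag) per device, either for a single requested index or for every
 * device in nbd_index_idr.
 */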
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The reason we
	 * don't take a ref here is because we can't take a ref in the
	 * index == -1 case as we would need to put under the nbd_index_mutex,
	 * which could deadlock if we are configured to remove ourselves once
	 * we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

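/*
 * Netlink notifications back to userspace: nbd_connect_reply() answers a
 * CONNECT request with the index that was actually used, and
 * nbd_mcast_index() broadcasts NBD_CMD_LINK_DEAD on the nbd multicast
 * group when a connection dies so that a userspace helper can react, for
 * example by reconfiguring the device with a fresh socket.
 */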
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}

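/*
 * Module init/exit. nbd_init() validates max_part, registers the NBD_MAJOR
 * block major and the generic netlink family, then pre-creates nbds_max
 * devices. For example, loading the module with "modprobe nbd nbds_max=4
 * max_part=8" should create nbd0..nbd3, each allowing up to 8 partitions
 * (the numbers are just an illustration of the two module parameters).
 */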
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");