virtio_blk.c

// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}
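
/*
 * Build the descriptor chain for one request: the request header
 * (driver->device), the optional data scatterlist (direction depends on
 * VIRTIO_BLK_T_OUT) and the one-byte status (device->driver), then add it
 * to the virtqueue. Called with the per-vq lock held, hence GFP_ATOMIC.
 */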
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
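
/*
 * Convert a discard/write-zeroes request into the payload the device
 * expects: an array of virtio_blk_discard_write_zeroes ranges attached to
 * the request as a special payload, freed again in virtblk_request_done().
 */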
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * A single max discard segment means multi-range discard isn't
	 * supported, and the block layer only runs contiguity merging as for
	 * a normal RW request, so we can't rely on the bios to retrieve
	 * each range's info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}
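
/* Virtqueue callback: reap completed requests and restart stopped hw queues. */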
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}
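
/*
 * ->queue_rq: translate a blk-mq request into a virtio-blk command, map its
 * data into the per-request scatterlist and add it to this hardware queue's
 * virtqueue, kicking the device only for the last request of a batch.
 */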
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	bool unmap = false;
	u32 type;

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	BUG_ON(type != VIRTIO_BLK_T_DISCARD &&
	       type != VIRTIO_BLK_T_WRITE_ZEROES &&
	       (req->nr_phys_segments + 2 > vblk->sg_elems));

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}

static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		virtblk_get(vblk);
	else
		ret = -ENXIO;

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static const struct block_device_operations virtblk_fops = {
	.owner = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}
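
/*
 * Allocate and discover the request virtqueues, one per hardware queue,
 * honouring VIRTIO_BLK_F_MQ and capping the count at nr_cpu_ids.
 */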
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
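
/*
 * Cache-mode handling: read the writeback setting from config space when
 * VIRTIO_BLK_F_CONFIG_WCE is negotiated, otherwise infer it from
 * VIRTIO_BLK_F_FLUSH, and propagate the result to the block layer.
 */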
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk_size(vblk->disk, true);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};
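
/* blk-mq callbacks: per-request scatterlist setup and queue-to-vq mapping. */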
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq = virtio_queue_rq,
	.commit_rqs = virtio_commit_rqs,
	.complete = virtblk_request_done,
	.init_request = virtblk_init_request,
	.map_queues = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
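
/*
 * Device probe: negotiate features, set up the virtqueues and tag set,
 * apply the host-provided limits (segments, block size, topology, discard,
 * write-zeroes) to the request queue, then register the disk.
 */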
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions? How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_disk;
		}

		blk_queue_logical_block_size(q, blk_size);
	} else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);

		/*
		 * max_discard_seg == 0 is out of spec but we always
		 * handled it.
		 */
		if (!v)
			v = sg_elems - 2;
		blk_queue_max_discard_segments(q,
					       min(v, MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

out_cleanup_disk:
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}
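
/*
 * Device removal: tear down the disk and queues, then clear vblk->vdev under
 * vdev_mutex so late block_device_operations callers see -ENXIO, and drop
 * the probe-time reference.
 */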
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}

#ifdef CONFIG_PM_SLEEP
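/* Suspend/resume: quiesce I/O and drop the vqs on freeze, rebuild them on restore. */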
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = virtblk_remove,
	.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtblk_freeze,
	.restore = virtblk_restore,
#endif
};
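
/* Module init/exit: workqueue, block major number and virtio driver registration. */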
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");