pvblock.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) 2007-2008 Samuel Thibault.
 * (C) Copyright 2020 EPAM Systems Inc.
 */
#include <blk.h>
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <part.h>

#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <asm/xen/system.h>

#include <linux/bug.h>
#include <linux/compat.h>

#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/hvm.h>
#include <xen/xenbus.h>

#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_NAME	"pvblock"
#define DRV_NAME_BLK	"pvblock_blk"
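
/*
 * Local stand-ins for the usual fcntl O_* access-mode values, and the
 * timeout, in milliseconds, used while waiting for a free slot in a
 * full ring.
 */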
#define O_RDONLY	00
#define O_RDWR		02
#define WAIT_RING_TO_MS	10

struct blkfront_info {
	u64 sectors;
	unsigned int sector_size;
	int mode;
	int info;
	int barrier;
	int flush;
};

/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Virtual block device handle
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Private data
 * @devid: Device id
 * @bounce_buffer: Sector-aligned buffer used to bounce unaligned requests
 */
struct blkfront_dev {
	domid_t dom;

	struct blkif_front_ring ring;
	grant_ref_t ring_ref;
	evtchn_port_t evtchn;
	blkif_vdev_t handle;

	char *nodename;
	char *backend;
	struct blkfront_info info;
	unsigned int devid;
	u8 *bounce_buffer;
};

struct blkfront_platdata {
	unsigned int devid;
};

/**
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blkfront device
 * @aio_buf: Memory buffer, which must be sector-aligned for
 *	     @aio_dev sector
 * @aio_nbytes: Size of AIO, which must be a multiple of the @aio_dev
 *		sector size
 * @aio_offset: Offset, which must be at a @aio_dev
 *		sector-aligned location
 * @data: Data used to receive the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Completion callback, invoked when the response for this
 *	    request is consumed from the ring
 */
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	u8 *aio_buf;
	size_t aio_nbytes;
	off_t aio_offset;
	void *data;
	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int n;
	void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};

static void blkfront_sync(struct blkfront_dev *dev);

static void free_blkfront(struct blkfront_dev *dev)
{
	mask_evtchn(dev->evtchn);
	free(dev->backend);

	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);

	unbind_evtchn(dev->evtchn);

	free(dev->bounce_buffer);
	free(dev->nodename);
	free(dev);
}

static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
{
	xenbus_transaction_t xbt;
	char *err = NULL;
	char *message = NULL;
	struct blkif_sring *s;
	int retry = 0;
	char *msg = NULL;
	char *c;
	char nodename[32];
	char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];

	sprintf(nodename, "device/vbd/%d", devid);

	memset(dev, 0, sizeof(*dev));
	dev->nodename = strdup(nodename);
	dev->devid = devid;

	snprintf(path, sizeof(path), "%s/backend-id", nodename);
	dev->dom = xenbus_read_integer(path);
	evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);
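
	/*
	 * The frontend and the backend communicate over a single shared
	 * page: allocate it page-aligned, initialize the ring indices and
	 * grant the backend domain access to it via the grant table.
	 */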
	s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
	if (!s) {
		printf("Failed to allocate shared ring\n");
		goto error;
	}

	SHARED_RING_INIT(s);
	FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);

	dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);
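
	/*
	 * Publish ring-ref, event-channel and protocol in our XenStore
	 * node within one transaction; if the transaction must be retried
	 * (@retry set), the whole sequence is replayed.
	 */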
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printf("Error starting transaction\n");
		free(err);
	}

	err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	snprintf(path, sizeof(path), "%s/state", nodename);
	err = xenbus_switch_state(xbt, path, XenbusStateConnected);
	if (err) {
		message = "switching state";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0, &retry);
	free(err);
	if (retry)
		goto again;

	goto done;

abort_transaction:
	free(err);
	err = xenbus_transaction_end(xbt, 1, &retry);
	printf("Abort transaction %s\n", message);
	goto error;

done:
	snprintf(path, sizeof(path), "%s/backend", nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->backend);
	if (msg) {
		printf("Error %s when reading the backend path %s\n",
		       msg, path);
		goto error;
	}

	dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);
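
	/*
	 * Wait for the backend to reach the Connected state, then read the
	 * disk geometry (mode, sectors, sector-size) and the feature flags
	 * (feature-barrier, feature-flush-cache) it advertises under its
	 * XenStore path.
	 */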
	{
		XenbusState state;
		char path[strlen(dev->backend) +
			strlen("/feature-flush-cache") + 1];

		snprintf(path, sizeof(path), "%s/mode", dev->backend);
		msg = xenbus_read(XBT_NIL, path, &c);
		if (msg) {
			printf("Error %s when reading the mode\n", msg);
			goto error;
		}
		if (*c == 'w')
			dev->info.mode = O_RDWR;
		else
			dev->info.mode = O_RDONLY;
		free(c);

		snprintf(path, sizeof(path), "%s/state", dev->backend);

		msg = NULL;
		state = xenbus_read_integer(path);
		while (!msg && state < XenbusStateConnected)
			msg = xenbus_wait_for_state_change(path, &state);
		if (msg || state != XenbusStateConnected) {
			printf("backend not available, state=%d\n", state);
			goto error;
		}

		snprintf(path, sizeof(path), "%s/info", dev->backend);
		dev->info.info = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sectors", dev->backend);
		/*
		 * FIXME: read_integer returns an int, so disk size
		 * limited to 1TB for now
		 */
		dev->info.sectors = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
		dev->info.sector_size = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-barrier",
			 dev->backend);
		dev->info.barrier = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-flush-cache",
			 dev->backend);
		dev->info.flush = xenbus_read_integer(path);
	}

	unmask_evtchn(dev->evtchn);
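
	/*
	 * A single sector-sized, sector-aligned bounce buffer: unaligned
	 * requests are staged through it one block at a time.
	 */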
	dev->bounce_buffer = memalign(dev->info.sector_size,
				      dev->info.sector_size);
	if (!dev->bounce_buffer) {
		printf("Failed to allocate bounce buffer\n");
		goto error;
	}

	debug("%llu sectors of %u bytes, bounce buffer at %p\n",
	      dev->info.sectors, dev->info.sector_size,
	      dev->bounce_buffer);

	return 0;

error:
	free(msg);
	free(err);
	free_blkfront(dev);
	return -ENODEV;
}

static void shutdown_blkfront(struct blkfront_dev *dev)
{
	char *err = NULL, *err2;
	XenbusState state;

	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	blkfront_sync(dev);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);
	free(err);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);
		free(err);
	}

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

close:
	free(err);

	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);

	if (!err)
		free_blkfront(dev);
}

/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Here we receive responses from the ring and check their status. This
 * happens until we have read everything pending on the ring: responses are
 * read from the consumer pointer up to the producer pointer, and the
 * consumer pointer is advanced to mark the data as read.
 *
 * Return: Number of consumed responses.
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
	RING_IDX rp, cons;
	struct blkif_response *rsp;
	int more;
	int nr_consumed;

moretodo:
	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	nr_consumed = 0;
	while (cons != rp) {
		struct blkfront_aiocb *aiocbp;
		int status;
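
		/*
		 * The response id echoes back the request id, which was set
		 * to the address of the issuing aiocb; recover it to find
		 * the grants to release and the callback to invoke.
		 */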
		rsp = RING_GET_RESPONSE(&dev->ring, cons);
		nr_consumed++;

		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		{
			int j;

			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

			break;
		}

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			break;

		default:
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);
			break;
		}

		dev->ring.rsp_cons = ++cons;
		/* Note: the callback frees aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */
			break;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
	if (more)
		goto moretodo;

	return nr_consumed;
}

static void blkfront_wait_slot(struct blkfront_dev *dev)
{
	/* Wait for a slot */
	if (RING_FULL(&dev->ring)) {
		while (true) {
			blkfront_aio_poll(dev);
			if (!RING_FULL(&dev->ring))
				break;
			wait_event_timeout(NULL, !RING_FULL(&dev->ring),
					   WAIT_RING_TO_MS);
		}
	}
}

/**
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Whether this is a read (0) or a write (1) operation
 *
 * We check that the AIO parameters meet the requirements of the device.
 * Then we get a free request slot from the ring and fill in its arguments.
 * After granting the backend access to the data pages, we notify it about
 * the new request via the event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	RING_IDX i;
	int notify;
	int n, j;
	uintptr_t start, end;

	/* Can't io at non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't io non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't io non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;
	aiocbp->n = n;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
	}
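
	/*
	 * The first and last pages may be only partially covered by the
	 * buffer: narrow their first_sect/last_sect to the sectors actually
	 * used, based on the buffer's offset within the page.
	 */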
	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
		dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;
	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

		if (!write) {
			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;
			barrier();
		}
		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
						       write);
		aiocbp->gref[j] = req->seg[j].gref;
	}

	dev->ring.req_prod_pvt = i + 1;

	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}

static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
{
	aiocbp->data = (void *)1;
	aiocbp->aio_cb = NULL;
}

static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
	aiocbp->aio_cb = blkfront_aio_cb;
	blkfront_aio(aiocbp, write);
	aiocbp->data = NULL;

	while (true) {
		blkfront_aio_poll(aiocbp->aio_dev);
		if (aiocbp->data)
			break;
		cpu_relax();
	}
}
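
/*
 * Illustrative only: a synchronous single-sector read through the helpers
 * above could look roughly like this, assuming buf is sector-aligned:
 *
 *	struct blkfront_aiocb aiocb = {
 *		.aio_dev = dev,
 *		.aio_buf = buf,
 *		.aio_nbytes = dev->info.sector_size,
 *		.aio_offset = 0,
 *	};
 *	blkfront_io(&aiocb, 0);	- 0 selects a read, 1 a write
 *
 * pvblock_iop() below is the real in-tree user of this pattern.
 */
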
static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
				    uint64_t id)
{
	struct blkif_request *req;
	int notify, i;

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);
	req->operation = op;
	req->nr_segments = 0;
	req->handle = dev->handle;
	req->id = id;
	req->sector_number = 0;
	dev->ring.req_prod_pvt = i + 1;
	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}
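
/*
 * Push outstanding writes to the medium: if the backend advertised
 * feature-barrier or feature-flush-cache, queue the corresponding
 * zero-segment request, then poll until every slot in the ring is free.
 */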
static void blkfront_sync(struct blkfront_dev *dev)
{
	if (dev->info.mode == O_RDWR) {
		if (dev->info.barrier == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_WRITE_BARRIER, 0);

		if (dev->info.flush == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_FLUSH_DISKCACHE, 0);
	}

	while (true) {
		blkfront_aio_poll(dev);
		if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
			break;
		cpu_relax();
	}
}

/**
 * pvblock_iop() - Issue a block I/O operation.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with the data to be read / written
 * @write: Whether this is a read (0) or a write (1) operation
 *
 * Depending on the operation, data is read from or written to @buffer,
 * starting at sector @blknr, @blkcnt sectors in total.
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;
	bool unaligned;

	if (blkcnt == 0)
		return 0;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);
		return 0;
	}

	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;
	aiocb.aio_cb = NULL;
	aiocb.data = NULL;
	blocks_todo = blkcnt;
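
	/*
	 * Aligned buffers are transferred in chunks of up to
	 * BLKIF_MAX_SEGMENTS_PER_REQUEST pages per request; unaligned
	 * buffers go one block at a time through the bounce buffer.
	 */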
	do {
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)(BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);

	return blkcnt;
}

ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, buffer, 0);
}

ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
			const void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
}

static int pvblock_blk_bind(struct udevice *udev)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int devnum;

	desc->if_type = IF_TYPE_PVBLOCK;
	/*
	 * Initialize the devnum to -ENODEV. This is to make sure that
	 * blk_next_free_devnum() works as expected, since the default
	 * value 0 is a valid devnum.
	 */
	desc->devnum = -ENODEV;
	devnum = blk_next_free_devnum(IF_TYPE_PVBLOCK);
	if (devnum < 0)
		return devnum;
	desc->devnum = devnum;
	desc->part_type = PART_TYPE_UNKNOWN;
	desc->bdev = udev;

	strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
	strncpy(desc->revision, "1", sizeof(desc->revision));
	strncpy(desc->product, "Virtual disk", sizeof(desc->product));

	return 0;
}

static int pvblock_blk_probe(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_platdata *platdata = dev_get_platdata(udev);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, devid;

	devid = platdata->devid;
	free(platdata);

	ret = init_blkfront(devid, blk_dev);
	if (ret < 0)
		return ret;

	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

	return 0;
}

static int pvblock_blk_remove(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);

	shutdown_blkfront(blk_dev);
	return 0;
}

static const struct blk_ops pvblock_blk_ops = {
	.read	= pvblock_blk_read,
	.write	= pvblock_blk_write,
};

U_BOOT_DRIVER(pvblock_blk) = {
	.name			= DRV_NAME_BLK,
	.id			= UCLASS_BLK,
	.ops			= &pvblock_blk_ops,
	.bind			= pvblock_blk_bind,
	.probe			= pvblock_blk_probe,
	.remove			= pvblock_blk_remove,
	.priv_auto_alloc_size	= sizeof(struct blkfront_dev),
	.flags			= DM_FLAG_OS_PREPARE,
};

/*******************************************************************************
 * Para-virtual block device class
 *******************************************************************************/

typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);

static int on_new_vbd(struct udevice *parent, unsigned int devid)
{
	struct driver_info info;
	struct udevice *udev;
	struct blkfront_platdata *platdata;
	int ret;

	debug("New " DRV_NAME_BLK ", device ID %d\n", devid);

	platdata = malloc(sizeof(struct blkfront_platdata));
	if (!platdata) {
		printf("Failed to allocate platform data\n");
		return -ENOMEM;
	}

	platdata->devid = devid;

	info.name = DRV_NAME_BLK;
	info.platdata = platdata;

	ret = device_bind_by_name(parent, false, &info, &udev);
	if (ret < 0) {
		printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
		       devid, ret);
		free(platdata);
	}
	return ret;
}
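
/*
 * Enumerate virtual block devices by listing the "device/vbd" XenStore
 * directory: each entry name is a device ID, which is passed to @clb to
 * bind a pvblock_blk child for it.
 */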
static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
{
	char **dirs, *msg;
	int i, ret;

	msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
	if (msg) {
		printf("Failed to read device/vbd directory: %s\n", msg);
		free(msg);
		return -ENODEV;
	}

	for (i = 0; dirs[i]; i++) {
		int devid;

		sscanf(dirs[i], "%d", &devid);
		ret = clb(udev, devid);
		if (ret < 0)
			goto fail;

		free(dirs[i]);
	}
	ret = 0;

fail:
	for (; dirs[i]; i++)
		free(dirs[i]);
	free(dirs);
	return ret;
}

static void print_pvblock_devices(void)
{
	struct udevice *udev;
	bool first = true;
	const char *class_name;

	class_name = uclass_get_name(UCLASS_PVBLOCK);
	for (blk_first_device(IF_TYPE_PVBLOCK, &udev); udev;
	     blk_next_device(&udev), first = false) {
		struct blk_desc *desc = dev_get_uclass_platdata(udev);

		if (!first)
			puts(", ");
		printf("%s: %d", class_name, desc->devnum);
	}
	printf("\n");
}

void pvblock_init(void)
{
	struct driver_info info;
	struct udevice *udev;
	struct uclass *uc;
	int ret;

	/*
	 * At this point Xen drivers have already initialized,
	 * so we can instantiate the class driver and enumerate
	 * virtual block devices.
	 */
	info.name = DRV_NAME;
	ret = device_bind_by_name(gd->dm_root, false, &info, &udev);
	if (ret < 0)
		printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);

	/* Bootstrap virtual block devices class driver */
	ret = uclass_get(UCLASS_PVBLOCK, &uc);
	if (ret)
		return;
	uclass_foreach_dev_probe(UCLASS_PVBLOCK, udev);

	print_pvblock_devices();
}

static int pvblock_probe(struct udevice *udev)
{
	struct uclass *uc;
	int ret;

	if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)
		return -ENODEV;

	ret = uclass_get(UCLASS_BLK, &uc);
	if (ret)
		return ret;
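
	/*
	 * Probe every block device bound above; _ret is the per-device
	 * iteration status provided by the uclass_foreach_dev_probe()
	 * macro, so bail out on the first probe error.
	 */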
	uclass_foreach_dev_probe(UCLASS_BLK, udev) {
		if (_ret)
			return _ret;
	}
	return 0;
}

U_BOOT_DRIVER(pvblock_drv) = {
	.name	= DRV_NAME,
	.id	= UCLASS_PVBLOCK,
	.probe	= pvblock_probe,
};

UCLASS_DRIVER(pvblock) = {
	.name	= DRV_NAME,
	.id	= UCLASS_PVBLOCK,
};