pvblock.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) 2007-2008 Samuel Thibault.
 * (C) Copyright 2020 EPAM Systems Inc.
 */

#define LOG_CATEGORY UCLASS_PVBLOCK

#include <blk.h>
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <part.h>

#include <asm/armv8/mmu.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/xen/system.h>

#include <linux/bug.h>
#include <linux/compat.h>

#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/hvm.h>
#include <xen/xenbus.h>

#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_NAME	"pvblock"
#define DRV_NAME_BLK	"pvblock_blk"

#define O_RDONLY	00
#define O_RDWR		02
#define WAIT_RING_TO_MS	10

struct blkfront_info {
	u64 sectors;
	unsigned int sector_size;
	int mode;
	int info;
	int barrier;
	int flush;
};
/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Blkif virtual device handle (parsed from @nodename)
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Device information (sectors, sector size, mode, features)
 * @devid: Device id
 * @bounce_buffer: Buffer to bounce unaligned transfers through
 */
struct blkfront_dev {
	domid_t dom;

	struct blkif_front_ring ring;
	grant_ref_t ring_ref;
	evtchn_port_t evtchn;
	blkif_vdev_t handle;

	char *nodename;
	char *backend;
	struct blkfront_info info;
	unsigned int devid;
	u8 *bounce_buffer;
};
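/*
 * Note: struct blkif_sring, struct blkif_front_ring and the RING_*
 * accessors used throughout this file are generated by the
 * DEFINE_RING_TYPES(blkif, ...) macro from xen/interface/io/ring.h,
 * instantiated in xen/interface/io/blkif.h.
 */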
struct blkfront_plat {
	unsigned int devid;
};

/**
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blockfront device
 * @aio_buf: Memory buffer, which must be sector-aligned for
 *	     @aio_dev
 * @aio_nbytes: Size of AIO, which must be a multiple of the @aio_dev
 *		sector size
 * @aio_offset: Offset, which must be at a sector-aligned
 *		location
 * @data: Data used to receive the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Completion callback, invoked when the request finishes
 */
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	u8 *aio_buf;
	size_t aio_nbytes;
	off_t aio_offset;
	void *data;

	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int n;

	void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};
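/*
 * Example (illustrative sketch, not part of the driver): a synchronous
 * single-sector read using the aiocb machinery below, where "dev" is
 * assumed to be a connected blkfront device and "buf" a sector-aligned
 * buffer of at least dev->info.sector_size bytes. The second argument
 * of blkfront_io() selects the direction: 0 for read, 1 for write.
 *
 *	struct blkfront_aiocb aiocb = {
 *		.aio_dev = dev,
 *		.aio_buf = buf,
 *		.aio_nbytes = dev->info.sector_size,
 *		.aio_offset = 0,
 *	};
 *
 *	blkfront_io(&aiocb, 0);
 */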
static void blkfront_sync(struct blkfront_dev *dev);

static void free_blkfront(struct blkfront_dev *dev)
{
	mask_evtchn(dev->evtchn);
	free(dev->backend);

	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);

	unbind_evtchn(dev->evtchn);

	free(dev->bounce_buffer);
	free(dev->nodename);
	free(dev);
}
static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
{
	xenbus_transaction_t xbt;
	char *err = NULL;
	char *message = NULL;
	struct blkif_sring *s;
	int retry = 0;
	char *msg = NULL;
	char *c;
	char nodename[32];
	char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];

	sprintf(nodename, "device/vbd/%d", devid);

	memset(dev, 0, sizeof(*dev));
	dev->nodename = strdup(nodename);
	dev->devid = devid;

	snprintf(path, sizeof(path), "%s/backend-id", nodename);
	dev->dom = xenbus_read_integer(path);
	evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);

	s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
	if (!s) {
		printf("Failed to allocate shared ring\n");
		goto error;
	}

	SHARED_RING_INIT(s);
	FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);

	dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);
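
	/*
	 * Publish ring-ref and event-channel to the backend in a single
	 * xenbus transaction; if the transaction raced with a concurrent
	 * XenStore update, we restart from "again".
	 */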
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printf("starting transaction\n");
		free(err);
	}

	err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	snprintf(path, sizeof(path), "%s/state", nodename);
	err = xenbus_switch_state(xbt, path, XenbusStateConnected);
	if (err) {
		message = "switching state";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0, &retry);
	free(err);
	if (retry)
		goto again;

	goto done;

abort_transaction:
	free(err);
	err = xenbus_transaction_end(xbt, 1, &retry);
	printf("Abort transaction %s\n", message);
	goto error;

done:
	snprintf(path, sizeof(path), "%s/backend", nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->backend);
	if (msg) {
		printf("Error %s when reading the backend path %s\n",
		       msg, path);
		goto error;
	}

	dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);

	{
		XenbusState state;
		char path[strlen(dev->backend) +
			strlen("/feature-flush-cache") + 1];

		snprintf(path, sizeof(path), "%s/mode", dev->backend);
		msg = xenbus_read(XBT_NIL, path, &c);
		if (msg) {
			printf("Error %s when reading the mode\n", msg);
			goto error;
		}
		if (*c == 'w')
			dev->info.mode = O_RDWR;
		else
			dev->info.mode = O_RDONLY;
		free(c);

		snprintf(path, sizeof(path), "%s/state", dev->backend);

		msg = NULL;
		state = xenbus_read_integer(path);
		while (!msg && state < XenbusStateConnected)
			msg = xenbus_wait_for_state_change(path, &state);
		if (msg || state != XenbusStateConnected) {
			printf("backend not available, state=%d\n", state);
			goto error;
		}

		snprintf(path, sizeof(path), "%s/info", dev->backend);
		dev->info.info = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sectors", dev->backend);
		/*
		 * FIXME: read_integer returns an int, so disk size
		 * is limited to 1TB for now
		 */
		dev->info.sectors = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
		dev->info.sector_size = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-barrier",
			 dev->backend);
		dev->info.barrier = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-flush-cache",
			 dev->backend);
		dev->info.flush = xenbus_read_integer(path);
	}

	unmask_evtchn(dev->evtchn);

	dev->bounce_buffer = memalign(dev->info.sector_size,
				      dev->info.sector_size);
	if (!dev->bounce_buffer) {
		printf("Failed to allocate bounce buffer\n");
		goto error;
	}

	debug("%llu sectors of %u bytes, bounce buffer at %p\n",
	      dev->info.sectors, dev->info.sector_size,
	      dev->bounce_buffer);

	return 0;

error:
	free(msg);
	free(err);
	free_blkfront(dev);
	return -ENODEV;
}
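
/*
 * For reference, after a successful init_blkfront() the frontend area in
 * XenStore looks roughly like this (devid and values are illustrative):
 *
 *	device/vbd/<devid>/ring-ref = "<grant reference>"
 *	device/vbd/<devid>/event-channel = "<event channel port>"
 *	device/vbd/<devid>/protocol = XEN_IO_PROTO_ABI_NATIVE
 *	device/vbd/<devid>/state = "4" (XenbusStateConnected)
 */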
static void shutdown_blkfront(struct blkfront_dev *dev)
{
	char *err = NULL, *err2;
	XenbusState state;

	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	blkfront_sync(dev);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);
	free(err);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);
		free(err);
	}

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

close:
	free(err);

	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);

	if (!err)
		free_blkfront(dev);
}
/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Here we receive responses from the ring and check their status. This
 * happens until we have read all data from the ring. We read from the
 * consumed pointer up to the response pointer, then advance the consumed
 * pointer to mark the data as read.
 *
 * Return: Number of consumed responses.
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
	RING_IDX rp, cons;
	struct blkif_response *rsp;
	int more;
	int nr_consumed;

moretodo:
	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	nr_consumed = 0;
	while (cons != rp) {
		struct blkfront_aiocb *aiocbp;
		int status;

		rsp = RING_GET_RESPONSE(&dev->ring, cons);
		nr_consumed++;

		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		{
			int j;

			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

			break;
		}

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			break;

		default:
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);
			break;
		}

		dev->ring.rsp_cons = ++cons;
		/* Note: the callback frees aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */
			break;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
	if (more)
		goto moretodo;

	return nr_consumed;
}
static void blkfront_wait_slot(struct blkfront_dev *dev)
{
	/* Wait for a slot */
	if (RING_FULL(&dev->ring)) {
		while (true) {
			blkfront_aio_poll(dev);
			if (!RING_FULL(&dev->ring))
				break;
			wait_event_timeout(NULL, !RING_FULL(&dev->ring),
					   WAIT_RING_TO_MS);
		}
	}
}
/**
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Whether this is a read (0) or a write (1) operation
 *
 * We check whether the AIO parameters meet the requirements of the device.
 * Then we take a free request slot from the ring and fill in its arguments.
 * After this we grant the backend access to the buffer pages via grant
 * references. The last step is notifying the backend about the AIO via the
 * event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	RING_IDX i;
	int notify;
	int n, j;
	uintptr_t start, end;

	/* Can't io at non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't io non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't io non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;
	aiocbp->n = n;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
	}
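
	/*
	 * The buffer may start and end at non-page-aligned addresses, so
	 * trim the first and last segments to the sectors actually covered
	 * by the buffer.
	 */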
	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
		dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;

	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

		if (!write) {
			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;
			barrier();
		}
		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
						       write);
		aiocbp->gref[j] = req->seg[j].gref;
	}

	dev->ring.req_prod_pvt = i + 1;

	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}
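
/*
 * Example (illustrative sketch): an asynchronous request with a
 * completion callback. "my_read_done" and "read_done" are hypothetical;
 * the callback runs from blkfront_aio_poll() and may free the aiocb, as
 * noted in that function.
 *
 *	static bool read_done;
 *
 *	static void my_read_done(struct blkfront_aiocb *aiocb, int ret)
 *	{
 *		if (ret)
 *			printf("read failed: %d\n", ret);
 *		read_done = true;
 *	}
 *
 *	aiocb.aio_cb = my_read_done;
 *	blkfront_aio(&aiocb, 0);
 *	while (!read_done)
 *		blkfront_aio_poll(dev);
 */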
static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
{
	aiocbp->data = (void *)1;
	aiocbp->aio_cb = NULL;
}

static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
	aiocbp->aio_cb = blkfront_aio_cb;
	blkfront_aio(aiocbp, write);
	aiocbp->data = NULL;

	while (true) {
		blkfront_aio_poll(aiocbp->aio_dev);
		if (aiocbp->data)
			break;
		cpu_relax();
	}
}

static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
				    uint64_t id)
{
	struct blkif_request *req;
	int notify, i;

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = op;
	req->nr_segments = 0;
	req->handle = dev->handle;
	req->id = id;
	req->sector_number = 0;

	dev->ring.req_prod_pvt = i + 1;

	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}

static void blkfront_sync(struct blkfront_dev *dev)
{
	if (dev->info.mode == O_RDWR) {
		if (dev->info.barrier == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_WRITE_BARRIER, 0);

		if (dev->info.flush == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_FLUSH_DISKCACHE, 0);
	}

	while (true) {
		blkfront_aio_poll(dev);
		if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
			break;
		cpu_relax();
	}
}
/**
 * pvblock_iop() - Issue a block I/O operation.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer for the data to be read / written
 * @write: Whether this is a read (0) or a write (1) operation
 *
 * Depending on the operation, data is read from or written to the sector
 * (@blknr) via the memory buffer (@buffer).
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;
	bool unaligned;

	if (blkcnt == 0)
		return 0;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);
		return 0;
	}

	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;
	aiocb.aio_cb = NULL;
	aiocb.data = NULL;
	blocks_todo = blkcnt;
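
	/*
	 * Unaligned buffers are transferred one block at a time through
	 * the sector-sized bounce buffer; aligned buffers go directly, in
	 * chunks of up to BLKIF_MAX_SEGMENTS_PER_REQUEST pages per request.
	 */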
	do {
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)(BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);

	return blkcnt;
}

ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, buffer, 0);
}

ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
			const void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
}
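
/*
 * Consumers normally reach these helpers through the generic blk uclass
 * rather than calling them directly. A minimal sketch, assuming device 0
 * has been probed and "buf" is large enough for one block:
 *
 *	struct blk_desc *desc = blk_get_devnum_by_type(IF_TYPE_PVBLOCK, 0);
 *
 *	if (desc)
 *		blk_dread(desc, 0, 1, buf);
 */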
static int pvblock_blk_bind(struct udevice *udev)
{
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int devnum;

	desc->if_type = IF_TYPE_PVBLOCK;
	/*
	 * Initialize the devnum to -ENODEV. This is to make sure that
	 * blk_next_free_devnum() works as expected, since the default
	 * value 0 is a valid devnum.
	 */
	desc->devnum = -ENODEV;
	devnum = blk_next_free_devnum(IF_TYPE_PVBLOCK);
	if (devnum < 0)
		return devnum;
	desc->devnum = devnum;
	desc->part_type = PART_TYPE_UNKNOWN;
	desc->bdev = udev;

	strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
	strncpy(desc->revision, "1", sizeof(desc->revision));
	strncpy(desc->product, "Virtual disk", sizeof(desc->product));

	return 0;
}

static int pvblock_blk_probe(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_plat *plat = dev_get_plat(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int ret, devid;

	devid = plat->devid;
	free(plat);

	ret = init_blkfront(devid, blk_dev);
	if (ret < 0)
		return ret;

	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

	return 0;
}

static int pvblock_blk_remove(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);

	shutdown_blkfront(blk_dev);

	return 0;
}

static const struct blk_ops pvblock_blk_ops = {
	.read	= pvblock_blk_read,
	.write	= pvblock_blk_write,
};

U_BOOT_DRIVER(pvblock_blk) = {
	.name		= DRV_NAME_BLK,
	.id		= UCLASS_BLK,
	.ops		= &pvblock_blk_ops,
	.bind		= pvblock_blk_bind,
	.probe		= pvblock_blk_probe,
	.remove		= pvblock_blk_remove,
	.priv_auto	= sizeof(struct blkfront_dev),
	.flags		= DM_FLAG_OS_PREPARE,
};
/*******************************************************************************
 * Para-virtual block device class
 *******************************************************************************/
typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);

static int on_new_vbd(struct udevice *parent, unsigned int devid)
{
	struct driver_info info;
	struct udevice *udev;
	struct blkfront_plat *plat;
	int ret;

	debug("New " DRV_NAME_BLK ", device ID %d\n", devid);

	plat = malloc(sizeof(struct blkfront_plat));
	if (!plat) {
		printf("Failed to allocate platform data\n");
		return -ENOMEM;
	}

	plat->devid = devid;

	info.name = DRV_NAME_BLK;
	info.plat = plat;

	ret = device_bind_by_name(parent, false, &info, &udev);
	if (ret < 0) {
		printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
		       devid, ret);
		free(plat);
	}
	return ret;
}

static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
{
	char **dirs, *msg;
	int i, ret;

	msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
	if (msg) {
		printf("Failed to read device/vbd directory: %s\n", msg);
		free(msg);
		return -ENODEV;
	}

	for (i = 0; dirs[i]; i++) {
		int devid;

		sscanf(dirs[i], "%d", &devid);
		ret = clb(udev, devid);
		if (ret < 0)
			goto fail;

		free(dirs[i]);
	}
	ret = 0;

fail:
	for (; dirs[i]; i++)
		free(dirs[i]);
	free(dirs);
	return ret;
}
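
/*
 * Example: for a guest configured with a single disk "xvda", xenbus_ls()
 * above typically returns one entry such as "51712" (the Xen virtual
 * device number for xvda, 202 << 8).
 */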
static void print_pvblock_devices(void)
{
	struct udevice *udev;
	bool first = true;
	const char *class_name;

	class_name = uclass_get_name(UCLASS_PVBLOCK);
	for (blk_first_device(IF_TYPE_PVBLOCK, &udev); udev;
	     blk_next_device(&udev), first = false) {
		struct blk_desc *desc = dev_get_uclass_plat(udev);

		if (!first)
			puts(", ");
		printf("%s: %d", class_name, desc->devnum);
	}
	printf("\n");
}

void pvblock_init(void)
{
	struct driver_info info;
	struct udevice *udev;
	struct uclass *uc;
	int ret;

	/*
	 * At this point Xen drivers have already initialized,
	 * so we can instantiate the class driver and enumerate
	 * virtual block devices.
	 */
	info.name = DRV_NAME;
	ret = device_bind_by_name(gd->dm_root, false, &info, &udev);
	if (ret < 0)
		printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);

	/* Bootstrap virtual block devices class driver */
	ret = uclass_get(UCLASS_PVBLOCK, &uc);
	if (ret)
		return;

	uclass_foreach_dev_probe(UCLASS_PVBLOCK, udev);

	print_pvblock_devices();
}

static int pvblock_probe(struct udevice *udev)
{
	struct uclass *uc;
	int ret;

	if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)
		return -ENODEV;

	ret = uclass_get(UCLASS_BLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev_probe(UCLASS_BLK, udev) {
		if (_ret)
			return _ret;
	}

	return 0;
}

U_BOOT_DRIVER(pvblock_drv) = {
	.name	= DRV_NAME,
	.id	= UCLASS_PVBLOCK,
	.probe	= pvblock_probe,
};

UCLASS_DRIVER(pvblock) = {
	.name	= DRV_NAME,
	.id	= UCLASS_PVBLOCK,
};