// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts, the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
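
/*
 * Layout sketch (added for illustration, not taken verbatim from the
 * original sources): each queue is assumed to be one contiguous region
 * consisting of a single struct tegra_ivc_header immediately followed by
 * num_frames frames of frame_size bytes each, so frame N starts at
 *
 *	sizeof(struct tegra_ivc_header) + N * frame_size
 *
 * bytes from the start of the region. This is the layout computed by
 * tegra_ivc_frame_virt() and tegra_ivc_frame_phys() below.
 */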

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx.channel->rx.count,
		   READ_ONCE(ivc->rx.channel->rx.count) + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
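
/*
 * Usage sketch (added for illustration, not part of the original driver):
 * a consumer typically peeks at the next received frame, copies it out and
 * then advances the read position. The buf and len names below are
 * hypothetical placeholders for the caller's destination buffer, with len
 * no larger than the channel's frame_size.
 *
 *	void *frame;
 *
 *	while (!IS_ERR(frame = tegra_ivc_read_get_next_frame(ivc))) {
 *		memcpy(buf, frame, len);
 *		tegra_ivc_read_advance(ivc);
 *	}
 */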

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);

	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
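
/*
 * Usage sketch (added for illustration, not part of the original driver):
 * a producer obtains the next free frame, fills it in and then advances the
 * write position, which also notifies the peer on the empty-to-non-empty
 * transition. The buf and len names are hypothetical.
 *
 *	void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *	if (!IS_ERR(frame)) {
 *		memcpy(frame, buf, len);
 *		tegra_ivc_write_advance(ivc);
 *	}
 */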

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
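
/*
 * Usage sketch (added for illustration, not part of the original driver):
 * after tegra_ivc_init(), either end may kick off the reset protocol with
 * tegra_ivc_reset(); both ends then call tegra_ivc_notified() from their
 * notification handlers. While the handshake is still in progress it
 * returns -EAGAIN and the channel must not be used for frame I/O; once it
 * returns 0 the channel is established.
 *
 *	tegra_ivc_reset(ivc);
 *
 * and later, from the notification handler:
 *
 *	err = tegra_ivc_notified(ivc);
 *	if (err == -EAGAIN)
 *		return;
 */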

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
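
/*
 * Usage sketch (added for illustration, not part of the original driver):
 * a client typically sizes each queue with tegra_ivc_total_queue_size() and
 * passes both regions to tegra_ivc_init(). All names other than the IVC API
 * itself are hypothetical; rx_phys and tx_phys are passed as 0 here because
 * a peer device is supplied, in which case the queues are DMA-mapped and
 * those arguments are ignored.
 *
 *	size_t size = tegra_ivc_total_queue_size(num_frames * frame_size);
 *	void *rx = shmem, *tx = shmem + size;
 *	int err;
 *
 *	err = tegra_ivc_init(&chan->ivc, peer_dev, rx, 0, tx, 0, num_frames,
 *			     frame_size, my_notify, chan);
 *	if (err < 0)
 *		return err;
 */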

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);