shmem.c

// SPDX-License-Identifier: GPL-2.0
/*
 * For transport using shared mem structure.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/processor.h>
#include <linux/types.h>

#include "common.h"

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};
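
/*
 * Prepare the shared memory area for a new transmission: wait for the
 * platform to mark the channel free, claim it, set the interrupt flag
 * according to the polling mode, then write the length, header and
 * payload of @xfer.
 */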
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
		      struct scmi_xfer *xfer)
{
	/*
	 * Ideally the channel must be free by now, unless the OS timed out
	 * the last request and the platform continued to process it. Wait
	 * until the platform releases the shared memory, otherwise we may
	 * end up overwriting its response with the new message payload or
	 * vice versa.
	 */
	spin_until_cond(ioread32(&shmem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
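
/* Read back the raw SCMI message header currently in the shared memory area. */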
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
	return ioread32(&shmem->msg_header);
}
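
/*
 * Copy the platform's response out of the shared memory area: the first
 * payload word is the status code, the remainder is the returned data.
 */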
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer)
{
	xfer->hdr.status = ioread32(shmem->msg_payload);
	/* Skip the length of header and status in shmem area, i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len,
			     ioread32(&shmem->length) - 8);

	/* Take a copy to the rx buffer. */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
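
/*
 * Copy a notification out of the shared memory area; notifications carry
 * no status word, so only the 4-byte message header is skipped.
 */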
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
			      size_t max_len, struct scmi_xfer *xfer)
{
	/* Skip only the length of the header in shmem area, i.e. 4 bytes */
	xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);

	/* Take a copy to the rx buffer. */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
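
/* Release the channel by setting the FREE bit in its status word. */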
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}
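
/*
 * Check whether the transfer @xfer has completed: the token in the message
 * header must match and the channel status must report FREE or ERROR.
 */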
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
		     struct scmi_xfer *xfer)
{
	u16 xfer_id;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&shmem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}