xrp_dsp.c

/*
 * Copyright (c) 2016 - 2017 Cadence Design Systems Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xtensa/xtruntime.h>

#include "xrp_api.h"
#include "xrp_debug.h"
#include "xrp_dsp_hw.h"
#include "xrp_dsp_sync.h"
#include "xrp_dsp_user.h"
#include "xrp_ns.h"
#include "xrp_types.h"
#include "xrp_kernel_dsp_interface.h"
extern char xrp_dsp_comm_base_magic[] __attribute__((weak));
void *xrp_dsp_comm_base = &xrp_dsp_comm_base_magic;
//void *xrp_dsp_comm_base = 0x98000000;

static int manage_cache;

int dsp_log_level = FW_DEBUG_LOG_MODE_ERR;

#define MAX_STACK_BUFFERS 16
#define MAX_TLV_LENGTH 0x10000
/* DSP side XRP API implementation */

struct xrp_refcounted {
        unsigned long count;
};

struct xrp_device {
        struct xrp_refcounted ref;
        void *dsp_cmd;
};

struct xrp_buffer {
        struct xrp_refcounted ref;
        void *ptr;
        size_t size;
        unsigned long map_count;
        enum xrp_access_flags allowed_access;
        enum xrp_access_flags map_flags;
};

struct xrp_buffer_group {
        struct xrp_refcounted ref;
        size_t n_buffers;
        struct xrp_buffer *buffer;
};
static struct xrp_cmd_ns_map ns_map;

static size_t dsp_hw_queue_entry_size = XRP_DSP_CMD_STRIDE;
static struct xrp_device dsp_device0;
static int n_dsp_devices;
static struct xrp_device **dsp_device;
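
/*
 * Cache management: the host communicates through shared memory that may
 * not be coherent with the DSP data cache.  Once the user firmware calls
 * xrp_device_enable_cache() with a non-zero argument, every access to
 * shared structures below is bracketed by explicit invalidate/writeback.
 *
 * A minimal usage sketch (hypothetical user firmware; xrp_open_device()
 * and xrp_device_enable_cache() are the entry points defined in this
 * file):
 *
 *      enum xrp_status s;
 *      struct xrp_device *dev = xrp_open_device(0, &s);
 *
 *      if (s == XRP_STATUS_SUCCESS)
 *              xrp_device_enable_cache(dev, 1);
 */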
void xrp_device_enable_cache(struct xrp_device *device, int enable)
{
        (void)device;
        manage_cache = enable;
}

static inline void dcache_region_invalidate(void *p, size_t sz)
{
        if (manage_cache)
                xthal_dcache_region_invalidate(p, sz);
}

static inline void dcache_region_writeback(void *p, size_t sz)
{
        if (manage_cache)
                xthal_dcache_region_writeback(p, sz);
}
static inline void set_status(enum xrp_status *status, enum xrp_status v)
{
        if (status)
                *status = v;
}

static void retain_refcounted(struct xrp_refcounted *ref)
{
        if (ref)
                ++ref->count;
}

static void release_refcounted(struct xrp_refcounted *ref)
{
        if (ref)
                --ref->count;
}
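
/*
 * Device 0 is the statically allocated primary command queue located at
 * xrp_dsp_comm_base.  Devices 1..n-1 are created on demand when the host
 * announces additional hardware queues during synchronization (see
 * update_hw_queues() below).
 */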
struct xrp_device *xrp_open_device(int idx, enum xrp_status *status)
{
        if (idx == 0) {
                dsp_device0.dsp_cmd = xrp_dsp_comm_base;
                set_status(status, XRP_STATUS_SUCCESS);
                return &dsp_device0;
        } else if (idx > 0 && idx < n_dsp_devices) {
                xrp_retain_device(dsp_device[idx]);
                set_status(status, XRP_STATUS_SUCCESS);
                return dsp_device[idx];
        } else {
                set_status(status, XRP_STATUS_FAILURE);
                return NULL;
        }
}

void xrp_retain_device(struct xrp_device *device)
{
        retain_refcounted(&device->ref);
}

void xrp_release_device(struct xrp_device *device)
{
        release_refcounted(&device->ref);
}
struct xrp_buffer *xrp_create_buffer(struct xrp_device *device,
                                     size_t size, void *host_ptr,
                                     enum xrp_status *status)
{
        /* Buffers cannot be created on the DSP side; they always arrive
         * from the host inside a command's buffer group. */
        (void)device;
        (void)size;
        (void)host_ptr;
        set_status(status, XRP_STATUS_FAILURE);
        return NULL;
}

void xrp_retain_buffer(struct xrp_buffer *buffer)
{
        retain_refcounted(&buffer->ref);
}

void xrp_release_buffer(struct xrp_buffer *buffer)
{
        release_refcounted(&buffer->ref);
}
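
/*
 * Mapping validates the requested range and access mode against the
 * restrictions recorded when the buffer was built from host data: a
 * buffer the host passed read-only may only be mapped with XRP_READ.
 * map_flags accumulates every access mode actually requested, which
 * process_command() later reports back to the host for cache maintenance.
 *
 * Typical use inside a command handler (sketch; `grp` and `len` stand for
 * the handler's buffer group and a caller-known payload size):
 *
 *      enum xrp_status s;
 *      struct xrp_buffer *buf = xrp_get_buffer_from_group(grp, 0, &s);
 *      void *p = xrp_map_buffer(buf, 0, len, XRP_READ_WRITE, &s);
 *
 *      ... process the data at p ...
 *
 *      xrp_unmap_buffer(buf, p, &s);
 *      xrp_release_buffer(buf);
 */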
void *xrp_map_buffer(struct xrp_buffer *buffer, size_t offset, size_t size,
                     enum xrp_access_flags map_flags, enum xrp_status *status)
{
        if (offset <= buffer->size &&
            size <= buffer->size - offset &&
            (buffer->allowed_access & map_flags) == map_flags) {
                retain_refcounted(&buffer->ref);
                ++buffer->map_count;
                buffer->map_flags |= map_flags;
                set_status(status, XRP_STATUS_SUCCESS);
                return buffer->ptr + offset;
        }
        set_status(status, XRP_STATUS_FAILURE);
        return NULL;
}

void xrp_unmap_buffer(struct xrp_buffer *buffer, void *p,
                      enum xrp_status *status)
{
        if (p >= buffer->ptr && (size_t)(p - buffer->ptr) <= buffer->size) {
                --buffer->map_count;
                release_refcounted(&buffer->ref);
                set_status(status, XRP_STATUS_SUCCESS);
        } else {
                set_status(status, XRP_STATUS_FAILURE);
        }
}
void xrp_buffer_get_info(struct xrp_buffer *buffer, enum xrp_buffer_info info,
                         void *out, size_t out_sz, enum xrp_status *status)
{
        enum xrp_status s = XRP_STATUS_FAILURE;
        size_t sz;
        void *ptr;

        switch (info) {
        case XRP_BUFFER_SIZE_SIZE_T:
                sz = sizeof(buffer->size);
                ptr = &buffer->size;
                break;

        case XRP_BUFFER_HOST_POINTER_PTR:
                sz = sizeof(void *);
                ptr = &buffer->ptr;
                break;

        default:
                goto out;
        }

        if (sz == out_sz) {
                memcpy(out, ptr, sz);
                s = XRP_STATUS_SUCCESS;
        }
out:
        set_status(status, s);
}
struct xrp_buffer_group *xrp_create_buffer_group(enum xrp_status *status)
{
        /* Buffer groups are created by the host; the DSP side only wraps
         * an incoming command's buffers in a temporary group. */
        set_status(status, XRP_STATUS_FAILURE);
        return NULL;
}

void xrp_retain_buffer_group(struct xrp_buffer_group *group)
{
        retain_refcounted(&group->ref);
}

void xrp_release_buffer_group(struct xrp_buffer_group *group)
{
        release_refcounted(&group->ref);
}

size_t xrp_add_buffer_to_group(struct xrp_buffer_group *group,
                               struct xrp_buffer *buffer,
                               enum xrp_access_flags access_flags,
                               enum xrp_status *status)
{
        (void)group;
        (void)buffer;
        (void)access_flags;
        set_status(status, XRP_STATUS_FAILURE);
        return (size_t)-1;
}

struct xrp_buffer *xrp_get_buffer_from_group(struct xrp_buffer_group *group,
                                             size_t idx,
                                             enum xrp_status *status)
{
        if (idx < group->n_buffers) {
                set_status(status, XRP_STATUS_SUCCESS);
                xrp_retain_buffer(group->buffer + idx);
                return group->buffer + idx;
        }
        set_status(status, XRP_STATUS_FAILURE);
        return NULL;
}
void xrp_buffer_group_get_info(struct xrp_buffer_group *group,
                               enum xrp_buffer_group_info info, size_t idx,
                               void *out, size_t out_sz,
                               enum xrp_status *status)
{
        enum xrp_status s = XRP_STATUS_FAILURE;
        size_t sz;
        void *ptr;

        switch (info) {
        case XRP_BUFFER_GROUP_BUFFER_FLAGS_ENUM:
                if (idx >= group->n_buffers)
                        goto out;
                sz = sizeof(group->buffer[idx].allowed_access);
                ptr = &group->buffer[idx].allowed_access;
                break;

        case XRP_BUFFER_GROUP_SIZE_SIZE_T:
                sz = sizeof(group->n_buffers);
                ptr = &group->n_buffers;
                break;

        default:
                goto out;
        }

        if (sz == out_sz) {
                memcpy(out, ptr, sz);
                s = XRP_STATUS_SUCCESS;
        }
out:
        set_status(status, s);
}
/* DSP side request handling */
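
/*
 * The host may announce n hardware command queues (with per-queue
 * priorities) in the synchronization TLV stream.  Entry 0 of the device
 * array stays unused because queue 0 is always the static dsp_device0;
 * entries 1..n-1 each get a command slot at a fixed stride from
 * xrp_dsp_comm_base.  The user firmware is notified through its optional
 * xrp_user_create_queues() hook so it can start one dispatcher per queue.
 */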
static int update_hw_queues(uint32_t queue_priority[], int n)
{
        if (xrp_user_create_queues) {
                struct xrp_device **new_device = malloc(n * sizeof(void *));
                struct xrp_device **old_device = dsp_device;
                int n_old = n_dsp_devices;
                int i;

                if (!new_device) {
                        ps_debug("%s: device array allocation failed\n",
                                 __func__);
                        return 0;
                }
                for (i = 1; i < n; ++i) {
                        new_device[i] = calloc(1, sizeof(struct xrp_device));
                        if (!new_device[i]) {
                                ps_debug("%s: device allocation failed\n",
                                         __func__);
                                while (--i)
                                        xrp_release_device(new_device[i]);
                                free(new_device);
                                return 0;
                        }
                        new_device[i]->dsp_cmd = xrp_dsp_comm_base +
                                i * dsp_hw_queue_entry_size;
                        xrp_retain_device(new_device[i]);
                }
                dsp_device = new_device;
                n_dsp_devices = n;
                for (i = 1; i < n_old; ++i)
                        xrp_release_device(old_device[i]);
                free(old_device);

                return xrp_user_create_queues(n, queue_priority) ==
                        XRP_STATUS_SUCCESS;
        } else {
                return 0;
        }
}
static int set_debug_profile_info(void *data)
{
        struct xrp_dsp_debug_info *debug_info = data;

        if (xrp_hw_panic_init((void *)debug_info->panic_addr))
                return -1;
        dsp_log_level = debug_info->log_level;
        return 0;
}
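
/*
 * Synchronization data arrives as a TLV (type/length/value) stream: each
 * element carries a 32-bit type, a length in bytes and a payload of
 * 32-bit words.  The DSP acknowledges elements it understood by setting
 * XRP_DSP_SYNC_TYPE_ACCEPT in the type field and writing the element
 * back; the stream is terminated by XRP_DSP_SYNC_TYPE_LAST.
 */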
static void process_sync_data(struct xrp_device *device,
                              struct xrp_dsp_tlv *data)
{
        (void)device;

        for (;; data = (void *)(data->value + ((data->length + 3) / 4))) {
                dcache_region_invalidate(data, sizeof(*data));
                if (data->length >= MAX_TLV_LENGTH) {
                        ps_debug("%s: suspicious length, data = %p, length = %u\n",
                                 __func__, data, (unsigned)data->length);
                        break;
                }
                dcache_region_invalidate(data->value, data->length);
                switch (data->type & XRP_DSP_SYNC_TYPE_MASK) {
                case XRP_DSP_SYNC_TYPE_LAST:
                        return;

                case XRP_DSP_SYNC_TYPE_HW_SPEC_DATA:
                        if (xrp_hw_set_sync_data)
                                xrp_hw_set_sync_data(data->value);
                        data->type |= XRP_DSP_SYNC_TYPE_ACCEPT;
                        break;

                case XRP_DSP_SYNC_TYPE_HW_QUEUES:
                        if (update_hw_queues(data->value,
                                             data->length / 4))
                                data->type |= XRP_DSP_SYNC_TYPE_ACCEPT;
                        break;

                case XRP_DSP_SYNC_TYPE_HW_DEBUG_INFO:
                        if (!set_debug_profile_info(data->value))
                                data->type |= XRP_DSP_SYNC_TYPE_ACCEPT;
                        break;

                default:
                        ps_debug("%s, unrecognized TLV: type = 0x%08x, length = %u\n",
                                 __func__, data->type, data->length);
                        continue;
                }
                /* Write back the whole element: the updated type word plus
                 * the payload. */
                dcache_region_writeback(data, sizeof(*data) + data->length);
        }
}
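
/*
 * Initial handshake, matching the host-side v2 synchronization sequence:
 * the host writes XRP_DSP_SYNC_START, the DSP answers
 * XRP_DSP_SYNC_DSP_READY_V2, the host then posts the sync TLVs and sets
 * XRP_DSP_SYNC_HOST_TO_DSP.  The final XRP_DSP_SYNC_DSP_TO_HOST step is
 * deferred to xrp_device_init_done() so that user initialization can run
 * in between.
 */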
static void do_handshake(struct xrp_device *device)
{
        struct xrp_dsp_sync_v2 *shared_sync = device->dsp_cmd;
        uint32_t v;

        while (xrp_l32ai(&shared_sync->sync) != XRP_DSP_SYNC_START) {
                dcache_region_invalidate(&shared_sync->sync,
                                         sizeof(shared_sync->sync));
        }
        xrp_s32ri(XRP_DSP_SYNC_DSP_READY_V2, &shared_sync->sync);
        dcache_region_writeback(&shared_sync->sync,
                                sizeof(shared_sync->sync));
        for (;;) {
                dcache_region_invalidate(&shared_sync->sync,
                                         sizeof(shared_sync->sync));
                v = xrp_l32ai(&shared_sync->sync);
                if (v == XRP_DSP_SYNC_HOST_TO_DSP)
                        break;
                if (v != XRP_DSP_SYNC_DSP_READY_V2)
                        return;
        }
        process_sync_data(device, shared_sync->hw_sync_data);

        // xrp_s32ri(XRP_DSP_SYNC_DSP_TO_HOST, &shared_sync->sync);
        // dcache_region_writeback(&shared_sync->sync,
        //                         sizeof(shared_sync->sync));
        //
        // xrp_hw_wait_device_irq();
        //xrp_hw_send_host_irq();
        pr_debug("%s: done\n", __func__);
}
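
/*
 * Command slot ownership is tracked with two flag bits: a slot holds a
 * fresh request when REQUEST_VALID is set and RESPONSE_VALID is not.
 * Completing a request sets RESPONSE_VALID (and stores
 * XRP_DSP_REPORT_TO_HOST_FLAG in cmd_flag for the host driver), then
 * raises the host IRQ.
 */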
static inline int xrp_request_valid(struct xrp_dsp_cmd *dsp_cmd,
                                    uint32_t *pflags)
{
        uint32_t flags = xrp_l32ai(&dsp_cmd->flags);

        *pflags = flags;
        return (flags & (XRP_DSP_CMD_FLAG_REQUEST_VALID |
                         XRP_DSP_CMD_FLAG_RESPONSE_VALID)) ==
                XRP_DSP_CMD_FLAG_REQUEST_VALID;
}

static void complete_request(struct xrp_dsp_cmd *dsp_cmd, uint32_t flags)
{
        flags |= XRP_DSP_CMD_FLAG_RESPONSE_VALID;

        dcache_region_writeback(dsp_cmd, sizeof(*dsp_cmd));
        xrp_s32ri(flags, &dsp_cmd->flags);
        xrp_s32ri(XRP_DSP_REPORT_TO_HOST_FLAG, &dsp_cmd->cmd_flag);
        dcache_region_writeback(&dsp_cmd->flags, sizeof(dsp_cmd->flags));
        xrp_hw_send_host_irq();
}

static enum xrp_access_flags dsp_buffer_allowed_access(__u32 flags)
{
        return flags == XRP_DSP_BUFFER_FLAG_READ ?
                XRP_READ : XRP_READ_WRITE;
}
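
/*
 * Default command handler for the anonymous (no-namespace) queue.  It is
 * declared weak so that user firmware can take over simply by defining
 * its own xrp_run_command(); this stub only reports failure.
 */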
void xrp_run_command(const void *in_data, size_t in_data_size,
                     void *out_data, size_t out_data_size,
                     struct xrp_buffer_group *buffer_group,
                     enum xrp_status *status) __attribute__((weak));

void xrp_run_command(const void *in_data, size_t in_data_size,
                     void *out_data, size_t out_data_size,
                     struct xrp_buffer_group *buffer_group,
                     enum xrp_status *status)
{
        (void)in_data;
        (void)in_data_size;
        (void)out_data;
        (void)out_data_size;
        (void)buffer_group;
        *status = XRP_STATUS_FAILURE;
}

static inline enum xrp_status
xrp_run_command_handler(void *handler_context,
                        const void *in_data, size_t in_data_size,
                        void *out_data, size_t out_data_size,
                        struct xrp_buffer_group *buffer_group)
{
        enum xrp_status status = XRP_STATUS_FAILURE;

        (void)handler_context;
        xrp_run_command(in_data, in_data_size,
                        out_data, out_data_size,
                        buffer_group, &status);
        return status;
}
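
/*
 * Process one command from the given queue:
 *  1. pick the handler: the weak default above, or the one registered for
 *     the command's namespace ID;
 *  2. locate buffer metadata and out-of-line data, invalidating caches for
 *     everything the handler may read;
 *  3. wrap the host buffers in a temporary xrp_buffer_group (on the stack
 *     for up to MAX_STACK_BUFFERS buffers, else on the heap) and run the
 *     handler;
 *  4. report per-buffer access back to the host, write back dirty data
 *     and complete the request.
 */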
static enum xrp_status process_command(struct xrp_device *device,
                                       uint32_t flags)
{
        enum xrp_status status;
        struct xrp_dsp_cmd *dsp_cmd = device->dsp_cmd;
        size_t n_buffers = dsp_cmd->buffer_size / sizeof(struct xrp_dsp_buffer);
        struct xrp_dsp_buffer *dsp_buffer;
        struct xrp_buffer_group buffer_group;
        struct xrp_buffer sbuffer[n_buffers >= 1 && n_buffers <= MAX_STACK_BUFFERS ?
                                  n_buffers : 1];
        struct xrp_buffer *buffer = sbuffer;
        xrp_command_handler *command_handler = xrp_run_command_handler;
        void *handler_context = NULL;
        size_t i;

        if (dsp_cmd->flags & XRP_DSP_CMD_FLAG_REQUEST_NSID) {
                struct xrp_cmd_ns *cmd_ns = xrp_find_cmd_ns(&ns_map,
                                                            dsp_cmd->nsid);
                if (xrp_cmd_ns_match(dsp_cmd->nsid, cmd_ns)) {
                        command_handler = cmd_ns->handler;
                        handler_context = cmd_ns->handler_context;
                } else {
                        flags |= XRP_DSP_CMD_FLAG_RESPONSE_DELIVERY_FAIL;
                        status = XRP_STATUS_FAILURE;
                        goto out;
                }
        }
        if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
                dsp_buffer = (void *)dsp_cmd->buffer_addr;
                dcache_region_invalidate(dsp_buffer,
                                         n_buffers * sizeof(*dsp_buffer));
        } else {
                dsp_buffer = (void *)&dsp_cmd->buffer_data;
        }
        if (dsp_cmd->in_data_size > sizeof(dsp_cmd->in_data)) {
                dcache_region_invalidate((void *)dsp_cmd->in_data_addr,
                                         dsp_cmd->in_data_size);
        }
        if (n_buffers > MAX_STACK_BUFFERS) {
                buffer = malloc(n_buffers * sizeof(*buffer));
                if (!buffer) {
                        status = XRP_STATUS_FAILURE;
                        goto out;
                }
        }

        /* Create buffers from incoming buffer data, put them to group.
         * Passed flags add some restrictions to possible buffer mapping
         * modes:
         * R only allows R
         * W and RW allow R, W or RW
         * (actually W only allows W and RW, but that's hard to express and
         * is not particularly useful)
         */
        for (i = 0; i < n_buffers; ++i) {
                buffer[i] = (struct xrp_buffer){
                        .allowed_access =
                                dsp_buffer_allowed_access(dsp_buffer[i].flags),
                        .ptr = (void *)dsp_buffer[i].addr,
                        .size = dsp_buffer[i].size,
                };
                if (buffer[i].allowed_access & XRP_READ) {
                        dcache_region_invalidate(buffer[i].ptr,
                                                 buffer[i].size);
                }
        }
        buffer_group = (struct xrp_buffer_group){
                .n_buffers = n_buffers,
                .buffer = buffer,
        };

        status = command_handler(handler_context,
                                 dsp_cmd->in_data_size > sizeof(dsp_cmd->in_data) ?
                                 (void *)dsp_cmd->in_data_addr : dsp_cmd->in_data,
                                 dsp_cmd->in_data_size,
                                 dsp_cmd->out_data_size > sizeof(dsp_cmd->out_data) ?
                                 (void *)dsp_cmd->out_data_addr : dsp_cmd->out_data,
                                 dsp_cmd->out_data_size,
                                 &buffer_group);

        if (status != XRP_STATUS_SUCCESS)
                flags |= XRP_DSP_CMD_FLAG_RESPONSE_DELIVERY_FAIL;

        /*
         * Update flags in the buffer data: what access actually took place,
         * to update caches on the host side.
         */
        for (i = 0; i < n_buffers; ++i) {
                __u32 buf_flags = 0;

                if (buffer[i].map_flags & XRP_READ)
                        buf_flags |= XRP_DSP_BUFFER_FLAG_READ;
                if (buffer[i].map_flags & XRP_WRITE)
                        buf_flags |= XRP_DSP_BUFFER_FLAG_WRITE;
                // pr_debug("%s: dsp_buffer[%d].flags = %d\n", __func__, i, buf_flags);
                dsp_buffer[i].flags = buf_flags;
                if (buffer[i].ref.count) {
                        ps_debug("%s: refcount leak on buffer %u\n",
                                 __func__, (unsigned)i);
                }
                if (buffer[i].map_count) {
                        ps_debug("%s: map_count leak on buffer %u\n",
                                 __func__, (unsigned)i);
                }
                if (buffer[i].map_flags & XRP_WRITE) {
                        dcache_region_writeback(buffer[i].ptr,
                                                buffer[i].size);
                }
        }
        if (buffer_group.ref.count)
                ps_debug("%s: refcount leak on buffer group\n", __func__);
        if (dsp_cmd->out_data_size > sizeof(dsp_cmd->out_data)) {
                dcache_region_writeback((void *)dsp_cmd->out_data_addr,
                                        dsp_cmd->out_data_size);
        }
        if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
                dcache_region_writeback(dsp_buffer,
                                        n_buffers * sizeof(*dsp_buffer));
        }
        if (n_buffers > MAX_STACK_BUFFERS)
                free(buffer);
out:
        complete_request(dsp_cmd, flags);
        return status;
}
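
/*
 * Namespace registration lets user firmware attach a dedicated handler to
 * a namespace ID instead of overriding xrp_run_command().  A sketch,
 * where my_nsid and my_handler are hypothetical user code:
 *
 *      static enum xrp_status my_handler(void *ctx,
 *                                        const void *in, size_t in_sz,
 *                                        void *out, size_t out_sz,
 *                                        struct xrp_buffer_group *grp);
 *
 *      enum xrp_status s;
 *      xrp_device_register_namespace(dev, my_nsid, my_handler, NULL, &s);
 */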
void xrp_device_register_namespace(struct xrp_device *device,
                                   const void *nsid,
                                   xrp_command_handler *handler,
                                   void *handler_context,
                                   enum xrp_status *status)
{
        (void)device;
        if (xrp_register_namespace(&ns_map,
                                   nsid, handler, handler_context))
                set_status(status, XRP_STATUS_SUCCESS);
        else
                set_status(status, XRP_STATUS_FAILURE);
}

void xrp_device_unregister_namespace(struct xrp_device *device,
                                     const void *nsid,
                                     enum xrp_status *status)
{
        (void)device;
        if (xrp_unregister_namespace(&ns_map, nsid))
                set_status(status, XRP_STATUS_SUCCESS);
        else
                set_status(status, XRP_STATUS_FAILURE);
}
enum xrp_status xrp_device_poll(struct xrp_device *device)
{
        uint32_t flags;

        dcache_region_invalidate(device->dsp_cmd,
                                 sizeof(struct xrp_dsp_cmd));
        if (xrp_request_valid(device->dsp_cmd, &flags))
                return XRP_STATUS_SUCCESS;
        else
                return XRP_STATUS_PENDING;
}

enum xrp_status xrp_device_dispatch(struct xrp_device *device)
{
        uint32_t flags;
        enum xrp_status status;

        dcache_region_invalidate(device->dsp_cmd,
                                 sizeof(struct xrp_dsp_cmd));
        if (!xrp_request_valid(device->dsp_cmd, &flags))
                return XRP_STATUS_PENDING;

        if (flags == XRP_DSP_SYNC_START) {
                do_handshake(device);
                status = XRP_STATUS_SUCCESS;
        } else {
                status = process_command(device, flags);
        }
        return status;
}
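
/*
 * A typical firmware main loop polls and dispatches until reset.  A
 * sketch of hypothetical user code; the wait primitive depends on the
 * platform port:
 *
 *      enum xrp_status s;
 *      struct xrp_device *dev = xrp_open_device(0, &s);
 *
 *      for (;;) {
 *              if (xrp_device_poll(dev) == XRP_STATUS_SUCCESS)
 *                      xrp_device_dispatch(dev);
 *              else
 *                      xrp_hw_wait_device_irq();
 *      }
 */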
enum xrp_status xrp_device_sync(struct xrp_device *device)
{
        uint32_t flags;

        dcache_region_invalidate(device->dsp_cmd,
                                 sizeof(struct xrp_dsp_cmd));
        if (!xrp_request_valid(device->dsp_cmd, &flags))
                return XRP_STATUS_PENDING;

        if (flags == XRP_DSP_SYNC_START) {
                do_handshake(device);
                return XRP_STATUS_SUCCESS;
        }
        return XRP_STATUS_FAILURE;
}

enum xrp_status xrp_device_init_done(struct xrp_device *device)
{
        struct xrp_dsp_sync_v2 *shared_sync = device->dsp_cmd;

        xrp_s32ri(XRP_DSP_SYNC_DSP_TO_HOST, &shared_sync->sync);
        dcache_region_writeback(&shared_sync->sync,
                                sizeof(shared_sync->sync));
        xrp_hw_wait_device_irq();
        return XRP_STATUS_SUCCESS;
}

enum xrp_status xrp_device_command_dispatch(struct xrp_device *device)
{
        uint32_t flags;

        dcache_region_invalidate(device->dsp_cmd,
                                 sizeof(struct xrp_dsp_cmd));
        if (!xrp_request_valid(device->dsp_cmd, &flags))
                return XRP_STATUS_FAILURE;
        if (flags == XRP_DSP_SYNC_START)
                return XRP_STATUS_FAILURE;
        return process_command(device, flags);
}
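
/*
 * Report path: besides command responses, the DSP can push asynchronous
 * reports to the host through a dedicated region of the shared command
 * slot.  xrp_get_report_buffer() hands out the host-provided payload
 * area, xrp_send_report() tags it with a non-zero report id and raises
 * the host IRQ, and xrp_get_report_status() checks whether the previous
 * report has been consumed: a report is pending while report_id is still
 * non-zero.
 */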
enum xrp_status xrp_get_report_buffer(struct xrp_device *device,
                                      void **buffer, void *buffer_size)
{
        struct xrp_dsp_cmd *share_region = (struct xrp_dsp_cmd *)device->dsp_cmd;
        uint32_t report_status = xrp_l32ai(&share_region->report_status);

        if (report_status == XRP_DSP_REPORT_INVALID) {
                ps_wrn("Driver kernel report is not set up\n");
                return XRP_STATUS_PENDING;
        }
        // *buffer = share_region->report_paylad_size > XRP_DSP_CMD_INLINE_DATA_SIZE ?
        //        share_region->report_addr : share_region->report_data;
        *buffer = (void *)share_region->report_addr;
        *(uint32_t *)buffer_size = share_region->report_paylad_size;
        return XRP_STATUS_SUCCESS;
}

enum xrp_status xrp_send_report(struct xrp_device *device, uint32_t id)
{
        struct xrp_dsp_cmd *share_region = (struct xrp_dsp_cmd *)device->dsp_cmd;
        uint32_t current_id = xrp_l32ai(&share_region->report_id);

        if (current_id != 0) {
                ps_err("previous report:%u still pending\n",
                       (unsigned)current_id);
                return XRP_STATUS_PENDING;
        }
        xrp_s32ri(id, &share_region->report_id);
        xrp_hw_send_host_irq();
        // pr_debug("report send to Host\n");
        return XRP_STATUS_SUCCESS;
}

enum xrp_status xrp_get_report_status(struct xrp_device *device)
{
        struct xrp_dsp_cmd *share_region = (struct xrp_dsp_cmd *)device->dsp_cmd;
        uint32_t current_id = xrp_l32ai(&share_region->report_id);

        if (current_id != 0) {
                ps_wrn("previous report:%u still pending\n",
                       (unsigned)current_id);
                return XRP_STATUS_PENDING;
        }
        return XRP_STATUS_SUCCESS;
}
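
/*
 * Fatal exception handling: dump the cause registers and hang so the
 * state stays inspectable from a debugger instead of silently
 * restarting.
 */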
static void exception(ExcFrame *ef)
{
        unsigned long exccause, excvaddr, ps, epc1;

        exccause = ef->exccause & EXCCAUSE_FULLTYPE_MASK;
        excvaddr = ef->excvaddr;
        ps = ef->ps;
        epc1 = ef->pc;
        printf("%s: EXCCAUSE = %lu, EXCVADDR = 0x%08lx, PS = 0x%08lx, EPC1 = 0x%08lx\n",
               __func__, exccause, excvaddr, ps, epc1);
        hang();
}

static void register_exception_handlers(void)
{
        static const int cause[] = {
                EXCCAUSE_INSTRUCTION,
                EXCCAUSE_ADDRESS,
                EXCCAUSE_HARDWARE,
                EXCCAUSE_MEMORY,
        };
        unsigned i;

        for (i = 0; i < sizeof(cause) / sizeof(cause[0]); ++i)
                _xtos_set_exception_handler(cause[i], exception);
}
void xrp_user_initialize(enum xrp_status *status)
{
        ps_debug("%s\n", __func__);
        register_exception_handlers();
        // atexit(hang);
        if (status != NULL)
                *status = XRP_STATUS_SUCCESS;
}