xrp_dsp.c

/*
 * Copyright (c) 2016 - 2017 Cadence Design Systems Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xtensa/xtruntime.h>

#include "xrp_api.h"
#include "xrp_debug.h"
#include "xrp_dsp_hw.h"
#include "xrp_dsp_sync.h"
#include "xrp_dsp_user.h"
#include "xrp_ns.h"
#include "xrp_types.h"
#include "xrp_kernel_dsp_interface.h"

extern char xrp_dsp_comm_base_magic[] __attribute__((weak));
void *xrp_dsp_comm_base = &xrp_dsp_comm_base_magic;
//void *xrp_dsp_comm_base = 0x98000000;

static int manage_cache;
int dsp_log_level = FW_DEBUG_LOG_MODE_ERR;

#define MAX_STACK_BUFFERS 16
#define MAX_TLV_LENGTH 0x10000

/* DSP side XRP API implementation */

struct xrp_refcounted {
    unsigned long count;
};

struct xrp_device {
    struct xrp_refcounted ref;
    void *dsp_cmd;
};

struct xrp_buffer {
    struct xrp_refcounted ref;
    void *ptr;
    size_t size;
    unsigned long map_count;
    enum xrp_access_flags allowed_access;
    enum xrp_access_flags map_flags;
};

struct xrp_buffer_group {
    struct xrp_refcounted ref;
    size_t n_buffers;
    struct xrp_buffer *buffer;
};

static struct xrp_cmd_ns_map ns_map;
static size_t dsp_hw_queue_entry_size = XRP_DSP_CMD_STRIDE;
static struct xrp_device dsp_device0;
static int n_dsp_devices;
static struct xrp_device **dsp_device;

void xrp_device_enable_cache(struct xrp_device *device, int enable)
{
    (void)device;
    manage_cache = enable;
}

static inline void dcache_region_invalidate(void *p, size_t sz)
{
    if (manage_cache)
        xthal_dcache_region_invalidate(p, sz);
}

static inline void dcache_region_writeback(void *p, size_t sz)
{
    if (manage_cache)
        xthal_dcache_region_writeback(p, sz);
}

static inline void set_status(enum xrp_status *status, enum xrp_status v)
{
    if (status)
        *status = v;
}

/* Reference counting is not atomic; single-threaded DSP-side use is assumed. */
static void retain_refcounted(struct xrp_refcounted *ref)
{
    if (ref)
        ++ref->count;
}

static void release_refcounted(struct xrp_refcounted *ref)
{
    if (ref)
        --ref->count;
}

struct xrp_device *xrp_open_device(int idx, enum xrp_status *status)
{
    if (idx == 0) {
        dsp_device0.dsp_cmd = xrp_dsp_comm_base;
        set_status(status, XRP_STATUS_SUCCESS);
        return &dsp_device0;
    } else if (idx < n_dsp_devices) {
        xrp_retain_device(dsp_device[idx]);
        set_status(status, XRP_STATUS_SUCCESS);
        return dsp_device[idx];
    } else {
        set_status(status, XRP_STATUS_FAILURE);
        return NULL;
    }
}

void xrp_retain_device(struct xrp_device *device)
{
    retain_refcounted(&device->ref);
}

void xrp_release_device(struct xrp_device *device)
{
    release_refcounted(&device->ref);
}

/* Buffers are created on the host side only; this always fails on the DSP. */
struct xrp_buffer *xrp_create_buffer(struct xrp_device *device,
                                     size_t size, void *host_ptr,
                                     enum xrp_status *status)
{
    (void)device;
    (void)size;
    (void)host_ptr;
    set_status(status, XRP_STATUS_FAILURE);
    return NULL;
}

void xrp_retain_buffer(struct xrp_buffer *buffer)
{
    retain_refcounted(&buffer->ref);
}

void xrp_release_buffer(struct xrp_buffer *buffer)
{
    release_refcounted(&buffer->ref);
}

void *xrp_map_buffer(struct xrp_buffer *buffer, size_t offset, size_t size,
                     enum xrp_access_flags map_flags, enum xrp_status *status)
{
    if (offset <= buffer->size &&
        size <= buffer->size - offset &&
        (buffer->allowed_access & map_flags) == map_flags) {
        retain_refcounted(&buffer->ref);
        ++buffer->map_count;
        buffer->map_flags |= map_flags;
        set_status(status, XRP_STATUS_SUCCESS);
        return buffer->ptr + offset;
    }
    set_status(status, XRP_STATUS_FAILURE);
    return NULL;
}

void xrp_unmap_buffer(struct xrp_buffer *buffer, void *p,
                      enum xrp_status *status)
{
    if (p >= buffer->ptr && (size_t)(p - buffer->ptr) <= buffer->size) {
        --buffer->map_count;
        release_refcounted(&buffer->ref);
        set_status(status, XRP_STATUS_SUCCESS);
    } else {
        set_status(status, XRP_STATUS_FAILURE);
    }
}
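
/*
 * Usage sketch: how a registered command handler might map a shared
 * buffer for in-place processing. Compiled out; the handler name, the
 * use of buffer index 0 and the XRP_READ_WRITE access are illustrative
 * assumptions, not part of this file's API surface.
 */
#if 0
static enum xrp_status example_handler(void *ctx,
                                       const void *in_data, size_t in_data_size,
                                       void *out_data, size_t out_data_size,
                                       struct xrp_buffer_group *group)
{
    enum xrp_status s;
    struct xrp_buffer *buf = xrp_get_buffer_from_group(group, 0, &s);
    size_t sz;
    void *p;

    if (s != XRP_STATUS_SUCCESS)
        return s;
    /* Query the buffer size through the public accessor. */
    xrp_buffer_get_info(buf, XRP_BUFFER_SIZE_SIZE_T, &sz, sizeof(sz), &s);
    if (s == XRP_STATUS_SUCCESS) {
        p = xrp_map_buffer(buf, 0, sz, XRP_READ_WRITE, &s);
        if (s == XRP_STATUS_SUCCESS) {
            memset(p, 0, sz);   /* process the payload in place */
            xrp_unmap_buffer(buf, p, &s);
        }
    }
    xrp_release_buffer(buf);
    return s;
}
#endif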
void xrp_buffer_get_info(struct xrp_buffer *buffer, enum xrp_buffer_info info,
                         void *out, size_t out_sz, enum xrp_status *status)
{
    enum xrp_status s = XRP_STATUS_FAILURE;
    size_t sz;
    void *ptr;

    switch (info) {
    case XRP_BUFFER_SIZE_SIZE_T:
        sz = sizeof(buffer->size);
        ptr = &buffer->size;
        break;
    case XRP_BUFFER_HOST_POINTER_PTR:
        sz = sizeof(void *);
        ptr = &buffer->ptr;
        break;
    default:
        goto out;
    }
    if (sz == out_sz) {
        memcpy(out, ptr, sz);
        s = XRP_STATUS_SUCCESS;
    }
out:
    set_status(status, s);
}

/* Buffer groups are created on the host side only; this always fails here. */
struct xrp_buffer_group *xrp_create_buffer_group(enum xrp_status *status)
{
    set_status(status, XRP_STATUS_FAILURE);
    return NULL;
}

void xrp_retain_buffer_group(struct xrp_buffer_group *group)
{
    retain_refcounted(&group->ref);
}

void xrp_release_buffer_group(struct xrp_buffer_group *group)
{
    release_refcounted(&group->ref);
}

size_t xrp_add_buffer_to_group(struct xrp_buffer_group *group,
                               struct xrp_buffer *buffer,
                               enum xrp_access_flags access_flags,
                               enum xrp_status *status)
{
    (void)group;
    (void)buffer;
    (void)access_flags;
    set_status(status, XRP_STATUS_FAILURE);
    return (size_t)-1;
}

struct xrp_buffer *xrp_get_buffer_from_group(struct xrp_buffer_group *group,
                                             size_t idx,
                                             enum xrp_status *status)
{
    if (idx < group->n_buffers) {
        set_status(status, XRP_STATUS_SUCCESS);
        xrp_retain_buffer(group->buffer + idx);
        return group->buffer + idx;
    }
    set_status(status, XRP_STATUS_FAILURE);
    return NULL;
}

void xrp_buffer_group_get_info(struct xrp_buffer_group *group,
                               enum xrp_buffer_group_info info, size_t idx,
                               void *out, size_t out_sz,
                               enum xrp_status *status)
{
    enum xrp_status s = XRP_STATUS_FAILURE;
    size_t sz;
    void *ptr;

    switch (info) {
    case XRP_BUFFER_GROUP_BUFFER_FLAGS_ENUM:
        if (idx >= group->n_buffers)
            goto out;
        sz = sizeof(group->buffer[idx].allowed_access);
        ptr = &group->buffer[idx].allowed_access;
        break;
    case XRP_BUFFER_GROUP_SIZE_SIZE_T:
        sz = sizeof(group->n_buffers);
        ptr = &group->n_buffers;
        break;
    default:
        goto out;
    }
    if (sz == out_sz) {
        memcpy(out, ptr, sz);
        s = XRP_STATUS_SUCCESS;
    }
out:
    set_status(status, s);
}
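
/*
 * Usage sketch: querying group metadata with the accessors above.
 * Compiled out; the function and variable names are illustrative only.
 */
#if 0
static void example_query_group(struct xrp_buffer_group *group)
{
    enum xrp_status s;
    size_t n;
    enum xrp_access_flags access;

    /* Number of buffers in the group. */
    xrp_buffer_group_get_info(group, XRP_BUFFER_GROUP_SIZE_SIZE_T, 0,
                              &n, sizeof(n), &s);
    /* Allowed access flags of buffer 0. */
    xrp_buffer_group_get_info(group, XRP_BUFFER_GROUP_BUFFER_FLAGS_ENUM, 0,
                              &access, sizeof(access), &s);
}
#endif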
/* DSP side request handling */

static int update_hw_queues(uint32_t queue_priority[], int n)
{
    if (xrp_user_create_queues) {
        struct xrp_device **new_device = malloc(n * sizeof(void *));
        struct xrp_device **old_device = dsp_device;
        int n_old = n_dsp_devices;
        int i;

        if (!new_device) {
            ps_debug("%s: device array allocation failed\n",
                     __func__);
            return 0;
        }
        /* Entry 0 is the static dsp_device0; only 1..n-1 are heap-allocated. */
        for (i = 1; i < n; ++i) {
            new_device[i] = calloc(1, sizeof(struct xrp_device));
            if (!new_device[i]) {
                ps_debug("%s: device allocation failed\n",
                         __func__);
                while (--i)
                    xrp_release_device(new_device[i]);
                free(new_device);
                return 0;
            }
            new_device[i]->dsp_cmd = xrp_dsp_comm_base +
                i * dsp_hw_queue_entry_size;
            xrp_retain_device(new_device[i]);
        }
        dsp_device = new_device;
        n_dsp_devices = n;
        for (i = 1; i < n_old; ++i)
            xrp_release_device(old_device[i]);
        free(old_device);
        return xrp_user_create_queues(n, queue_priority) ==
            XRP_STATUS_SUCCESS;
    } else {
        return 0;
    }
}

static int set_debug_profile_info(void *data)
{
    struct xrp_dsp_debug_info *debug_info = data;

    if (xrp_hw_panic_init((void *)debug_info->panic_addr))
        return -1;
    dsp_log_level = debug_info->log_level;
    return 0;
}

static void process_sync_data(struct xrp_device *device,
                              struct xrp_dsp_tlv *data)
{
    (void)device;
    /* Walk the TLV stream; each payload is rounded up to whole 32-bit words. */
    for (;; data = (void *)(data->value + ((data->length + 3) / 4))) {
        dcache_region_invalidate(data, sizeof(*data));
        if (data->length >= MAX_TLV_LENGTH) {
            ps_debug("%s: suspicious length, data = %p, length = %u\n",
                     __func__, data, (unsigned)data->length);
            break;
        }
        dcache_region_invalidate(data->value, data->length);
        switch (data->type & XRP_DSP_SYNC_TYPE_MASK) {
        case XRP_DSP_SYNC_TYPE_LAST:
            return;
        case XRP_DSP_SYNC_TYPE_HW_SPEC_DATA:
            if (xrp_hw_set_sync_data)
                xrp_hw_set_sync_data(data->value);
            data->type |= XRP_DSP_SYNC_TYPE_ACCEPT;
            break;
        case XRP_DSP_SYNC_TYPE_HW_QUEUES:
            if (update_hw_queues(data->value,
                                 data->length / 4))
                data->type |= XRP_DSP_SYNC_TYPE_ACCEPT;
            break;
        case XRP_DSP_SYNC_TYPE_HW_DEBUG_INFO:
            if (!set_debug_profile_info(data->value))
                data->type |= XRP_DSP_SYNC_TYPE_ACCEPT;
            break;
        default:
            ps_debug("%s, unrecognized TLV: type = 0x%08x, length = %u\n",
                     __func__, data->type, data->length);
            continue;
        }
        /* Write back the TLV header together with its payload. */
        dcache_region_writeback(data, sizeof(*data) + data->length);
    }
}
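
/*
 * TLV stream layout handled above, as read from the code: each entry
 * carries a 32-bit type and a 32-bit length followed by the payload in
 * data->value, and the walk advances in 32-bit words, rounding the
 * payload up via (length + 3) / 4. The host terminates the stream with
 * XRP_DSP_SYNC_TYPE_LAST, and the DSP acknowledges each entry it
 * understood by setting XRP_DSP_SYNC_TYPE_ACCEPT in its type field.
 */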
static void do_handshake(struct xrp_device *device)
{
    struct xrp_dsp_sync_v2 *shared_sync = device->dsp_cmd;
    uint32_t v;

    while (xrp_l32ai(&shared_sync->sync) != XRP_DSP_SYNC_START) {
        dcache_region_invalidate(&shared_sync->sync,
                                 sizeof(shared_sync->sync));
    }
    xrp_s32ri(XRP_DSP_SYNC_DSP_READY_V2, &shared_sync->sync);
    dcache_region_writeback(&shared_sync->sync,
                            sizeof(shared_sync->sync));
    for (;;) {
        dcache_region_invalidate(&shared_sync->sync,
                                 sizeof(shared_sync->sync));
        v = xrp_l32ai(&shared_sync->sync);
        if (v == XRP_DSP_SYNC_HOST_TO_DSP)
            break;
        if (v != XRP_DSP_SYNC_DSP_READY_V2)
            return;
    }
    process_sync_data(device, shared_sync->hw_sync_data);
    xrp_s32ri(XRP_DSP_SYNC_DSP_TO_HOST, &shared_sync->sync);
    dcache_region_writeback(&shared_sync->sync,
                            sizeof(shared_sync->sync));
    xrp_hw_wait_device_irq();
    //xrp_hw_send_host_irq();
    pr_debug("%s: done\n", __func__);
}
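
/*
 * Handshake sequence implemented above: the DSP waits for the host to
 * write XRP_DSP_SYNC_START, answers with XRP_DSP_SYNC_DSP_READY_V2,
 * waits for XRP_DSP_SYNC_HOST_TO_DSP, processes the attached TLV sync
 * data, writes XRP_DSP_SYNC_DSP_TO_HOST, and finally waits for the
 * device IRQ confirming that the host has seen the answer.
 */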
static inline int xrp_request_valid(struct xrp_dsp_cmd *dsp_cmd,
                                    uint32_t *pflags)
{
    uint32_t flags = xrp_l32ai(&dsp_cmd->flags);

    *pflags = flags;
    return (flags & (XRP_DSP_CMD_FLAG_REQUEST_VALID |
                     XRP_DSP_CMD_FLAG_RESPONSE_VALID)) ==
        XRP_DSP_CMD_FLAG_REQUEST_VALID;
}

static void complete_request(struct xrp_dsp_cmd *dsp_cmd, uint32_t flags)
{
    flags |= XRP_DSP_CMD_FLAG_RESPONSE_VALID;

    /* Write back the command body before the host can see the response flag. */
    dcache_region_writeback(dsp_cmd,
                            sizeof(*dsp_cmd));
    xrp_s32ri(flags, &dsp_cmd->flags);
    xrp_s32ri(XRP_DSP_REPORT_TO_HOST_FLAG, &dsp_cmd->cmd_flag);
    dcache_region_writeback(&dsp_cmd->flags,
                            sizeof(dsp_cmd->flags));
    xrp_hw_send_host_irq();
}

static enum xrp_access_flags dsp_buffer_allowed_access(__u32 flags)
{
    return flags == XRP_DSP_BUFFER_FLAG_READ ?
        XRP_READ : XRP_READ_WRITE;
}

/* Weak default command handler; applications override xrp_run_command. */
void xrp_run_command(const void *in_data, size_t in_data_size,
                     void *out_data, size_t out_data_size,
                     struct xrp_buffer_group *buffer_group,
                     enum xrp_status *status) __attribute__((weak));

void xrp_run_command(const void *in_data, size_t in_data_size,
                     void *out_data, size_t out_data_size,
                     struct xrp_buffer_group *buffer_group,
                     enum xrp_status *status)
{
    (void)in_data;
    (void)in_data_size;
    (void)out_data;
    (void)out_data_size;
    (void)buffer_group;
    *status = XRP_STATUS_FAILURE;
}

static inline enum xrp_status
xrp_run_command_handler(void *handler_context,
                        const void *in_data, size_t in_data_size,
                        void *out_data, size_t out_data_size,
                        struct xrp_buffer_group *buffer_group)
{
    enum xrp_status status = XRP_STATUS_FAILURE;

    (void)handler_context;
    xrp_run_command(in_data, in_data_size,
                    out_data, out_data_size,
                    buffer_group, &status);
    return status;
}

static enum xrp_status process_command(struct xrp_device *device,
                                       uint32_t flags)
{
    enum xrp_status status;
    struct xrp_dsp_cmd *dsp_cmd = device->dsp_cmd;
    size_t n_buffers = dsp_cmd->buffer_size / sizeof(struct xrp_dsp_buffer);
    struct xrp_dsp_buffer *dsp_buffer;
    struct xrp_buffer_group buffer_group;
    /* Small buffer lists live on the stack, larger ones on the heap. */
    struct xrp_buffer sbuffer[n_buffers <= MAX_STACK_BUFFERS ? n_buffers : 1];
    struct xrp_buffer *buffer = sbuffer;
    xrp_command_handler *command_handler = xrp_run_command_handler;
    void *handler_context = NULL;
    size_t i;

    if (dsp_cmd->flags & XRP_DSP_CMD_FLAG_REQUEST_NSID) {
        struct xrp_cmd_ns *cmd_ns = xrp_find_cmd_ns(&ns_map,
                                                    dsp_cmd->nsid);
        if (xrp_cmd_ns_match(dsp_cmd->nsid, cmd_ns)) {
            command_handler = cmd_ns->handler;
            handler_context = cmd_ns->handler_context;
        } else {
            flags |= XRP_DSP_CMD_FLAG_RESPONSE_DELIVERY_FAIL;
            status = XRP_STATUS_FAILURE;
            goto out;
        }
    }
    if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
        dsp_buffer = (void *)dsp_cmd->buffer_addr;
        dcache_region_invalidate(dsp_buffer,
                                 n_buffers * sizeof(*dsp_buffer));
    } else {
        dsp_buffer = (void *)&dsp_cmd->buffer_data;
    }
    if (dsp_cmd->in_data_size > sizeof(dsp_cmd->in_data)) {
        dcache_region_invalidate((void *)dsp_cmd->in_data_addr,
                                 dsp_cmd->in_data_size);
    }
    if (n_buffers > MAX_STACK_BUFFERS) {
        buffer = malloc(n_buffers * sizeof(*buffer));
        if (!buffer) {
            status = XRP_STATUS_FAILURE;
            goto out;
        }
    }

    /* Create buffers from incoming buffer data, put them to group.
     * Passed flags add some restrictions to possible buffer mapping
     * modes:
     * R only allows R
     * W and RW allow R, W or RW
     * (actually W only allows W and RW, but that's hard to express and
     * is not particularly useful)
     */
    for (i = 0; i < n_buffers; ++i) {
        buffer[i] = (struct xrp_buffer){
            .allowed_access =
                dsp_buffer_allowed_access(dsp_buffer[i].flags),
            .ptr = (void *)dsp_buffer[i].addr,
            .size = dsp_buffer[i].size,
        };
        if (buffer[i].allowed_access & XRP_READ) {
            dcache_region_invalidate(buffer[i].ptr,
                                     buffer[i].size);
        }
    }
    buffer_group = (struct xrp_buffer_group){
        .n_buffers = n_buffers,
        .buffer = buffer,
    };
    status = command_handler(handler_context,
                             dsp_cmd->in_data_size > sizeof(dsp_cmd->in_data) ?
                             (void *)dsp_cmd->in_data_addr : dsp_cmd->in_data,
                             dsp_cmd->in_data_size,
                             dsp_cmd->out_data_size > sizeof(dsp_cmd->out_data) ?
                             (void *)dsp_cmd->out_data_addr : dsp_cmd->out_data,
                             dsp_cmd->out_data_size,
                             &buffer_group);
    if (status != XRP_STATUS_SUCCESS)
        flags |= XRP_DSP_CMD_FLAG_RESPONSE_DELIVERY_FAIL;

    /*
     * Update flags in the buffer data: what access actually took place,
     * so that caches can be updated on the host side.
     */
    for (i = 0; i < n_buffers; ++i) {
        __u32 buffer_flags = 0; /* distinct from the 'flags' parameter */

        if (buffer[i].map_flags & XRP_READ)
            buffer_flags |= XRP_DSP_BUFFER_FLAG_READ;
        if (buffer[i].map_flags & XRP_WRITE)
            buffer_flags |= XRP_DSP_BUFFER_FLAG_WRITE;
        // pr_debug("%s: dsp_buffer[%u].flags = %u\n", __func__, (unsigned)i, buffer_flags);
        dsp_buffer[i].flags = buffer_flags;
        if (buffer[i].ref.count) {
            ps_debug("%s: refcount leak on buffer %u\n",
                     __func__, (unsigned)i);
        }
        if (buffer[i].map_count) {
            ps_debug("%s: map_count leak on buffer %u\n",
                     __func__, (unsigned)i);
        }
        if (buffer[i].map_flags & XRP_WRITE) {
            dcache_region_writeback(buffer[i].ptr,
                                    buffer[i].size);
        }
    }
    if (buffer_group.ref.count) {
        ps_debug("%s: refcount leak on buffer group\n", __func__);
    }
    if (dsp_cmd->out_data_size > sizeof(dsp_cmd->out_data)) {
        dcache_region_writeback((void *)dsp_cmd->out_data_addr,
                                dsp_cmd->out_data_size);
    }
    if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
        dcache_region_writeback(dsp_buffer,
                                n_buffers * sizeof(*dsp_buffer));
    }
    if (n_buffers > MAX_STACK_BUFFERS) {
        free(buffer);
    }
out:
    complete_request(dsp_cmd, flags);
    return status;
}
void xrp_device_register_namespace(struct xrp_device *device,
                                   const void *nsid,
                                   xrp_command_handler *handler,
                                   void *handler_context,
                                   enum xrp_status *status)
{
    (void)device;
    if (xrp_register_namespace(&ns_map,
                               nsid, handler, handler_context))
        set_status(status, XRP_STATUS_SUCCESS);
    else
        set_status(status, XRP_STATUS_FAILURE);
}

void xrp_device_unregister_namespace(struct xrp_device *device,
                                     const void *nsid,
                                     enum xrp_status *status)
{
    (void)device;
    if (xrp_unregister_namespace(&ns_map, nsid))
        set_status(status, XRP_STATUS_SUCCESS);
    else
        set_status(status, XRP_STATUS_FAILURE);
}
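
/*
 * Usage sketch: registering a command handler under a namespace ID.
 * Compiled out; the NSID bytes and the handler are illustrative (XRP
 * NSIDs are fixed-size binary identifiers shared with the host
 * application; see example_handler in the sketch after xrp_unmap_buffer).
 */
#if 0
static void example_register(struct xrp_device *device)
{
    static const uint8_t example_nsid[16] = {
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
    };
    enum xrp_status s;

    xrp_device_register_namespace(device, example_nsid,
                                  example_handler, NULL, &s);
}
#endif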
enum xrp_status xrp_device_poll(struct xrp_device *device)
{
    uint32_t flags;

    dcache_region_invalidate(device->dsp_cmd,
                             sizeof(struct xrp_dsp_cmd));
    if (xrp_request_valid(device->dsp_cmd, &flags))
        return XRP_STATUS_SUCCESS;
    else
        return XRP_STATUS_PENDING;
}

enum xrp_status xrp_device_dispatch(struct xrp_device *device)
{
    uint32_t flags;
    enum xrp_status status;

    dcache_region_invalidate(device->dsp_cmd,
                             sizeof(struct xrp_dsp_cmd));
    if (!xrp_request_valid(device->dsp_cmd, &flags))
        return XRP_STATUS_PENDING;
    if (flags == XRP_DSP_SYNC_START) {
        do_handshake(device);
        status = XRP_STATUS_SUCCESS;
    } else {
        status = process_command(device, flags);
    }
    return status;
}
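
/*
 * Usage sketch: a typical DSP-side main loop built from the calls
 * above. Compiled out; waiting for the device IRQ between polls is an
 * assumption about the surrounding firmware, and a busy poll would
 * work as well.
 */
#if 0
int main(void)
{
    enum xrp_status s;
    struct xrp_device *device;

    xrp_user_initialize(&s);
    device = xrp_open_device(0, &s);
    if (s != XRP_STATUS_SUCCESS)
        return 1;
    for (;;) {
        while (xrp_device_poll(device) != XRP_STATUS_SUCCESS)
            xrp_hw_wait_device_irq();
        (void)xrp_device_dispatch(device);
    }
}
#endif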
enum xrp_status xrp_device_sync(struct xrp_device *device)
{
    uint32_t flags;

    dcache_region_invalidate(device->dsp_cmd,
                             sizeof(struct xrp_dsp_cmd));
    if (!xrp_request_valid(device->dsp_cmd, &flags))
        return XRP_STATUS_PENDING;
    if (flags == XRP_DSP_SYNC_START) {
        do_handshake(device);
        return XRP_STATUS_SUCCESS;
    }
    return XRP_STATUS_FAILURE;
}

enum xrp_status xrp_device_command_dispatch(struct xrp_device *device)
{
    uint32_t flags;

    dcache_region_invalidate(device->dsp_cmd,
                             sizeof(struct xrp_dsp_cmd));
    if (!xrp_request_valid(device->dsp_cmd, &flags))
        return XRP_STATUS_FAILURE;
    if (flags == XRP_DSP_SYNC_START)
        return XRP_STATUS_FAILURE;
    return process_command(device, flags);
}
enum xrp_status xrp_get_report_buffer(struct xrp_device *device,
                                      void **buffer, void *buffer_size)
{
    struct xrp_dsp_cmd *share_region = (struct xrp_dsp_cmd *)device->dsp_cmd;
    uint32_t report_status = xrp_l32ai(&share_region->report_status);

    if (report_status == XRP_DSP_REPORT_INVALID) {
        ps_wrn("Driver kernel report is not set up");
        return XRP_STATUS_PENDING;
    }
    // *buffer = share_region->report_paylad_size > XRP_DSP_CMD_INLINE_DATA_SIZE ?
    //          share_region->report_addr : share_region->report_data;
    *buffer = (void *)share_region->report_addr;
    *(uint32_t *)buffer_size = share_region->report_paylad_size;
    return XRP_STATUS_SUCCESS;
}

enum xrp_status xrp_send_report(struct xrp_device *device, uint32_t id)
{
    struct xrp_dsp_cmd *share_region = (struct xrp_dsp_cmd *)device->dsp_cmd;
    uint32_t current_id = xrp_l32ai(&share_region->report_id);

    if (current_id != 0) {
        ps_err("previous report %u still pending\n", current_id);
        return XRP_STATUS_PENDING;
    }
    xrp_s32ri(id, &share_region->report_id);
    xrp_hw_send_host_irq();
    // pr_debug("report sent to host\n");
    return XRP_STATUS_SUCCESS;
}

enum xrp_status xrp_get_report_status(struct xrp_device *device)
{
    struct xrp_dsp_cmd *share_region = (struct xrp_dsp_cmd *)device->dsp_cmd;
    uint32_t current_id = xrp_l32ai(&share_region->report_id);

    if (current_id != 0) {
        ps_wrn("previous report %u still pending\n", current_id);
        return XRP_STATUS_PENDING;
    }
    return XRP_STATUS_SUCCESS;
}
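
/*
 * Usage sketch: publishing a report to the host with the calls above.
 * Compiled out; the payload handling and the report id value are
 * illustrative (the code above only requires the id to be non-zero
 * while a report is in flight).
 */
#if 0
static void example_report(struct xrp_device *device)
{
    void *report;
    uint32_t report_size;

    if (xrp_get_report_buffer(device, &report,
                              &report_size) == XRP_STATUS_SUCCESS) {
        memset(report, 0, report_size);     /* fill in the payload */
        (void)xrp_send_report(device, 1);   /* any non-zero id */
    }
}
#endif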
static void exception(ExcFrame *ef)
{
    unsigned long exccause, excvaddr, ps, epc1;

    exccause = ef->exccause & EXCCAUSE_FULLTYPE_MASK;
    excvaddr = ef->excvaddr;
    ps = ef->ps;
    epc1 = ef->pc;
    printf("%s: EXCCAUSE = %lu, EXCVADDR = 0x%08lx, PS = 0x%08lx, EPC1 = 0x%08lx\n",
           __func__, exccause, excvaddr, ps, epc1);
    hang();
}

static void register_exception_handlers(void)
{
    static const int cause[] = {
        EXCCAUSE_INSTRUCTION,
        EXCCAUSE_ADDRESS,
        EXCCAUSE_HARDWARE,
        EXCCAUSE_MEMORY,
    };
    unsigned i;

    for (i = 0; i < sizeof(cause) / sizeof(cause[0]); ++i) {
        _xtos_set_exception_handler(cause[i], exception);
    }
}

void xrp_user_initialize(enum xrp_status *status)
{
    ps_debug("%s\n", __func__);
    register_exception_handlers();
    // atexit(hang);
    if (status != NULL)
        *status = XRP_STATUS_SUCCESS;
}
  697. }