fastrpc.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  3. // Copyright (c) 2018, Linaro Limited
  4. #include <linux/completion.h>
  5. #include <linux/device.h>
  6. #include <linux/dma-buf.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/idr.h>
  9. #include <linux/list.h>
  10. #include <linux/miscdevice.h>
  11. #include <linux/module.h>
  12. #include <linux/of_address.h>
  13. #include <linux/of.h>
  14. #include <linux/sort.h>
  15. #include <linux/of_platform.h>
  16. #include <linux/rpmsg.h>
  17. #include <linux/scatterlist.h>
  18. #include <linux/slab.h>
  19. #include <uapi/misc/fastrpc.h>
  20. #define ADSP_DOMAIN_ID (0)
  21. #define MDSP_DOMAIN_ID (1)
  22. #define SDSP_DOMAIN_ID (2)
  23. #define CDSP_DOMAIN_ID (3)
  24. #define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
  25. #define FASTRPC_MAX_SESSIONS 9 /* 8 compute, 1 cpz */
  26. #define FASTRPC_ALIGN 128
  27. #define FASTRPC_MAX_FDLIST 16
  28. #define FASTRPC_MAX_CRCLIST 64
  29. #define FASTRPC_PHYS(p) ((p) & 0xffffffff)
  30. #define FASTRPC_CTX_MAX (256)
  31. #define FASTRPC_INIT_HANDLE 1
  32. #define FASTRPC_CTXID_MASK (0xFF0)
  33. #define INIT_FILELEN_MAX (2 * 1024 * 1024)
  34. #define FASTRPC_DEVICE_NAME "fastrpc"
  35. #define ADSP_MMAP_ADD_PAGES 0x1000
  36. /* Retrieves number of input buffers from the scalars parameter */
  37. #define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
  38. /* Retrieves number of output buffers from the scalars parameter */
  39. #define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
  40. /* Retrieves number of input handles from the scalars parameter */
  41. #define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
  42. /* Retrieves number of output handles from the scalars parameter */
  43. #define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
  44. #define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
  45. REMOTE_SCALARS_OUTBUFS(sc) + \
  46. REMOTE_SCALARS_INHANDLES(sc)+ \
  47. REMOTE_SCALARS_OUTHANDLES(sc))
  48. #define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
  49. (((attr & 0x07) << 29) | \
  50. ((method & 0x1f) << 24) | \
  51. ((in & 0xff) << 16) | \
  52. ((out & 0xff) << 8) | \
  53. ((oin & 0x0f) << 4) | \
  54. (oout & 0x0f))
  55. #define FASTRPC_SCALARS(method, in, out) \
  56. FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
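/*
 * Worked example of the scalars encoding (illustrative):
 * FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0) packs method 6 with
 * four input buffers and no output buffers or handles:
 *
 *     ((6 & 0x1f) << 24) | ((4 & 0xff) << 16) == 0x06040000
 *
 * The REMOTE_SCALARS_* accessors above recover the fields again, so
 * REMOTE_SCALARS_INBUFS(0x06040000) == 4 and
 * REMOTE_SCALARS_LENGTH(0x06040000) == 4.
 */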
  57. #define FASTRPC_CREATE_PROCESS_NARGS 6
  58. /* Remote Method id table */
  59. #define FASTRPC_RMID_INIT_ATTACH 0
  60. #define FASTRPC_RMID_INIT_RELEASE 1
  61. #define FASTRPC_RMID_INIT_MMAP 4
  62. #define FASTRPC_RMID_INIT_MUNMAP 5
  63. #define FASTRPC_RMID_INIT_CREATE 6
  64. #define FASTRPC_RMID_INIT_CREATE_ATTR 7
  65. #define FASTRPC_RMID_INIT_CREATE_STATIC 8
  66. /* Protection Domain(PD) ids */
  67. #define AUDIO_PD (0) /* also GUEST_OS PD? */
  68. #define USER_PD (1)
  69. #define SENSORS_PD (2)
  70. #define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
  71. static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
  72. "sdsp", "cdsp"};
  73. struct fastrpc_phy_page {
  74. u64 addr; /* physical address */
  75. u64 size; /* size of contiguous region */
  76. };
  77. struct fastrpc_invoke_buf {
  78. u32 num; /* number of contiguous regions */
  79. u32 pgidx; /* index to start of contiguous region */
  80. };
  81. struct fastrpc_remote_arg {
  82. u64 pv;
  83. u64 len;
  84. };
  85. struct fastrpc_mmap_rsp_msg {
  86. u64 vaddr;
  87. };
  88. struct fastrpc_mmap_req_msg {
  89. s32 pgid;
  90. u32 flags;
  91. u64 vaddr;
  92. s32 num;
  93. };
  94. struct fastrpc_munmap_req_msg {
  95. s32 pgid;
  96. u64 vaddr;
  97. u64 size;
  98. };
  99. struct fastrpc_msg {
  100. int pid; /* process group id */
  101. int tid; /* thread id */
  102. u64 ctx; /* invoke caller context */
  103. u32 handle; /* handle to invoke */
  104. u32 sc; /* scalars structure describing the data */
  105. u64 addr; /* physical address */
  106. u64 size; /* size of contiguous region */
  107. };
  108. struct fastrpc_invoke_rsp {
  109. u64 ctx; /* invoke caller context */
  110. int retval; /* invoke return value */
  111. };
  112. struct fastrpc_buf_overlap {
  113. u64 start;
  114. u64 end;
  115. int raix;
  116. u64 mstart;
  117. u64 mend;
  118. u64 offset;
  119. };
  120. struct fastrpc_buf {
  121. struct fastrpc_user *fl;
  122. struct dma_buf *dmabuf;
  123. struct device *dev;
  124. void *virt;
  125. u64 phys;
  126. u64 size;
  127. /* Lock for dma buf attachments */
  128. struct mutex lock;
  129. struct list_head attachments;
  130. /* mmap support */
  131. struct list_head node; /* list of user requested mmaps */
  132. uintptr_t raddr;
  133. };
  134. struct fastrpc_dma_buf_attachment {
  135. struct device *dev;
  136. struct sg_table sgt;
  137. struct list_head node;
  138. };
  139. struct fastrpc_map {
  140. struct list_head node;
  141. struct fastrpc_user *fl;
  142. int fd;
  143. struct dma_buf *buf;
  144. struct sg_table *table;
  145. struct dma_buf_attachment *attach;
  146. u64 phys;
  147. u64 size;
  148. void *va;
  149. u64 len;
  150. struct kref refcount;
  151. };
  152. struct fastrpc_invoke_ctx {
  153. int nscalars;
  154. int nbufs;
  155. int retval;
  156. int pid;
  157. int tgid;
  158. u32 sc;
  159. u32 *crc;
  160. u64 ctxid;
  161. u64 msg_sz;
  162. struct kref refcount;
  163. struct list_head node; /* list of ctxs */
  164. struct completion work;
  165. struct work_struct put_work;
  166. struct fastrpc_msg msg;
  167. struct fastrpc_user *fl;
  168. struct fastrpc_remote_arg *rpra;
  169. struct fastrpc_map **maps;
  170. struct fastrpc_buf *buf;
  171. struct fastrpc_invoke_args *args;
  172. struct fastrpc_buf_overlap *olaps;
  173. struct fastrpc_channel_ctx *cctx;
  174. };
  175. struct fastrpc_session_ctx {
  176. struct device *dev;
  177. int sid;
  178. bool used;
  179. bool valid;
  180. };
  181. struct fastrpc_channel_ctx {
  182. int domain_id;
  183. int sesscount;
  184. struct rpmsg_device *rpdev;
  185. struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
  186. spinlock_t lock;
  187. struct idr ctx_idr;
  188. struct list_head users;
  189. struct miscdevice miscdev;
  190. struct kref refcount;
  191. };
  192. struct fastrpc_user {
  193. struct list_head user;
  194. struct list_head maps;
  195. struct list_head pending;
  196. struct list_head mmaps;
  197. struct fastrpc_channel_ctx *cctx;
  198. struct fastrpc_session_ctx *sctx;
  199. struct fastrpc_buf *init_mem;
  200. int tgid;
  201. int pd;
  202. /* Lock for lists */
  203. spinlock_t lock;
  204. /* lock for allocations */
  205. struct mutex mutex;
  206. };
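/*
 * Object hierarchy used throughout this file: one fastrpc_channel_ctx
 * exists per DSP domain (adsp/mdsp/sdsp/cdsp) and is exposed as a misc
 * device; each open of that device creates a fastrpc_user bound to one
 * fastrpc_session_ctx (a compute context bank with its own SMMU stream
 * id); every remote call allocates a short-lived fastrpc_invoke_ctx
 * that stays in the channel's IDR until the DSP response arrives.
 */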
  207. static void fastrpc_free_map(struct kref *ref)
  208. {
  209. struct fastrpc_map *map;
  210. map = container_of(ref, struct fastrpc_map, refcount);
  211. if (map->table) {
  212. dma_buf_unmap_attachment(map->attach, map->table,
  213. DMA_BIDIRECTIONAL);
  214. dma_buf_detach(map->buf, map->attach);
  215. dma_buf_put(map->buf);
  216. }
  217. kfree(map);
  218. }
  219. static void fastrpc_map_put(struct fastrpc_map *map)
  220. {
  221. if (map)
  222. kref_put(&map->refcount, fastrpc_free_map);
  223. }
  224. static void fastrpc_map_get(struct fastrpc_map *map)
  225. {
  226. if (map)
  227. kref_get(&map->refcount);
  228. }
  229. static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
  230. struct fastrpc_map **ppmap)
  231. {
  232. struct fastrpc_map *map = NULL;
  233. mutex_lock(&fl->mutex);
  234. list_for_each_entry(map, &fl->maps, node) {
  235. if (map->fd == fd) {
  236. fastrpc_map_get(map);
  237. *ppmap = map;
  238. mutex_unlock(&fl->mutex);
  239. return 0;
  240. }
  241. }
  242. mutex_unlock(&fl->mutex);
  243. return -ENOENT;
  244. }
  245. static void fastrpc_buf_free(struct fastrpc_buf *buf)
  246. {
  247. dma_free_coherent(buf->dev, buf->size, buf->virt,
  248. FASTRPC_PHYS(buf->phys));
  249. kfree(buf);
  250. }
  251. static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
  252. u64 size, struct fastrpc_buf **obuf)
  253. {
  254. struct fastrpc_buf *buf;
  255. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  256. if (!buf)
  257. return -ENOMEM;
  258. INIT_LIST_HEAD(&buf->attachments);
  259. INIT_LIST_HEAD(&buf->node);
  260. mutex_init(&buf->lock);
  261. buf->fl = fl;
  262. buf->virt = NULL;
  263. buf->phys = 0;
  264. buf->size = size;
  265. buf->dev = dev;
  266. buf->raddr = 0;
  267. buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
  268. GFP_KERNEL);
  269. if (!buf->virt) {
  270. mutex_destroy(&buf->lock);
  271. kfree(buf);
  272. return -ENOMEM;
  273. }
  274. if (fl->sctx && fl->sctx->sid)
  275. buf->phys += ((u64)fl->sctx->sid << 32);
  276. *obuf = buf;
  277. return 0;
  278. }
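/*
 * Note: when the session has a non-zero sid, it is stored in bits 32+
 * of buf->phys. FASTRPC_PHYS() masks it back off wherever the real
 * 32-bit DMA address is needed (dma_free_coherent() above,
 * dma_get_sgtable() and dma_mmap_coherent() below).
 */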
  279. static void fastrpc_channel_ctx_free(struct kref *ref)
  280. {
  281. struct fastrpc_channel_ctx *cctx;
  282. cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
  283. kfree(cctx);
  284. }
  285. static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
  286. {
  287. kref_get(&cctx->refcount);
  288. }
  289. static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
  290. {
  291. kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
  292. }
  293. static void fastrpc_context_free(struct kref *ref)
  294. {
  295. struct fastrpc_invoke_ctx *ctx;
  296. struct fastrpc_channel_ctx *cctx;
  297. unsigned long flags;
  298. int i;
  299. ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
  300. cctx = ctx->cctx;
  301. for (i = 0; i < ctx->nscalars; i++)
  302. fastrpc_map_put(ctx->maps[i]);
  303. if (ctx->buf)
  304. fastrpc_buf_free(ctx->buf);
  305. spin_lock_irqsave(&cctx->lock, flags);
  306. idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
  307. spin_unlock_irqrestore(&cctx->lock, flags);
  308. kfree(ctx->maps);
  309. kfree(ctx->olaps);
  310. kfree(ctx);
  311. fastrpc_channel_ctx_put(cctx);
  312. }
  313. static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
  314. {
  315. kref_get(&ctx->refcount);
  316. }
  317. static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
  318. {
  319. kref_put(&ctx->refcount, fastrpc_context_free);
  320. }
  321. static void fastrpc_context_put_wq(struct work_struct *work)
  322. {
  323. struct fastrpc_invoke_ctx *ctx =
  324. container_of(work, struct fastrpc_invoke_ctx, put_work);
  325. fastrpc_context_put(ctx);
  326. }
  327. #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
  328. static int olaps_cmp(const void *a, const void *b)
  329. {
  330. struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
  331. struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
  332. /* sort with lowest starting buffer first */
  333. int st = CMP(pa->start, pb->start);
  334. /* sort with highest ending buffer first */
  335. int ed = CMP(pb->end, pa->end);
  336. return st == 0 ? ed : st;
  337. }
  338. static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
  339. {
  340. u64 max_end = 0;
  341. int i;
  342. for (i = 0; i < ctx->nbufs; ++i) {
  343. ctx->olaps[i].start = ctx->args[i].ptr;
  344. ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
  345. ctx->olaps[i].raix = i;
  346. }
  347. sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
  348. for (i = 0; i < ctx->nbufs; ++i) {
  349. /* Falling inside previous range */
  350. if (ctx->olaps[i].start < max_end) {
  351. ctx->olaps[i].mstart = max_end;
  352. ctx->olaps[i].mend = ctx->olaps[i].end;
  353. ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
  354. if (ctx->olaps[i].end > max_end) {
  355. max_end = ctx->olaps[i].end;
  356. } else {
  357. ctx->olaps[i].mend = 0;
  358. ctx->olaps[i].mstart = 0;
  359. }
  360. } else {
  361. ctx->olaps[i].mend = ctx->olaps[i].end;
  362. ctx->olaps[i].mstart = ctx->olaps[i].start;
  363. ctx->olaps[i].offset = 0;
  364. max_end = ctx->olaps[i].end;
  365. }
  366. }
  367. }
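/*
 * Illustrative example of the coalescing above: for two input buffers
 * covering [0x1000, 0x1800) and [0x1400, 0x2000), the first entry keeps
 * mstart 0x1000, mend 0x1800, offset 0, while the second is clipped to
 * its non-overlapping tail (mstart 0x1800, mend 0x2000, offset 0x400).
 * fastrpc_get_payload_size() then reserves only mend - mstart = 0x800
 * bytes for it instead of 0xc00, and fastrpc_get_args() points its
 * remote-arg entry back into the already-copied region via the offset.
 */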
  368. static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
  369. struct fastrpc_user *user, u32 kernel, u32 sc,
  370. struct fastrpc_invoke_args *args)
  371. {
  372. struct fastrpc_channel_ctx *cctx = user->cctx;
  373. struct fastrpc_invoke_ctx *ctx = NULL;
  374. unsigned long flags;
  375. int ret;
  376. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  377. if (!ctx)
  378. return ERR_PTR(-ENOMEM);
  379. INIT_LIST_HEAD(&ctx->node);
  380. ctx->fl = user;
  381. ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
  382. ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
  383. REMOTE_SCALARS_OUTBUFS(sc);
  384. if (ctx->nscalars) {
  385. ctx->maps = kcalloc(ctx->nscalars,
  386. sizeof(*ctx->maps), GFP_KERNEL);
  387. if (!ctx->maps) {
  388. kfree(ctx);
  389. return ERR_PTR(-ENOMEM);
  390. }
  391. ctx->olaps = kcalloc(ctx->nscalars,
  392. sizeof(*ctx->olaps), GFP_KERNEL);
  393. if (!ctx->olaps) {
  394. kfree(ctx->maps);
  395. kfree(ctx);
  396. return ERR_PTR(-ENOMEM);
  397. }
  398. ctx->args = args;
  399. fastrpc_get_buff_overlaps(ctx);
  400. }
  401. /* Released in fastrpc_context_put() */
  402. fastrpc_channel_ctx_get(cctx);
  403. ctx->sc = sc;
  404. ctx->retval = -1;
  405. ctx->pid = current->pid;
  406. ctx->tgid = user->tgid;
  407. ctx->cctx = cctx;
  408. init_completion(&ctx->work);
  409. INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
  410. spin_lock(&user->lock);
  411. list_add_tail(&ctx->node, &user->pending);
  412. spin_unlock(&user->lock);
  413. spin_lock_irqsave(&cctx->lock, flags);
  414. ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
  415. FASTRPC_CTX_MAX, GFP_ATOMIC);
  416. if (ret < 0) {
  417. spin_unlock_irqrestore(&cctx->lock, flags);
  418. goto err_idr;
  419. }
  420. ctx->ctxid = ret << 4;
  421. spin_unlock_irqrestore(&cctx->lock, flags);
  422. kref_init(&ctx->refcount);
  423. return ctx;
  424. err_idr:
  425. spin_lock(&user->lock);
  426. list_del(&ctx->node);
  427. spin_unlock(&user->lock);
  428. fastrpc_channel_ctx_put(cctx);
  429. kfree(ctx->maps);
  430. kfree(ctx->olaps);
  431. kfree(ctx);
  432. return ERR_PTR(ret);
  433. }
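/*
 * The IDR id allocated above is shifted left by 4 before being stored
 * in ctx->ctxid: the low bits of the value sent to the DSP (msg->ctx in
 * fastrpc_invoke_send()) carry the protection domain, and
 * fastrpc_rpmsg_callback() undoes the shift with FASTRPC_CTXID_MASK to
 * find the context again when the response comes back.
 */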
  434. static struct sg_table *
  435. fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
  436. enum dma_data_direction dir)
  437. {
  438. struct fastrpc_dma_buf_attachment *a = attachment->priv;
  439. struct sg_table *table;
  440. int ret;
  441. table = &a->sgt;
  442. ret = dma_map_sgtable(attachment->dev, table, dir, 0);
  443. if (ret)
  444. table = ERR_PTR(ret);
  445. return table;
  446. }
  447. static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
  448. struct sg_table *table,
  449. enum dma_data_direction dir)
  450. {
  451. dma_unmap_sgtable(attach->dev, table, dir, 0);
  452. }
  453. static void fastrpc_release(struct dma_buf *dmabuf)
  454. {
  455. struct fastrpc_buf *buffer = dmabuf->priv;
  456. fastrpc_buf_free(buffer);
  457. }
  458. static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
  459. struct dma_buf_attachment *attachment)
  460. {
  461. struct fastrpc_dma_buf_attachment *a;
  462. struct fastrpc_buf *buffer = dmabuf->priv;
  463. int ret;
  464. a = kzalloc(sizeof(*a), GFP_KERNEL);
  465. if (!a)
  466. return -ENOMEM;
  467. ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
  468. FASTRPC_PHYS(buffer->phys), buffer->size);
  469. if (ret < 0) {
  470. dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
  471. kfree(a);
  472. return -EINVAL;
  473. }
  474. a->dev = attachment->dev;
  475. INIT_LIST_HEAD(&a->node);
  476. attachment->priv = a;
  477. mutex_lock(&buffer->lock);
  478. list_add(&a->node, &buffer->attachments);
  479. mutex_unlock(&buffer->lock);
  480. return 0;
  481. }
  482. static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
  483. struct dma_buf_attachment *attachment)
  484. {
  485. struct fastrpc_dma_buf_attachment *a = attachment->priv;
  486. struct fastrpc_buf *buffer = dmabuf->priv;
  487. mutex_lock(&buffer->lock);
  488. list_del(&a->node);
  489. mutex_unlock(&buffer->lock);
  490. sg_free_table(&a->sgt);
  491. kfree(a);
  492. }
  493. static void *fastrpc_vmap(struct dma_buf *dmabuf)
  494. {
  495. struct fastrpc_buf *buf = dmabuf->priv;
  496. return buf->virt;
  497. }
  498. static int fastrpc_mmap(struct dma_buf *dmabuf,
  499. struct vm_area_struct *vma)
  500. {
  501. struct fastrpc_buf *buf = dmabuf->priv;
  502. size_t size = vma->vm_end - vma->vm_start;
  503. return dma_mmap_coherent(buf->dev, vma, buf->virt,
  504. FASTRPC_PHYS(buf->phys), size);
  505. }
  506. static const struct dma_buf_ops fastrpc_dma_buf_ops = {
  507. .attach = fastrpc_dma_buf_attach,
  508. .detach = fastrpc_dma_buf_detach,
  509. .map_dma_buf = fastrpc_map_dma_buf,
  510. .unmap_dma_buf = fastrpc_unmap_dma_buf,
  511. .mmap = fastrpc_mmap,
  512. .vmap = fastrpc_vmap,
  513. .release = fastrpc_release,
  514. };
  515. static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
  516. u64 len, struct fastrpc_map **ppmap)
  517. {
  518. struct fastrpc_session_ctx *sess = fl->sctx;
  519. struct fastrpc_map *map = NULL;
  520. int err = 0;
  521. if (!fastrpc_map_find(fl, fd, ppmap))
  522. return 0;
  523. map = kzalloc(sizeof(*map), GFP_KERNEL);
  524. if (!map)
  525. return -ENOMEM;
  526. INIT_LIST_HEAD(&map->node);
  527. map->fl = fl;
  528. map->fd = fd;
  529. map->buf = dma_buf_get(fd);
  530. if (IS_ERR(map->buf)) {
  531. err = PTR_ERR(map->buf);
  532. goto get_err;
  533. }
  534. map->attach = dma_buf_attach(map->buf, sess->dev);
  535. if (IS_ERR(map->attach)) {
  536. dev_err(sess->dev, "Failed to attach dmabuf\n");
  537. err = PTR_ERR(map->attach);
  538. goto attach_err;
  539. }
  540. map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
  541. if (IS_ERR(map->table)) {
  542. err = PTR_ERR(map->table);
  543. goto map_err;
  544. }
  545. map->phys = sg_dma_address(map->table->sgl);
  546. map->phys += ((u64)fl->sctx->sid << 32);
  547. map->size = len;
  548. map->va = sg_virt(map->table->sgl);
  549. map->len = len;
  550. kref_init(&map->refcount);
  551. spin_lock(&fl->lock);
  552. list_add_tail(&map->node, &fl->maps);
  553. spin_unlock(&fl->lock);
  554. *ppmap = map;
  555. return 0;
  556. map_err:
  557. dma_buf_detach(map->buf, map->attach);
  558. attach_err:
  559. dma_buf_put(map->buf);
  560. get_err:
  561. kfree(map);
  562. return err;
  563. }
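/*
 * fastrpc_map_create() is the import path for caller-supplied dma-buf
 * fds: an existing map for the same fd is reused via fastrpc_map_find()
 * and refcounted, otherwise the buffer is attached and mapped for the
 * session device and, as in fastrpc_buf_alloc(), the session id is
 * stashed in the upper 32 bits of map->phys.
 */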
  564. /*
  565. * Fastrpc payload buffer with metadata looks like:
  566. *
  567. * >>>>>> START of METADATA <<<<<<<<<
  568. * +---------------------------------+
  569. * |            Arguments            |
  570. * | type:(struct fastrpc_remote_arg)|
  571. * |             (0 - N)             |
  572. * +---------------------------------+
  573. * |       Invoke Buffer list        |
  574. * | type:(struct fastrpc_invoke_buf)|
  575. * |             (0 - N)             |
  576. * +---------------------------------+
  577. * |         Page info list          |
  578. * | type:(struct fastrpc_phy_page)  |
  579. * |             (0 - N)             |
  580. * +---------------------------------+
  581. * |          Optional info          |
  582. * |(can be specific to SoC/Firmware)|
  583. * +---------------------------------+
  584. * >>>>>>>> END of METADATA <<<<<<<<<
  585. * +---------------------------------+
  586. * |           Inline ARGS           |
  587. * |              (0-N)              |
  588. * +---------------------------------+
  589. */
  590. static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
  591. {
  592. int size = 0;
  593. size = (sizeof(struct fastrpc_remote_arg) +
  594. sizeof(struct fastrpc_invoke_buf) +
  595. sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
  596. sizeof(u64) * FASTRPC_MAX_FDLIST +
  597. sizeof(u32) * FASTRPC_MAX_CRCLIST;
  598. return size;
  599. }
  600. static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
  601. {
  602. u64 size = 0;
  603. int oix;
  604. size = ALIGN(metalen, FASTRPC_ALIGN);
  605. for (oix = 0; oix < ctx->nbufs; oix++) {
  606. int i = ctx->olaps[oix].raix;
  607. if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
  608. if (ctx->olaps[oix].offset == 0)
  609. size = ALIGN(size, FASTRPC_ALIGN);
  610. size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
  611. }
  612. }
  613. return size;
  614. }
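/*
 * Size arithmetic (illustrative): each scalar costs 16 bytes of remote
 * args + 8 bytes of invoke-buf list + 16 bytes of page info, and every
 * call carries 16 * 8 bytes of fd list and 64 * 4 bytes of CRC list, so
 * a call with two buffer arguments has metalen = 2 * 40 + 128 + 256 =
 * 464 bytes. fastrpc_get_payload_size() aligns that to FASTRPC_ALIGN
 * (512 here) and then appends space for the inline copies of the
 * buffers that are not backed by an fd.
 */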
  615. static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
  616. {
  617. struct device *dev = ctx->fl->sctx->dev;
  618. int i, err;
  619. for (i = 0; i < ctx->nscalars; ++i) {
  620. /* Make sure reserved field is set to 0 */
  621. if (ctx->args[i].reserved)
  622. return -EINVAL;
  623. if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
  624. ctx->args[i].length == 0)
  625. continue;
  626. err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
  627. ctx->args[i].length, &ctx->maps[i]);
  628. if (err) {
  629. dev_err(dev, "Error Creating map %d\n", err);
  630. return -EINVAL;
  631. }
  632. }
  633. return 0;
  634. }
  635. static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
  636. {
  637. struct device *dev = ctx->fl->sctx->dev;
  638. struct fastrpc_remote_arg *rpra;
  639. struct fastrpc_invoke_buf *list;
  640. struct fastrpc_phy_page *pages;
  641. int inbufs, i, oix, err = 0;
  642. u64 len, rlen, pkt_size;
  643. u64 pg_start, pg_end;
  644. uintptr_t args;
  645. int metalen;
  646. inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
  647. metalen = fastrpc_get_meta_size(ctx);
  648. pkt_size = fastrpc_get_payload_size(ctx, metalen);
  649. err = fastrpc_create_maps(ctx);
  650. if (err)
  651. return err;
  652. ctx->msg_sz = pkt_size;
  653. err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
  654. if (err)
  655. return err;
  656. rpra = ctx->buf->virt;
  657. list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
  658. pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
  659. sizeof(*rpra));
  660. args = (uintptr_t)ctx->buf->virt + metalen;
  661. rlen = pkt_size - metalen;
  662. ctx->rpra = rpra;
  663. for (oix = 0; oix < ctx->nbufs; ++oix) {
  664. int mlen;
  665. i = ctx->olaps[oix].raix;
  666. len = ctx->args[i].length;
  667. rpra[i].pv = 0;
  668. rpra[i].len = len;
  669. list[i].num = len ? 1 : 0;
  670. list[i].pgidx = i;
  671. if (!len)
  672. continue;
  673. if (ctx->maps[i]) {
  674. struct vm_area_struct *vma = NULL;
  675. rpra[i].pv = (u64) ctx->args[i].ptr;
  676. pages[i].addr = ctx->maps[i]->phys;
  677. mmap_read_lock(current->mm);
  678. vma = find_vma(current->mm, ctx->args[i].ptr);
  679. if (vma)
  680. pages[i].addr += ctx->args[i].ptr -
  681. vma->vm_start;
  682. mmap_read_unlock(current->mm);
  683. pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
  684. pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
  685. PAGE_SHIFT;
  686. pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
  687. } else {
  688. if (ctx->olaps[oix].offset == 0) {
  689. rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
  690. args = ALIGN(args, FASTRPC_ALIGN);
  691. }
  692. mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
  693. if (rlen < mlen)
  694. goto bail;
  695. rpra[i].pv = args - ctx->olaps[oix].offset;
  696. pages[i].addr = ctx->buf->phys -
  697. ctx->olaps[oix].offset +
  698. (pkt_size - rlen);
  699. pages[i].addr = pages[i].addr & PAGE_MASK;
  700. pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
  701. pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
  702. pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
  703. args = args + mlen;
  704. rlen -= mlen;
  705. }
  706. if (i < inbufs && !ctx->maps[i]) {
  707. void *dst = (void *)(uintptr_t)rpra[i].pv;
  708. void *src = (void *)(uintptr_t)ctx->args[i].ptr;
  709. if (!kernel) {
  710. if (copy_from_user(dst, (void __user *)src,
  711. len)) {
  712. err = -EFAULT;
  713. goto bail;
  714. }
  715. } else {
  716. memcpy(dst, src, len);
  717. }
  718. }
  719. }
  720. for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
  721. rpra[i].pv = (u64) ctx->args[i].ptr;
  722. rpra[i].len = ctx->args[i].length;
  723. list[i].num = ctx->args[i].length ? 1 : 0;
  724. list[i].pgidx = i;
  725. pages[i].addr = ctx->maps[i]->phys;
  726. pages[i].size = ctx->maps[i]->size;
  727. }
  728. bail:
  729. if (err)
  730. dev_err(dev, "Error: get invoke args failed:%d\n", err);
  731. return err;
  732. }
  733. static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
  734. u32 kernel)
  735. {
  736. struct fastrpc_remote_arg *rpra = ctx->rpra;
  737. int i, inbufs;
  738. inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
  739. for (i = inbufs; i < ctx->nbufs; ++i) {
  740. void *src = (void *)(uintptr_t)rpra[i].pv;
  741. void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
  742. u64 len = rpra[i].len;
  743. if (!kernel) {
  744. if (copy_to_user((void __user *)dst, src, len))
  745. return -EFAULT;
  746. } else {
  747. memcpy(dst, src, len);
  748. }
  749. }
  750. return 0;
  751. }
  752. static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
  753. struct fastrpc_invoke_ctx *ctx,
  754. u32 kernel, uint32_t handle)
  755. {
  756. struct fastrpc_channel_ctx *cctx;
  757. struct fastrpc_user *fl = ctx->fl;
  758. struct fastrpc_msg *msg = &ctx->msg;
  759. int ret;
  760. cctx = fl->cctx;
  761. msg->pid = fl->tgid;
  762. msg->tid = current->pid;
  763. if (kernel)
  764. msg->pid = 0;
  765. msg->ctx = ctx->ctxid | fl->pd;
  766. msg->handle = handle;
  767. msg->sc = ctx->sc;
  768. msg->addr = ctx->buf ? ctx->buf->phys : 0;
  769. msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
  770. fastrpc_context_get(ctx);
  771. ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
  772. if (ret)
  773. fastrpc_context_put(ctx);
  774. return ret;
  775. }
  776. static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
  777. u32 handle, u32 sc,
  778. struct fastrpc_invoke_args *args)
  779. {
  780. struct fastrpc_invoke_ctx *ctx = NULL;
  781. int err = 0;
  782. if (!fl->sctx)
  783. return -EINVAL;
  784. if (!fl->cctx->rpdev)
  785. return -EPIPE;
  786. if (handle == FASTRPC_INIT_HANDLE && !kernel) {
  787. dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
  788. return -EPERM;
  789. }
  790. ctx = fastrpc_context_alloc(fl, kernel, sc, args);
  791. if (IS_ERR(ctx))
  792. return PTR_ERR(ctx);
  793. if (ctx->nscalars) {
  794. err = fastrpc_get_args(kernel, ctx);
  795. if (err)
  796. goto bail;
  797. }
  798. /* make sure that all CPU memory writes are seen by DSP */
  799. dma_wmb();
  800. /* Send invoke buffer to remote dsp */
  801. err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
  802. if (err)
  803. goto bail;
  804. if (kernel) {
  805. if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
  806. err = -ETIMEDOUT;
  807. } else {
  808. err = wait_for_completion_interruptible(&ctx->work);
  809. }
  810. if (err)
  811. goto bail;
  812. /* Check the response from remote dsp */
  813. err = ctx->retval;
  814. if (err)
  815. goto bail;
  816. if (ctx->nscalars) {
  817. /* make sure that all memory writes by DSP are seen by CPU */
  818. dma_rmb();
  819. /* populate all the output buffers with results */
  820. err = fastrpc_put_args(ctx, kernel);
  821. if (err)
  822. goto bail;
  823. }
  824. bail:
  825. if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
  826. /* We are done with this compute context */
  827. spin_lock(&fl->lock);
  828. list_del(&ctx->node);
  829. spin_unlock(&fl->lock);
  830. fastrpc_context_put(ctx);
  831. }
  832. if (err)
  833. dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
  834. return err;
  835. }
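/*
 * fastrpc_internal_invoke() is the common path for both user ioctls and
 * kernel-initiated calls (process create/attach/release, mmap/munmap):
 * it marshals arguments into one DMA buffer (fastrpc_get_args()), sends
 * a fastrpc_msg over rpmsg, sleeps on ctx->work until
 * fastrpc_rpmsg_callback() completes it, and finally copies output
 * buffers back with fastrpc_put_args(). Kernel callers wait with a 10 s
 * timeout; user callers wait interruptibly.
 */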
  836. static int fastrpc_init_create_process(struct fastrpc_user *fl,
  837. char __user *argp)
  838. {
  839. struct fastrpc_init_create init;
  840. struct fastrpc_invoke_args *args;
  841. struct fastrpc_phy_page pages[1];
  842. struct fastrpc_map *map = NULL;
  843. struct fastrpc_buf *imem = NULL;
  844. int memlen;
  845. int err;
  846. struct {
  847. int pgid;
  848. u32 namelen;
  849. u32 filelen;
  850. u32 pageslen;
  851. u32 attrs;
  852. u32 siglen;
  853. } inbuf;
  854. u32 sc;
  855. args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
  856. if (!args)
  857. return -ENOMEM;
  858. if (copy_from_user(&init, argp, sizeof(init))) {
  859. err = -EFAULT;
  860. goto err;
  861. }
  862. if (init.filelen > INIT_FILELEN_MAX) {
  863. err = -EINVAL;
  864. goto err;
  865. }
  866. inbuf.pgid = fl->tgid;
  867. inbuf.namelen = strlen(current->comm) + 1;
  868. inbuf.filelen = init.filelen;
  869. inbuf.pageslen = 1;
  870. inbuf.attrs = init.attrs;
  871. inbuf.siglen = init.siglen;
  872. fl->pd = USER_PD;
  873. if (init.filelen && init.filefd) {
  874. err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
  875. if (err)
  876. goto err;
  877. }
  878. memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
  879. 1024 * 1024);
  880. err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
  881. &imem);
  882. if (err)
  883. goto err_alloc;
  884. fl->init_mem = imem;
  885. args[0].ptr = (u64)(uintptr_t)&inbuf;
  886. args[0].length = sizeof(inbuf);
  887. args[0].fd = -1;
  888. args[1].ptr = (u64)(uintptr_t)current->comm;
  889. args[1].length = inbuf.namelen;
  890. args[1].fd = -1;
  891. args[2].ptr = (u64) init.file;
  892. args[2].length = inbuf.filelen;
  893. args[2].fd = init.filefd;
  894. pages[0].addr = imem->phys;
  895. pages[0].size = imem->size;
  896. args[3].ptr = (u64)(uintptr_t) pages;
  897. args[3].length = 1 * sizeof(*pages);
  898. args[3].fd = -1;
  899. args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
  900. args[4].length = sizeof(inbuf.attrs);
  901. args[4].fd = -1;
  902. args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
  903. args[5].length = sizeof(inbuf.siglen);
  904. args[5].fd = -1;
  905. sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
  906. if (init.attrs)
  907. sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
  908. err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
  909. sc, args);
  910. if (err)
  911. goto err_invoke;
  912. kfree(args);
  913. return 0;
  914. err_invoke:
  915. fl->init_mem = NULL;
  916. fastrpc_buf_free(imem);
  917. err_alloc:
  918. if (map) {
  919. spin_lock(&fl->lock);
  920. list_del(&map->node);
  921. spin_unlock(&fl->lock);
  922. fastrpc_map_put(map);
  923. }
  924. err:
  925. kfree(args);
  926. return err;
  927. }
  928. static struct fastrpc_session_ctx *fastrpc_session_alloc(
  929. struct fastrpc_channel_ctx *cctx)
  930. {
  931. struct fastrpc_session_ctx *session = NULL;
  932. unsigned long flags;
  933. int i;
  934. spin_lock_irqsave(&cctx->lock, flags);
  935. for (i = 0; i < cctx->sesscount; i++) {
  936. if (!cctx->session[i].used && cctx->session[i].valid) {
  937. cctx->session[i].used = true;
  938. session = &cctx->session[i];
  939. break;
  940. }
  941. }
  942. spin_unlock_irqrestore(&cctx->lock, flags);
  943. return session;
  944. }
  945. static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
  946. struct fastrpc_session_ctx *session)
  947. {
  948. unsigned long flags;
  949. spin_lock_irqsave(&cctx->lock, flags);
  950. session->used = false;
  951. spin_unlock_irqrestore(&cctx->lock, flags);
  952. }
  953. static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
  954. {
  955. struct fastrpc_invoke_args args[1];
  956. int tgid = 0;
  957. u32 sc;
  958. tgid = fl->tgid;
  959. args[0].ptr = (u64)(uintptr_t) &tgid;
  960. args[0].length = sizeof(tgid);
  961. args[0].fd = -1;
  962. args[0].reserved = 0;
  963. sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
  964. return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
  965. sc, &args[0]);
  966. }
  967. static int fastrpc_device_release(struct inode *inode, struct file *file)
  968. {
  969. struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
  970. struct fastrpc_channel_ctx *cctx = fl->cctx;
  971. struct fastrpc_invoke_ctx *ctx, *n;
  972. struct fastrpc_map *map, *m;
  973. struct fastrpc_buf *buf, *b;
  974. unsigned long flags;
  975. fastrpc_release_current_dsp_process(fl);
  976. spin_lock_irqsave(&cctx->lock, flags);
  977. list_del(&fl->user);
  978. spin_unlock_irqrestore(&cctx->lock, flags);
  979. if (fl->init_mem)
  980. fastrpc_buf_free(fl->init_mem);
  981. list_for_each_entry_safe(ctx, n, &fl->pending, node) {
  982. list_del(&ctx->node);
  983. fastrpc_context_put(ctx);
  984. }
  985. list_for_each_entry_safe(map, m, &fl->maps, node) {
  986. list_del(&map->node);
  987. fastrpc_map_put(map);
  988. }
  989. list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
  990. list_del(&buf->node);
  991. fastrpc_buf_free(buf);
  992. }
  993. fastrpc_session_free(cctx, fl->sctx);
  994. fastrpc_channel_ctx_put(cctx);
  995. mutex_destroy(&fl->mutex);
  996. kfree(fl);
  997. file->private_data = NULL;
  998. return 0;
  999. }
  1000. static int fastrpc_device_open(struct inode *inode, struct file *filp)
  1001. {
  1002. struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
  1003. struct fastrpc_user *fl = NULL;
  1004. unsigned long flags;
  1005. fl = kzalloc(sizeof(*fl), GFP_KERNEL);
  1006. if (!fl)
  1007. return -ENOMEM;
  1008. /* Released in fastrpc_device_release() */
  1009. fastrpc_channel_ctx_get(cctx);
  1010. filp->private_data = fl;
  1011. spin_lock_init(&fl->lock);
  1012. mutex_init(&fl->mutex);
  1013. INIT_LIST_HEAD(&fl->pending);
  1014. INIT_LIST_HEAD(&fl->maps);
  1015. INIT_LIST_HEAD(&fl->mmaps);
  1016. INIT_LIST_HEAD(&fl->user);
  1017. fl->tgid = current->tgid;
  1018. fl->cctx = cctx;
  1019. fl->sctx = fastrpc_session_alloc(cctx);
  1020. if (!fl->sctx) {
  1021. dev_err(&cctx->rpdev->dev, "No session available\n");
  1022. mutex_destroy(&fl->mutex);
  1023. kfree(fl);
  1024. return -EBUSY;
  1025. }
  1026. spin_lock_irqsave(&cctx->lock, flags);
  1027. list_add_tail(&fl->user, &cctx->users);
  1028. spin_unlock_irqrestore(&cctx->lock, flags);
  1029. return 0;
  1030. }
  1031. static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
  1032. {
  1033. struct fastrpc_alloc_dma_buf bp;
  1034. DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
  1035. struct fastrpc_buf *buf = NULL;
  1036. int err;
  1037. if (copy_from_user(&bp, argp, sizeof(bp)))
  1038. return -EFAULT;
  1039. err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
  1040. if (err)
  1041. return err;
  1042. exp_info.ops = &fastrpc_dma_buf_ops;
  1043. exp_info.size = bp.size;
  1044. exp_info.flags = O_RDWR;
  1045. exp_info.priv = buf;
  1046. buf->dmabuf = dma_buf_export(&exp_info);
  1047. if (IS_ERR(buf->dmabuf)) {
  1048. err = PTR_ERR(buf->dmabuf);
  1049. fastrpc_buf_free(buf);
  1050. return err;
  1051. }
  1052. bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
  1053. if (bp.fd < 0) {
  1054. dma_buf_put(buf->dmabuf);
  1055. return -EINVAL;
  1056. }
  1057. if (copy_to_user(argp, &bp, sizeof(bp))) {
  1058. /*
  1059. * The usercopy failed, but we can't do much about it, as
  1060. * dma_buf_fd() already called fd_install() and made the
  1061. * file descriptor accessible for the current process. It
  1062. * might already be closed and dmabuf no longer valid when
  1063. * we reach this point. Therefore "leak" the fd and rely on
  1064. * the process exit path to do any required cleanup.
  1065. */
  1066. return -EFAULT;
  1067. }
  1068. return 0;
  1069. }
  1070. static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
  1071. {
  1072. struct fastrpc_invoke_args args[1];
  1073. int tgid = fl->tgid;
  1074. u32 sc;
  1075. args[0].ptr = (u64)(uintptr_t) &tgid;
  1076. args[0].length = sizeof(tgid);
  1077. args[0].fd = -1;
  1078. args[0].reserved = 0;
  1079. sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
  1080. fl->pd = pd;
  1081. return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
  1082. sc, &args[0]);
  1083. }
  1084. static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
  1085. {
  1086. struct fastrpc_invoke_args *args = NULL;
  1087. struct fastrpc_invoke inv;
  1088. u32 nscalars;
  1089. int err;
  1090. if (copy_from_user(&inv, argp, sizeof(inv)))
  1091. return -EFAULT;
  1092. /* nscalars is truncated here to max supported value */
  1093. nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
  1094. if (nscalars) {
  1095. args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
  1096. if (!args)
  1097. return -ENOMEM;
  1098. if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
  1099. nscalars * sizeof(*args))) {
  1100. kfree(args);
  1101. return -EFAULT;
  1102. }
  1103. }
  1104. err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
  1105. kfree(args);
  1106. return err;
  1107. }
  1108. static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
  1109. struct fastrpc_req_munmap *req)
  1110. {
  1111. struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
  1112. struct fastrpc_buf *buf, *b;
  1113. struct fastrpc_munmap_req_msg req_msg;
  1114. struct device *dev = fl->sctx->dev;
  1115. int err;
  1116. u32 sc;
  1117. spin_lock(&fl->lock);
  1118. list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
  1119. if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
  1120. break;
  1121. buf = NULL;
  1122. }
  1123. spin_unlock(&fl->lock);
  1124. if (!buf) {
  1125. dev_err(dev, "mmap not in list\n");
  1126. return -EINVAL;
  1127. }
  1128. req_msg.pgid = fl->tgid;
  1129. req_msg.size = buf->size;
  1130. req_msg.vaddr = buf->raddr;
  1131. args[0].ptr = (u64) (uintptr_t) &req_msg;
  1132. args[0].length = sizeof(req_msg);
  1133. sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
  1134. err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
  1135. &args[0]);
  1136. if (!err) {
  1137. dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
  1138. spin_lock(&fl->lock);
  1139. list_del(&buf->node);
  1140. spin_unlock(&fl->lock);
  1141. fastrpc_buf_free(buf);
  1142. } else {
  1143. dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
  1144. }
  1145. return err;
  1146. }
  1147. static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
  1148. {
  1149. struct fastrpc_req_munmap req;
  1150. if (copy_from_user(&req, argp, sizeof(req)))
  1151. return -EFAULT;
  1152. return fastrpc_req_munmap_impl(fl, &req);
  1153. }
  1154. static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
  1155. {
  1156. struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
  1157. struct fastrpc_buf *buf = NULL;
  1158. struct fastrpc_mmap_req_msg req_msg;
  1159. struct fastrpc_mmap_rsp_msg rsp_msg;
  1160. struct fastrpc_req_munmap req_unmap;
  1161. struct fastrpc_phy_page pages;
  1162. struct fastrpc_req_mmap req;
  1163. struct device *dev = fl->sctx->dev;
  1164. int err;
  1165. u32 sc;
  1166. if (copy_from_user(&req, argp, sizeof(req)))
  1167. return -EFAULT;
  1168. if (req.flags != ADSP_MMAP_ADD_PAGES) {
  1169. dev_err(dev, "flag not supported 0x%x\n", req.flags);
  1170. return -EINVAL;
  1171. }
  1172. if (req.vaddrin) {
  1173. dev_err(dev, "adding user allocated pages is not supported\n");
  1174. return -EINVAL;
  1175. }
  1176. err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
  1177. if (err) {
  1178. dev_err(dev, "failed to allocate buffer\n");
  1179. return err;
  1180. }
  1181. req_msg.pgid = fl->tgid;
  1182. req_msg.flags = req.flags;
  1183. req_msg.vaddr = req.vaddrin;
  1184. req_msg.num = sizeof(pages);
  1185. args[0].ptr = (u64) (uintptr_t) &req_msg;
  1186. args[0].length = sizeof(req_msg);
  1187. pages.addr = buf->phys;
  1188. pages.size = buf->size;
  1189. args[1].ptr = (u64) (uintptr_t) &pages;
  1190. args[1].length = sizeof(pages);
  1191. args[2].ptr = (u64) (uintptr_t) &rsp_msg;
  1192. args[2].length = sizeof(rsp_msg);
  1193. sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
  1194. err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
  1195. &args[0]);
  1196. if (err) {
  1197. dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
  1198. goto err_invoke;
  1199. }
  1200. /* update the buffer to be able to deallocate the memory on the DSP */
  1201. buf->raddr = (uintptr_t) rsp_msg.vaddr;
  1202. /* let the client know the address to use */
  1203. req.vaddrout = rsp_msg.vaddr;
  1204. spin_lock(&fl->lock);
  1205. list_add_tail(&buf->node, &fl->mmaps);
  1206. spin_unlock(&fl->lock);
  1207. if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
  1208. /* unmap the memory and release the buffer */
  1209. req_unmap.vaddrout = buf->raddr;
  1210. req_unmap.size = buf->size;
  1211. fastrpc_req_munmap_impl(fl, &req_unmap);
  1212. return -EFAULT;
  1213. }
  1214. dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
  1215. buf->raddr, buf->size);
  1216. return 0;
  1217. err_invoke:
  1218. fastrpc_buf_free(buf);
  1219. return err;
  1220. }
  1221. static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
  1222. unsigned long arg)
  1223. {
  1224. struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
  1225. char __user *argp = (char __user *)arg;
  1226. int err;
  1227. switch (cmd) {
  1228. case FASTRPC_IOCTL_INVOKE:
  1229. err = fastrpc_invoke(fl, argp);
  1230. break;
  1231. case FASTRPC_IOCTL_INIT_ATTACH:
  1232. err = fastrpc_init_attach(fl, AUDIO_PD);
  1233. break;
  1234. case FASTRPC_IOCTL_INIT_ATTACH_SNS:
  1235. err = fastrpc_init_attach(fl, SENSORS_PD);
  1236. break;
  1237. case FASTRPC_IOCTL_INIT_CREATE:
  1238. err = fastrpc_init_create_process(fl, argp);
  1239. break;
  1240. case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
  1241. err = fastrpc_dmabuf_alloc(fl, argp);
  1242. break;
  1243. case FASTRPC_IOCTL_MMAP:
  1244. err = fastrpc_req_mmap(fl, argp);
  1245. break;
  1246. case FASTRPC_IOCTL_MUNMAP:
  1247. err = fastrpc_req_munmap(fl, argp);
  1248. break;
  1249. default:
  1250. err = -ENOTTY;
  1251. break;
  1252. }
  1253. return err;
  1254. }
  1255. static const struct file_operations fastrpc_fops = {
  1256. .open = fastrpc_device_open,
  1257. .release = fastrpc_device_release,
  1258. .unlocked_ioctl = fastrpc_device_ioctl,
  1259. .compat_ioctl = fastrpc_device_ioctl,
  1260. };
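/*
 * Minimal user-space sketch of this interface (illustrative only; it
 * assumes the ioctl numbers and structure layouts from
 * <uapi/misc/fastrpc.h> and the /dev/fastrpc-<domain> node named in
 * fastrpc_rpmsg_probe() below):
 *
 *     int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *
 *     // attach to the root/audio protection domain
 *     ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);
 *
 *     struct fastrpc_invoke_args arg = {
 *             .ptr = (__u64)(uintptr_t)buf,   // caller's buffer
 *             .length = sizeof(buf),
 *             .fd = -1,                       // not a dma-buf
 *             // .reserved stays 0, as fastrpc_create_maps() requires
 *     };
 *     struct fastrpc_invoke inv = {
 *             .handle = handle,               // remote handle
 *             .sc = sc,                       // scalars word, see above
 *             .args = (__u64)(uintptr_t)&arg,
 *     };
 *     ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 */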
  1261. static int fastrpc_cb_probe(struct platform_device *pdev)
  1262. {
  1263. struct fastrpc_channel_ctx *cctx;
  1264. struct fastrpc_session_ctx *sess;
  1265. struct device *dev = &pdev->dev;
  1266. int i, sessions = 0;
  1267. unsigned long flags;
  1268. int rc;
  1269. cctx = dev_get_drvdata(dev->parent);
  1270. if (!cctx)
  1271. return -EINVAL;
  1272. of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
  1273. spin_lock_irqsave(&cctx->lock, flags);
  1274. sess = &cctx->session[cctx->sesscount];
  1275. sess->used = false;
  1276. sess->valid = true;
  1277. sess->dev = dev;
  1278. dev_set_drvdata(dev, sess);
  1279. if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
  1280. dev_info(dev, "FastRPC Session ID not specified in DT\n");
  1281. if (sessions > 0) {
  1282. struct fastrpc_session_ctx *dup_sess;
  1283. for (i = 1; i < sessions; i++) {
  1284. if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
  1285. break;
  1286. dup_sess = &cctx->session[cctx->sesscount];
  1287. memcpy(dup_sess, sess, sizeof(*dup_sess));
  1288. }
  1289. }
  1290. cctx->sesscount++;
  1291. spin_unlock_irqrestore(&cctx->lock, flags);
  1292. rc = dma_set_mask(dev, DMA_BIT_MASK(32));
  1293. if (rc) {
  1294. dev_err(dev, "32-bit DMA enable failed\n");
  1295. return rc;
  1296. }
  1297. return 0;
  1298. }
  1299. static int fastrpc_cb_remove(struct platform_device *pdev)
  1300. {
  1301. struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
  1302. struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
  1303. unsigned long flags;
  1304. int i;
  1305. spin_lock_irqsave(&cctx->lock, flags);
  1306. for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
  1307. if (cctx->session[i].sid == sess->sid) {
  1308. cctx->session[i].valid = false;
  1309. cctx->sesscount--;
  1310. }
  1311. }
  1312. spin_unlock_irqrestore(&cctx->lock, flags);
  1313. return 0;
  1314. }
  1315. static const struct of_device_id fastrpc_match_table[] = {
  1316. { .compatible = "qcom,fastrpc-compute-cb", },
  1317. {}
  1318. };
  1319. static struct platform_driver fastrpc_cb_driver = {
  1320. .probe = fastrpc_cb_probe,
  1321. .remove = fastrpc_cb_remove,
  1322. .driver = {
  1323. .name = "qcom,fastrpc-cb",
  1324. .of_match_table = fastrpc_match_table,
  1325. .suppress_bind_attrs = true,
  1326. },
  1327. };
  1328. static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
  1329. {
  1330. struct device *rdev = &rpdev->dev;
  1331. struct fastrpc_channel_ctx *data;
  1332. int i, err, domain_id = -1;
  1333. const char *domain;
  1334. err = of_property_read_string(rdev->of_node, "label", &domain);
  1335. if (err) {
  1336. dev_info(rdev, "FastRPC Domain not specified in DT\n");
  1337. return err;
  1338. }
  1339. for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
  1340. if (!strcmp(domains[i], domain)) {
  1341. domain_id = i;
  1342. break;
  1343. }
  1344. }
  1345. if (domain_id < 0) {
  1346. dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
  1347. return -EINVAL;
  1348. }
  1349. data = kzalloc(sizeof(*data), GFP_KERNEL);
  1350. if (!data)
  1351. return -ENOMEM;
  1352. data->miscdev.minor = MISC_DYNAMIC_MINOR;
  1353. data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
  1354. domains[domain_id]);
  1355. data->miscdev.fops = &fastrpc_fops;
  1356. err = misc_register(&data->miscdev);
  1357. if (err) {
  1358. kfree(data);
  1359. return err;
  1360. }
  1361. kref_init(&data->refcount);
  1362. dev_set_drvdata(&rpdev->dev, data);
  1363. dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
  1364. INIT_LIST_HEAD(&data->users);
  1365. spin_lock_init(&data->lock);
  1366. idr_init(&data->ctx_idr);
  1367. data->domain_id = domain_id;
  1368. data->rpdev = rpdev;
  1369. return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
  1370. }
  1371. static void fastrpc_notify_users(struct fastrpc_user *user)
  1372. {
  1373. struct fastrpc_invoke_ctx *ctx;
  1374. spin_lock(&user->lock);
  1375. list_for_each_entry(ctx, &user->pending, node)
  1376. complete(&ctx->work);
  1377. spin_unlock(&user->lock);
  1378. }
  1379. static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
  1380. {
  1381. struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
  1382. struct fastrpc_user *user;
  1383. unsigned long flags;
  1384. spin_lock_irqsave(&cctx->lock, flags);
  1385. list_for_each_entry(user, &cctx->users, user)
  1386. fastrpc_notify_users(user);
  1387. spin_unlock_irqrestore(&cctx->lock, flags);
  1388. misc_deregister(&cctx->miscdev);
  1389. of_platform_depopulate(&rpdev->dev);
  1390. cctx->rpdev = NULL;
  1391. fastrpc_channel_ctx_put(cctx);
  1392. }
  1393. static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
  1394. int len, void *priv, u32 addr)
  1395. {
  1396. struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
  1397. struct fastrpc_invoke_rsp *rsp = data;
  1398. struct fastrpc_invoke_ctx *ctx;
  1399. unsigned long flags;
  1400. unsigned long ctxid;
  1401. if (len < sizeof(*rsp))
  1402. return -EINVAL;
  1403. ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
  1404. spin_lock_irqsave(&cctx->lock, flags);
  1405. ctx = idr_find(&cctx->ctx_idr, ctxid);
  1406. spin_unlock_irqrestore(&cctx->lock, flags);
  1407. if (!ctx) {
  1408. dev_err(&rpdev->dev, "No context ID matches response\n");
  1409. return -ENOENT;
  1410. }
  1411. ctx->retval = rsp->retval;
  1412. complete(&ctx->work);
  1413. /*
  1414. * The DMA buffer associated with the context cannot be freed in
  1415. * interrupt context so schedule it through a worker thread to
  1416. * avoid a kernel BUG.
  1417. */
  1418. schedule_work(&ctx->put_work);
  1419. return 0;
  1420. }
  1421. static const struct of_device_id fastrpc_rpmsg_of_match[] = {
  1422. { .compatible = "qcom,fastrpc" },
  1423. { },
  1424. };
  1425. MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
  1426. static struct rpmsg_driver fastrpc_driver = {
  1427. .probe = fastrpc_rpmsg_probe,
  1428. .remove = fastrpc_rpmsg_remove,
  1429. .callback = fastrpc_rpmsg_callback,
  1430. .drv = {
  1431. .name = "qcom,fastrpc",
  1432. .of_match_table = fastrpc_rpmsg_of_match,
  1433. },
  1434. };
  1435. static int fastrpc_init(void)
  1436. {
  1437. int ret;
  1438. ret = platform_driver_register(&fastrpc_cb_driver);
  1439. if (ret < 0) {
  1440. pr_err("fastrpc: failed to register cb driver\n");
  1441. return ret;
  1442. }
  1443. ret = register_rpmsg_driver(&fastrpc_driver);
  1444. if (ret < 0) {
  1445. pr_err("fastrpc: failed to register rpmsg driver\n");
  1446. platform_driver_unregister(&fastrpc_cb_driver);
  1447. return ret;
  1448. }
  1449. return 0;
  1450. }
  1451. module_init(fastrpc_init);
  1452. static void fastrpc_exit(void)
  1453. {
  1454. platform_driver_unregister(&fastrpc_cb_driver);
  1455. unregister_rpmsg_driver(&fastrpc_driver);
  1456. }
  1457. module_exit(fastrpc_exit);
  1458. MODULE_LICENSE("GPL v2");