misc: fastrpc: Rework fastrpc_req_munmap
drivers/misc/fastrpc.c (linux-2.6-block.git)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#include <linux/of_reserved_mem.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, slpi, cdsp */
#define FASTRPC_MAX_SESSIONS	14
#define FASTRPC_MAX_VMIDS	16
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_DSP_UTILITIES_HANDLE	2
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"
#define ADSP_MMAP_ADD_PAGES	0x1000
#define DSP_UNSUPPORTED_API	(0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES	(256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN	(sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
					(((attr & 0x07) << 29) |	\
					((method & 0x1f) << 24) |	\
					((in & 0xff) << 16) |		\
					((out & 0xff) << 8) |		\
					((oin & 0x0f) << 4) |		\
					(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

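/*
 * Scalars word layout (from the macro above): attr[31:29],
 * method[28:24], inbufs[23:16], outbufs[15:8], inhandles[7:4],
 * outhandles[3:0].  For example, FASTRPC_SCALARS(4, 2, 1) describes
 * method 4 with two input buffers and one output buffer, and encodes
 * to 0x04020100.
 */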
#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8
#define FASTRPC_RMID_INIT_MEM_MAP	10
#define FASTRPC_RMID_INIT_MEM_UNMAP	11

/* Protection Domain (PD) ids */
#define ROOT_PD		(0)
#define USER_PD		(1)
#define SENSORS_PD	(2)

#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_dmahandle {
	s32 fd;			/* dma handle fd */
	u32 offset;		/* dma handle offset */
	u32 len;		/* dma handle length */
};

struct fastrpc_remote_buf {
	u64 pv;			/* buffer pointer */
	u64 len;		/* length of buffer */
};

union fastrpc_remote_arg {
	struct fastrpc_remote_buf buf;
	struct fastrpc_remote_dmahandle dma;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_mem_map_req_msg {
	s32 pgid;
	s32 fd;
	s32 offset;
	u32 flags;
	u64 vaddrin;
	s32 num;
	s32 data_len;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_mem_unmap_req_msg {
	s32 pgid;
	s32 fd;
	u64 vaddrin;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmap support */
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	u64 raddr;
	u32 attr;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	int vmcount;
	u32 perms;
	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct kref refcount;
	/* Flag if dsp attributes are cached */
	bool valid_attributes;
	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
	struct fastrpc_device *secure_fdevice;
	struct fastrpc_device *fdevice;
	bool secure;
	bool unsigned_support;
};

struct fastrpc_device {
	struct fastrpc_channel_ctx *cctx;
	struct miscdevice miscdev;
	bool secure;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	bool is_secure_dev;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
			struct qcom_scm_vmperm perm;
			int err = 0;

			perm.vmid = QCOM_SCM_VMID_HLOS;
			perm.perm = QCOM_SCM_PERM_RWX;
			err = qcom_scm_assign_mem(map->phys, map->size,
				&(map->fl->cctx->vmperms[0].vmid), &perm, 1);
			if (err) {
				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
					map->phys, map->size, err);
				return;
			}
		}
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

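/*
 * fastrpc_map_lookup() only finds a map in fl->maps;
 * fastrpc_map_find() additionally takes a reference, which the caller
 * must drop with fastrpc_map_put() once it is done with the map.
 */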
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
			      struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	int ret = fastrpc_map_lookup(fl, fd, ppmap);

	if (!ret)
		fastrpc_map_get(*ppmap);

	return ret;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			       u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	*obuf = buf;

	return 0;
}

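/*
 * The session ID (SMMU context bank) is folded into the upper 32 bits
 * of buf->phys below; FASTRPC_PHYS() strips it again wherever the raw
 * 32-bit physical address is needed, e.g. in fastrpc_buf_free().
 */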
static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	int ret;
	struct fastrpc_buf *buf;

	ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
	if (ret)
		return ret;

	buf = *obuf;

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	return 0;
}

static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
				     u64 size, struct fastrpc_buf **obuf)
{
	struct device *rdev = &fl->cctx->rpdev->dev;

	return __fastrpc_buf_alloc(fl, rdev, size, obuf);
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nbufs; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

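/*
 * Worked example: for args at [0x1000, 0x1800) and [0x1400, 0x2000),
 * the sort below places the lower start first; the second entry then
 * gets mstart = 0x1800 (the running max_end), mend = 0x2000 and
 * offset = 0x400, so only the non-overlapping tail is counted when
 * fastrpc_get_payload_size() later sums (mend - mstart) per buffer.
 */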
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	iosys_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detatch,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

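/*
 * These ops back the buffers exported to userspace by
 * fastrpc_dmabuf_alloc() (FASTRPC_IOCTL_ALLOC_DMA_BUFF); the returned
 * fd can later be passed in fastrpc_invoke_args.fd and is resolved
 * via dma_buf_get() in fastrpc_map_create() below.
 */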
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, u32 attr, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	kref_init(&map->refcount);

	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;

	if (attr & FASTRPC_ATTR_SECUREMAP) {
		/*
		 * If subsystem VMIDs are defined in DTSI, then do
		 * hyp_assign from HLOS to those VM(s)
		 */
		unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);

		map->attr = attr;
		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
					  fl->cctx->vmperms, fl->cctx->vmcount);
		if (err) {
			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
				map->phys, map->size, err);
			goto map_err;
		}
	}
	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	fastrpc_map_put(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |          Optional info          |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */

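/*
 * Metadata size, by example: with nscalars = 3, the computation below
 * gives (16 + 8 + 16) * 3 bytes for the remote args, invoke buffers
 * and page entries, plus 8 * 16 for the fd list and 4 * 64 for the
 * CRC list, i.e. 504 bytes before the FASTRPC_ALIGN rounding applied
 * in fastrpc_get_payload_size().
 */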
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_buf) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int oix;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (oix = 0; oix < ctx->nbufs; oix++) {
		int i = ctx->olaps[oix].raix;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[oix].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
			 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}

	}
	return 0;
}

static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
{
	return (struct fastrpc_invoke_buf *)(&pra[len]);
}

static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
{
	return (struct fastrpc_phy_page *)(&buf[len]);
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].buf.pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].buf.pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		if (ctx->maps[i]) {
			pages[i].addr = ctx->maps[i]->phys;
			pages[i].size = ctx->maps[i]->size;
		}
		rpra[i].dma.fd = ctx->args[i].fd;
		rpra[i].dma.len = ctx->args[i].length;
		rpra[i].dma.offset = (u64) ctx->args[i].ptr;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	union fastrpc_remote_arg *rpra = ctx->rpra;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_map *mmap = NULL;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	u64 *fdlist;
	int i, inbufs, outbufs, handles;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		if (!ctx->maps[i]) {
			void *src = (void *)(uintptr_t)rpra[i].buf.pv;
			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
			u64 len = rpra[i].buf.len;

			if (!kernel) {
				if (copy_to_user((void __user *)dst, src, len))
					return -EFAULT;
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
		if (!fdlist[i])
			break;
		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
			fastrpc_map_put(mmap);
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}

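/*
 * Common invoke path for both kernel-initiated calls (kernel = true,
 * non-interruptible wait with a 10 s timeout) and ioctl calls from
 * userspace (interruptible wait): marshal the arguments, send the
 * message over rpmsg, wait for the DSP response, then unmarshal the
 * output buffers.
 */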
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}
	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
	/* Check if the device node is non-secure and channel is secure */
	if (!fl->is_secure_dev && fl->cctx->secure) {
		/*
		 * Allow untrusted applications to offload only to Unsigned PD when
		 * channel is configured as secure and block untrusted apps on channel
		 * that does not support unsigned PD offload
		 */
		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
			return true;
		}
	}

	return false;
}

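/*
 * Creating a user PD passes up to six arguments: the inbuf header,
 * the process name, the init file contents, the page describing
 * init_mem, and the attrs and siglen words.  Plain
 * FASTRPC_RMID_INIT_CREATE sends only the first four;
 * FASTRPC_RMID_INIT_CREATE_ATTR takes all six when init.attrs is set.
 */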
static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;
	bool unsigned_module = false;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
		unsigned_module = true;

	if (is_session_rejected(fl, unsigned_module)) {
		err = -ECONNREFUSED;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

f6f9279f
SK
1351static int fastrpc_device_release(struct inode *inode, struct file *file)
1352{
1353 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1354 struct fastrpc_channel_ctx *cctx = fl->cctx;
c68cfb71
SK
1355 struct fastrpc_invoke_ctx *ctx, *n;
1356 struct fastrpc_map *map, *m;
2419e55e 1357 struct fastrpc_buf *buf, *b;
977e6c8d 1358 unsigned long flags;
f6f9279f 1359
d73f71c7
SK
1360 fastrpc_release_current_dsp_process(fl);
1361
977e6c8d 1362 spin_lock_irqsave(&cctx->lock, flags);
f6f9279f 1363 list_del(&fl->user);
977e6c8d 1364 spin_unlock_irqrestore(&cctx->lock, flags);
f6f9279f 1365
c68cfb71
SK
1366 if (fl->init_mem)
1367 fastrpc_buf_free(fl->init_mem);
1368
1369 list_for_each_entry_safe(ctx, n, &fl->pending, node) {
1370 list_del(&ctx->node);
1371 fastrpc_context_put(ctx);
1372 }
1373
1374 list_for_each_entry_safe(map, m, &fl->maps, node) {
1375 list_del(&map->node);
1376 fastrpc_map_put(map);
1377 }
1378
2419e55e
JRO
1379 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1380 list_del(&buf->node);
1381 fastrpc_buf_free(buf);
1382 }
1383
f6f9279f 1384 fastrpc_session_free(cctx, fl->sctx);
278d56f9 1385 fastrpc_channel_ctx_put(cctx);
f6f9279f
SK
1386
1387 mutex_destroy(&fl->mutex);
1388 kfree(fl);
1389 file->private_data = NULL;
1390
1391 return 0;
1392}
1393
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_device *fdevice;
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fdevice = miscdev_to_fdevice(filp->private_data);
	cctx = fdevice->cctx;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;
	fl->is_secure_dev = fdevice->secure;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf no longer valid when
		 * we reach this point. Therefore "leak" the fd and rely on
		 * the process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
				     uint32_t dsp_attr_buf_len)
{
	struct fastrpc_invoke_args args[2] = { 0 };

	/* Capability filled in userspace */
	dsp_attr_buf[0] = 0;

	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
	args[0].length = sizeof(dsp_attr_buf_len);
	args[0].fd = -1;
	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
	args[1].length = dsp_attr_buf_len;
	args[1].fd = -1;
	fl->pd = USER_PD;

	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
				       FASTRPC_SCALARS(0, 1, 1), args);
}

static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
					struct fastrpc_user *fl)
{
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	uint32_t attribute_id = cap->attribute_id;
	uint32_t *dsp_attributes;
	unsigned long flags;
	uint32_t domain = cap->domain;
	int err;

	spin_lock_irqsave(&cctx->lock, flags);
	/* check if we already have queried dsp for attributes */
	if (cctx->valid_attributes) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
	if (!dsp_attributes)
		return -ENOMEM;

	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	if (err == DSP_UNSUPPORTED_API) {
		dev_info(&cctx->rpdev->dev,
			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
		kfree(dsp_attributes);
		return -EOPNOTSUPP;
	} else if (err) {
		dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
		kfree(dsp_attributes);
		return err;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	cctx->valid_attributes = true;
	spin_unlock_irqrestore(&cctx->lock, flags);
	kfree(dsp_attributes);
done:
	cap->capability = cctx->dsp_attributes[attribute_id];
	return 0;
}

static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_ioctl_capability cap = {0};
	int err = 0;

	if (copy_from_user(&cap, argp, sizeof(cap)))
		return -EFAULT;

	cap.capability = 0;
	if (cap.domain >= FASTRPC_DEV_MAX) {
		dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
			cap.domain, err);
		return -ECHRNG;
	}

	/* Fastrpc Capabilities does not support modem domain */
	if (cap.domain == MDSP_DOMAIN_ID) {
		dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
		return -ECHRNG;
	}

	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
			cap.attribute_id, err);
		return -EOVERFLOW;
	}

	err = fastrpc_get_info_from_kernel(&cap, fl);
	if (err)
		return err;

	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
		return -EFAULT;

	return 0;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

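/*
 * The DSP-side address returned by mmap (buf->raddr) together with
 * the size is the key used below to find the buffer to unmap; both
 * must match the values the client got back from FASTRPC_IOCTL_MMAP.
 */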
static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_buf *buf = NULL, *iter, *b;
	struct fastrpc_req_munmap req;
	struct device *dev = fl->sctx->dev;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
		if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
			buf = iter;
			break;
		}
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
			req.vaddrout, req.size);
		return -EINVAL;
	}

	return fastrpc_req_munmap_impl(fl, buf);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);
		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		err = -EFAULT;
		goto err_assign;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_assign:
	fastrpc_req_munmap_impl(fl, buf);
err_invoke:
	fastrpc_buf_free(buf);

	return err;
}

static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_map *map = NULL, *iter, *m;
	struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
	int err = 0;
	u32 sc;
	struct device *dev = fl->sctx->dev;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, m, &fl->maps, node) {
		if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
			map = iter;
			break;
		}
	}

	spin_unlock(&fl->lock);

	if (!map) {
		dev_err(dev, "map not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.len = map->len;
	req_msg.vaddrin = map->raddr;
	req_msg.fd = map->fd;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	fastrpc_map_put(map);
	if (err)
		dev_err(dev, "unmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);

	return err;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_mem_unmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_mem_unmap_impl(fl, &req);
}

static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
	struct fastrpc_mem_map_req_msg req_msg = { 0 };
	struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
	struct fastrpc_mem_unmap req_unmap = { 0 };
	struct fastrpc_phy_page pages = { 0 };
	struct fastrpc_mem_map req;
	struct device *dev = fl->sctx->dev;
	struct fastrpc_map *map = NULL;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	/* create SMMU mapping */
	err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
	if (err) {
		dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.fd = req.fd;
	req_msg.offset = req.offset;
	req_msg.vaddrin = req.vaddrin;
	map->va = (void *) (uintptr_t) req.vaddrin;
	req_msg.flags = req.flags;
	req_msg.num = sizeof(pages);
	req_msg.data_len = 0;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = map->phys;
	pages.size = map->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &pages;
	args[2].length = 0;

	args[3].ptr = (u64) (uintptr_t) &rsp_msg;
	args[3].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
	if (err) {
		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
			req.fd, req.vaddrin, map->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	map->raddr = rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
		req_unmap.length = map->size;
		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	return 0;

err_invoke:
	fastrpc_map_put(map);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, ROOT_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_MAP:
		err = fastrpc_req_mem_map(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_UNMAP:
		err = fastrpc_req_mem_unmap(fl, argp);
		break;
	case FASTRPC_IOCTL_GET_DSP_INFO:
		err = fastrpc_get_dsp_info(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

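/*
 * Sketch of how userspace drives the handlers above through the misc
 * device (e.g. /dev/fastrpc-adsp); illustrative only, see
 * uapi/misc/fastrpc.h for the exact structures:
 *
 *	struct fastrpc_invoke_args args[1] = {
 *		{ .ptr = (__u64)(uintptr_t)buf, .length = sizeof(buf), .fd = -1 },
 *	};
 *	struct fastrpc_invoke inv = {
 *		.handle = handle,
 *		.sc = sc,	(built like FASTRPC_SCALARS(method, 1, 0))
 *		.args = (__u64)(uintptr_t)args,
 *	};
 *	ioctl(dev_fd, FASTRPC_IOCTL_INVOKE, &inv);
 */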
static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
		dev_err(&pdev->dev, "too many sessions\n");
		spin_unlock_irqrestore(&cctx->lock, flags);
		return -ENOSPC;
	}
	sess = &cctx->session[cctx->sesscount++];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount++];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

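/*
 * Illustrative devicetree fragment (values invented) for one context
 * bank handled by the probe above; "reg" supplies the session id and
 * "qcom,nsessions" the number of duplicated sessions sharing this
 * context bank. The dt-bindings schema is authoritative:
 *
 *	compute-cb@1 {
 *		compatible = "qcom,fastrpc-compute-cb";
 *		reg = <1>;
 *		qcom,nsessions = <4>;
 *	};
 */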
static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				   bool is_secured, const char *domain)
{
	struct fastrpc_device *fdev;
	int err;

	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	fdev->secure = is_secured;
	fdev->cctx = cctx;
	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
	fdev->miscdev.fops = &fastrpc_fops;
	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
					    domain, is_secured ? "-secure" : "");
	err = misc_register(&fdev->miscdev);
	if (!err) {
		if (is_secured)
			cctx->secure_fdevice = fdev;
		else
			cctx->fdevice = fdev;
	}

	return err;
}

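/*
 * With the devm_kasprintf() format above, registering the "cdsp"
 * domain with is_secured true and then false (as the CDSP case in
 * fastrpc_rpmsg_probe() below does) creates /dev/fastrpc-cdsp-secure
 * and /dev/fastrpc-cdsp.
 */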
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1, vmcount;
	const char *domain;
	bool secure_dsp;
	unsigned int vmids[FASTRPC_MAX_VMIDS];

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
		dev_info(rdev, "no reserved DMA memory for FASTRPC\n");

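	/*
	 * Read any hypervisor vmids listed for this channel; when they
	 * are present the channel memory will later be reassigned to
	 * those VMs through SCM calls, so bail out with -EPROBE_DEFER
	 * until the SCM interface is actually usable.
	 */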
	vmcount = of_property_read_variable_u32_array(rdev->of_node,
						      "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
	if (vmcount < 0)
		vmcount = 0;
	else if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (vmcount) {
		data->vmcount = vmcount;
		data->perms = BIT(QCOM_SCM_VMID_HLOS);
		for (i = 0; i < data->vmcount; i++) {
			data->vmperms[i].vmid = vmids[i];
			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
		}
	}

	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
	data->secure = secure_dsp;

	switch (domain_id) {
	case ADSP_DOMAIN_ID:
	case MDSP_DOMAIN_ID:
	case SDSP_DOMAIN_ID:
		/* Unsigned PD offloading is only supported on CDSP */
		data->unsigned_support = false;
		err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	case CDSP_DOMAIN_ID:
		data->unsigned_support = true;
		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
		err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
		if (err)
			goto fdev_error;

		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	default:
		err = -EINVAL;
		goto fdev_error;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
fdev_error:
	kfree(data);
	return err;
}

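/*
 * Sketch of the rpmsg channel node consumed by the probe above
 * (property names are the ones parsed there; values, the phandle and
 * the node placement are invented):
 *
 *	fastrpc {
 *		compatible = "qcom,fastrpc";
 *		label = "cdsp";
 *		qcom,non-secure-domain;
 *		memory-region = <&fastrpc_mem>;
 *
 *		(compute-cb child nodes, bound by fastrpc_cb_driver)
 *	};
 */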
static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (cctx->fdevice)
		misc_deregister(&cctx->fdevice->miscdev);

	if (cctx->secure_fdevice)
		misc_deregister(&cctx->secure_fdevice->miscdev);

	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

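	/*
	 * The context id rides in bits [11:4] of rsp->ctx (see
	 * FASTRPC_CTXID_MASK): e.g. a context allocated IDR id 0x12 is
	 * expected to answer with (rsp->ctx & 0xff0) == 0x120, assuming
	 * the send path packed the id as (id << 4).
	 */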
	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);