// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

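/*
 * struct optee_call_waiter - caller waiting for a free secure world thread
 * @list_node:	entry in the call queue's list of waiters
 * @c:		completion signalled when a secure world thread is available
 */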
struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally, if there's no contention in secure world, the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This guarantees
	 * that we don't lose a completion if secure world returns busy and
	 * another thread that has just exited tries to complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to the end of the list to get out of the way of other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

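/* Wake up the first waiter in the queue that hasn't been completed yet */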
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one waiting task, if any */
	optee_cq_complete_one(cq);

	/*
	 * If we've been completed, we received a completion from another
	 * task that just finished its call to secure world. Since yet
	 * another thread is now available in secure world, wake up another
	 * waiting task, if any.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					   u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns the return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			might_sleep();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world. If there are any
	 * thread waiters, wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}

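/*
 * Allocate shared memory for a message argument holding @num_params
 * parameters. On success the zero-initialized struct optee_msg_arg is
 * returned through @msg_arg, its physical address through @msg_parg, and
 * the backing tee_shm (to be freed by the caller) as the return value.
 */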
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;
out:
	if (rc) {
		tee_shm_free(shm);
		return ERR_PTR(rc);
	}

	return shm;
}

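/*
 * Open a session towards the trusted application identified by arg->uuid.
 * Two meta parameters carrying the TA UUID and the client identity are
 * prepended to the caller supplied parameters before the message is
 * passed to secure world.
 */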
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
					  arg->clnt_login, arg->clnt_uuid);
	if (rc)
		goto out;

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

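/* Invoke a function in a trusted application in a previously opened session */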
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

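/* Ask secure world to cancel the request identified by @cancel_id in @session */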
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/**
 * optee_fill_pages_list() - write a list of user pages to the given shared
 * buffer.
 *
 * @dst:	page-aligned buffer where the list of pages will be stored
 * @pages:	array of pages that represents the shared buffer
 * @num_pages:	number of entries in @pages
 * @page_offset: offset of the user buffer from the page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 * links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses a 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are no
	 * known ARM architectures with a page size < 4k. That makes the
	 * build assert below look redundant, but the following code heavily
	 * relies on this assumption, so it is better to be safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If the Linux page is bigger than 4k and the user buffer offset is
	 * larger than 4k/8k/12k/etc, this skips the leading 4k chunks of the
	 * first page, because they carry no data of value to OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

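/*
 * Allocate a physically contiguous, page-aligned buffer large enough to
 * hold a page list with @num_entries entries. Free it with
 * optee_free_pages_list().
 */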
u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

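/*
 * Return true if the page protection corresponds to normal (cached)
 * memory, the only memory type accepted here for shared buffers.
 */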
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to be registered with OP-TEE as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid(start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}

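/*
 * Register a shared buffer with OP-TEE: the pages backing @shm are
 * described with a non-contiguous page list which is passed to secure
 * world using the OPTEE_MSG_CMD_REGISTER_SHM command.
 */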
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

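/* Tell OP-TEE to stop tracking a previously registered shared buffer */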
int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it will be passed in RPC code.
	 */
	return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}