// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#include <linux/kmemleak.h>
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

/*
 * This file implements the SMC ABI used when communicating with secure world
 * OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Asynchronous notification
 * 6. Driver initialization.
 */

/*
 * A typical OP-TEE private shm allocation is 224 bytes (argument struct
 * with 6 parameters, needed for open session). So with an alignment of 512
 * we'll waste a bit more than 50%. However, it's only expected that we'll
 * have a handful of these structs allocated at a time. Most memory will
 * be allocated aligned to the page size, so all in all this should scale
 * up and down quite well.
 */
#define OPTEE_MIN_STATIC_POOL_ALIGN    9 /* 512 bytes aligned */
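
/*
 * Worked example of the numbers above (an illustration, not used by the
 * code): a 224 byte argument struct rounds up to the 512 byte minimum
 * allocation, wasting 512 - 224 = 288 bytes, i.e. 288/512 = 56.25%,
 * the "bit more than 50%" mentioned above.
 */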

/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main
 * functions.
 */
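
/*
 * The converters below translate the parameter attribute types with
 * plain arithmetic, for example:
 *
 *	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 *		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
 *
 * This relies on the INPUT/OUTPUT/INOUT variants being consecutive and
 * in the same order in both the TEE_IOCTL_PARAM_ATTR_TYPE_* and the
 * OPTEE_MSG_ATTR_TYPE_* numbering, so the offset from the INPUT variant
 * carries the direction over unchanged.
 */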

static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	return 0;
}

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

/**
 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_dynamic(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */
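
/*
 * Rough sketch of the cache disable handshake implemented below: each
 * OPTEE_SMC_DISABLE_SHM_CACHE call returns at most one previously
 * cached shared memory object (as a register pair cookie) with status
 * OPTEE_SMC_RETURN_OK, and the loop repeats until secure world answers
 * OPTEE_SMC_RETURN_ENOTAVAIL, meaning the cache is empty.
 */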

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee:	main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
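
/*
 * With the 4 KiB OPTEE_MSG_NONCONTIG_PAGE_SIZE this evaluates to
 * 4096 / 8 - 1 = 511: each pagelist page carries 511 page addresses,
 * with the final u64 reserved for the link to the next pagelist page.
 */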

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}
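
/*
 * For example, assuming the 4 KiB non-contiguous page size: 511 entries
 * still fit in a single 4096 byte pagelist page, while 512 entries need
 * DIV_ROUND_UP(512, 511) = 2 pages, i.e. 8192 bytes.
 */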

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 *	links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the build assert below looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If the Linux page is bigger than 4k, and the user buffer offset
	 * is larger than 4k/8k/12k/etc, this will skip the first 4k pages,
	 * because they bear no valid data for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
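
/*
 * Resulting layout consumed by secure world (illustration only):
 *
 *   pagelist page 0                         pagelist page 1
 *   +------------------------+             +------------------------+
 *   | pa of 4k page 0        |      +----->| pa of 4k page 511      |
 *   | pa of 4k page 1        |      |      | ...                    |
 *   | ...                    |      |      | ...                    |
 *   | pa of 4k page 510      |      |      | link or unused         |
 *   | pa of pagelist page 1  |------+      +------------------------+
 *   +------------------------+
 */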

static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	size_t sz;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	/*
	 * We're about to register shared memory, so we can't use
	 * registered shared memory for this request or there's a
	 * catch-22.
	 *
	 * So in this case we'll have to do the good old temporary private
	 * allocation instead of using optee_get_msg_arg().
	 */
	sz = optee_msg_arg_size(optee->rpc_param_count);
	shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}
	msg_arg = tee_shm_get_va(shm_arg, 0);
	if (IS_ERR(msg_arg)) {
		rc = PTR_ERR(msg_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
	msg_arg->num_params = 1;
	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
	 * store buffer offset from 4k page, as described in OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;
	size_t sz;

	/*
	 * We're about to unregister shared memory and we may not be able
	 * to register shared memory for this request in case we're called
	 * from optee_shm_arg_cache_uninit().
	 *
	 * So in order to keep things simple in this function just as in
	 * optee_shm_register() we'll use a temporary private allocation
	 * instead of using optee_get_msg_arg().
	 */
	sz = optee_msg_arg_size(optee->rpc_param_count);
	shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);
	msg_arg = tee_shm_get_va(shm_arg, 0);
	if (IS_ERR(msg_arg)) {
		rc = PTR_ERR(msg_arg);
		goto out;
	}

	memset(msg_arg, 0, sz);
	msg_arg->num_params = 1;
	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
out:
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}

/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */

static int pool_op_alloc(struct tee_shm_pool *pool,
			 struct tee_shm *shm, size_t size, size_t align)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);

	return optee_pool_op_alloc_helper(pool, shm, size, align,
					  optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
	else
		optee_pool_op_free_helper(pool, shm, NULL);
}

static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_pool = pool_op_destroy_pool,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->ops = &pool_ops;

	return pool;
}

/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to for instance allow rescheduling of
 * the current task.
 */
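
/*
 * The call loop in optee_smc_do_call_with_arg() below roughly works
 * like this (a sketch, see the function for the details):
 *
 *	issue OPTEE_SMC_CALL_WITH_[REGD_|RPC_]ARG;
 *	while OPTEE_SMC_RETURN_IS_RPC(a0):
 *		serve the request in optee_handle_rpc();
 *		resume with OPTEE_SMC_CALL_RETURN_FROM_RPC;
 *	on OPTEE_SMC_RETURN_ETHREAD_LIMIT:
 *		wait in the call queue and retry;
 */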

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		/* Return here so TEEC_SUCCESS below can't clobber the error */
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee *optee,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_dynamic(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct optee_msg_arg *arg,
				struct optee_call_ctx *call_ctx)
{
	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @rpc_arg:	pointer to the RPC argument struct, or NULL if it has to be
 *		looked up via the registers in @param
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_msg_arg *rpc_arg,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_msg_arg *arg;
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing, since they are handled in Linux a dummy RPC is
		 * performed to let Linux take the interrupt through the normal
		 * vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		if (rpc_arg) {
			arg = rpc_arg;
		} else {
			shm = reg_pair_to_ptr(param->a1, param->a2);
			arg = tee_shm_get_va(shm, 0);
			if (IS_ERR(arg)) {
				pr_err("%s: tee_shm_get_va %p failed\n",
				       __func__, shm);
				break;
			}
		}

		handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}

/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @shm:	shared memory holding the message to pass to secure world
 * @offs:	offset of the message in @shm
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *shm, u_int offs)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	struct optee_msg_arg *rpc_arg = NULL;
	int rc;

	if (optee->rpc_param_count) {
		struct optee_msg_arg *arg;
		unsigned int rpc_arg_offs;

		arg = tee_shm_get_va(shm, offs);
		if (IS_ERR(arg))
			return PTR_ERR(arg);

		rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
		rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
		if (IS_ERR(rpc_arg))
			return PTR_ERR(rpc_arg);
	}

	if (rpc_arg && tee_shm_is_dynamic(shm)) {
		param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
		reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
		param.a3 = offs;
	} else {
		phys_addr_t parg;

		rc = tee_shm_get_pa(shm, offs, &parg);
		if (rc)
			return rc;

		if (rpc_arg)
			param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
		else
			param.a0 = OPTEE_SMC_CALL_WITH_ARG;
		reg_pair_from_64(&param.a1, &param.a2, parg);
	}
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;
	u_int offs;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = cmd;
	optee_smc_do_call_with_arg(ctx, shm, offs);

	optee_free_msg_arg(ctx, entry, offs);
	return 0;
}

static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}

/*
 * 5. Asynchronous notification
 */
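
/*
 * Flow implemented below: secure world raises the notification
 * interrupt, the hard irq handler drains pending values with
 * OPTEE_SMC_GET_ASYNC_NOTIF_VALUE and forwards them to
 * optee_notif_send(), and the special value
 * OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF instead wakes the threaded
 * handler, which calls back into secure world to run the bottom half.
 */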

static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
				 bool *value_pending)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return 0;
	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
	return res.a1;
}

static irqreturn_t notif_irq_handler(int irq, void *dev_id)
{
	struct optee *optee = dev_id;
	bool do_bottom_half = false;
	bool value_valid;
	bool value_pending;
	u32 value;

	do {
		value = get_async_notif_value(optee->smc.invoke_fn,
					      &value_valid, &value_pending);
		if (!value_valid)
			break;

		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
			do_bottom_half = true;
		else
			optee_notif_send(optee, value);
	} while (value_pending);

	if (do_bottom_half)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
	struct optee *optee = dev_id;

	optee_smc_do_bottom_half(optee->ctx);

	return IRQ_HANDLED;
}

static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
	int rc;

	rc = request_threaded_irq(irq, notif_irq_handler,
				  notif_irq_thread_fn,
				  0, "optee_notification", optee);
	if (rc)
		return rc;

	optee->smc.notif_irq = irq;

	return 0;
}

static void optee_smc_notif_uninit_irq(struct optee *optee)
{
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		optee_smc_stop_async_notif(optee->ctx);
		if (optee->smc.notif_irq) {
			free_irq(optee->smc.notif_irq, optee);
			irq_dispose_mapping(optee->smc.notif_irq);
		}
	}
}

/*
 * 6. Driver initialization
 *
 * During driver initialization secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
 */
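
/*
 * The probe sequence below is, in order: pick the SMC/HVC conduit from
 * the "method" device property, verify the OP-TEE API UID and revision,
 * exchange capabilities, then set up either the dynamic
 * (alloc_pages() based) or the static (memremapped carveout) shared
 * memory pool before registering the client and supplicant tee devices.
 */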

static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static int enable_async_notif(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return -EINVAL;
	return 0;
}

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps, u32 *max_notif_value,
					    unsigned int *rpc_param_count)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's a UP system (from the
	 * kernel point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
		*max_notif_value = res.result.max_notif_value;
	else
		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
		*rpc_param_count = (u8)res.result.data;
	else
		*rpc_param_count = 0;

	return true;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	void *rc;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory memremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
					OPTEE_MIN_STATIC_POOL_ALIGN);
	if (IS_ERR(rc))
		memunmap(va);
	else
		*memremaped_shm = va;

	return rc;
}

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}

/* optee_smc_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_smc_remove is called by the platform subsystem to alert the driver
 * that it should release the device
 */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	if (!optee->rpc_param_count)
		optee_disable_shm_cache(optee);

	optee_smc_notif_uninit_irq(optee);

	optee_remove_common(optee);

	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}

/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * optee_shutdown is called by the platform subsystem to alert
 * the driver that a shutdown, reboot, or kexec is happening and
 * the device must be disabled.
 */
static void optee_shutdown(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	if (!optee->rpc_param_count)
		optee_disable_shm_cache(optee);
}

static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	unsigned int rpc_param_count;
	struct tee_device *teedev;
	struct tee_context *ctx;
	u32 max_notif_value;
	u32 arg_cache_flags;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
					     &max_notif_value,
					     &rpc_param_count)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
		/*
		 * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
		 * optee_get_msg_arg() to pre-register (by having
		 * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
		 * an argument struct.
		 *
		 * With the page pre-registered we can use a non-zero
		 * offset for the argument struct, this is indicated with
		 * OPTEE_SHM_ARG_SHARED.
		 *
		 * This means that optee_smc_do_call_with_arg() will use
		 * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
		 */
		if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
			arg_cache_flags = OPTEE_SHM_ARG_SHARED;
		else
			arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;

		pool = optee_shm_pool_alloc_pages();
	}

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
		/*
		 * The static memory pool can use non-zero page offsets so
		 * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
		 *
		 * optee_get_msg_arg() should not pre-register the
		 * allocated page used to pass an argument struct, this is
		 * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
		 *
		 * This means that optee_smc_do_call_with_arg() will use
		 * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
		 * OPTEE_SMC_CALL_WITH_RPC_ARG.
		 */
		arg_cache_flags = OPTEE_SHM_ARG_SHARED |
				  OPTEE_SHM_ARG_ALLOC_PRIV;
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
	}

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err_free_pool;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;
	optee->rpc_param_count = rpc_param_count;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_optee;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;
	optee_shm_arg_cache_init(optee, arg_cache_flags);

	platform_set_drvdata(pdev, optee);
	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_supp_uninit;
	}
	optee->ctx = ctx;
	rc = optee_notif_init(optee, max_notif_value);
	if (rc)
		goto err_close_ctx;

	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;

		rc = platform_get_irq(pdev, 0);
		if (rc < 0) {
			pr_err("platform_get_irq: ret %d\n", rc);
			goto err_notif_uninit;
		}
		irq = rc;

		rc = optee_smc_notif_init_irq(optee, irq);
		if (rc) {
			irq_dispose_mapping(irq);
			goto err_notif_uninit;
		}
		enable_async_notif(optee->smc.invoke_fn);
		pr_info("Asynchronous notifications enabled\n");
	}

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	/*
	 * Only enable the shm cache in case we're not able to pass the RPC
	 * arg struct right after the normal arg struct.
	 */
	if (!optee->rpc_param_count)
		optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_disable_shm_cache;

	pr_info("initialized driver\n");
	return 0;

err_disable_shm_cache:
	if (!optee->rpc_param_count)
		optee_disable_shm_cache(optee);
	optee_smc_notif_uninit_irq(optee);
	optee_unregister_devices();
err_notif_uninit:
	optee_notif_uninit(optee);
err_close_ctx:
	teedev_close_context(ctx);
err_supp_uninit:
	optee_shm_arg_cache_uninit(optee);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_optee:
	kfree(optee);
err_free_pool:
	tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};

int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}

void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}