GNU Linux-libre 4.19.286-gnu1
drivers/tee/tee_shm.c
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

                if (shm->flags & TEE_SHM_DMA_BUF)
                        poolm = teedev->pool->dma_buf_mgr;
                else
                        poolm = teedev->pool->private_mgr;

                poolm->ops->free(poolm, shm);
        } else if (shm->flags & TEE_SHM_REGISTER) {
                size_t n;
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d", shm, rc);

                for (n = 0; n < shm->num_pages; n++)
                        put_page(shm->pages[n]);

                kfree(shm->pages);
        }

        if (shm->ctx)
                teedev_ctx_put(shm->ctx);

        kfree(shm);

        tee_device_put(teedev);
}

static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
                                       struct tee_device *teedev,
                                       size_t size, u32 flags)
{
        struct tee_shm_pool_mgr *poolm = NULL;
        struct tee_shm *shm;
        void *ret;
        int rc;

        if (ctx && ctx->teedev != teedev) {
                dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
                return ERR_PTR(-EINVAL);
        }

        if (!(flags & TEE_SHM_MAPPED)) {
                dev_err(teedev->dev.parent,
                        "only mapped allocations supported\n");
                return ERR_PTR(-EINVAL);
        }

        if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
                return ERR_PTR(-EINVAL);
        }

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->pool) {
                /* teedev has been detached from driver */
                ret = ERR_PTR(-EINVAL);
                goto err_dev_put;
        }

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_dev_put;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_POOL;
        shm->teedev = teedev;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
                poolm = teedev->pool->dma_buf_mgr;
        else
                poolm = teedev->pool->private_mgr;

        rc = poolm->ops->alloc(poolm, shm, size);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_kfree;
        }

        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);
        if (shm->id < 0) {
                ret = ERR_PTR(shm->id);
                goto err_pool_free;
        }

        if (ctx) {
                teedev_ctx_get(ctx);
                mutex_lock(&teedev->mutex);
                list_add_tail(&shm->link, &ctx->list_shm);
                mutex_unlock(&teedev->mutex);
        }

        return shm;
err_pool_free:
        poolm->ops->free(poolm, shm);
err_kfree:
        kfree(shm);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 * @flags:      Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF is set, global shared memory is allocated and
 * associated with a dma-buf handle, otherwise driver-private memory is
 * allocated.
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
        return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
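
/*
 * Usage sketch (illustrative, not part of this file): a client driver
 * holding a struct tee_context could allocate a dma-buf backed buffer
 * for a hypothetical 4 KiB message like this:
 *
 *        struct tee_shm *shm;
 *
 *        shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 *        ...
 *        tee_shm_free(shm);
 */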

struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
{
        return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);

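/**
 * tee_shm_register() - Register shared memory buffer
 * @ctx:        Context that registers the shared memory
 * @addr:       Address in userspace of the shared buffer
 * @length:     Length of the shared buffer
 * @flags:      Flags setting properties for the requested shared memory.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */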
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
        struct tee_shm *shm;
        void *ret;
        int rc;
        int num_pages;
        unsigned long start;

        if (flags != req_flags)
                return ERR_PTR(-ENOTSUPP);

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->desc->ops->shm_register ||
            !teedev->desc->ops->shm_unregister) {
                tee_device_put(teedev);
                return ERR_PTR(-ENOTSUPP);
        }

        teedev_ctx_get(ctx);

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_REGISTER;
        shm->teedev = teedev;
        shm->ctx = ctx;
        shm->id = -1;
        start = rounddown(addr, PAGE_SIZE);
        shm->offset = addr - start;
        shm->size = length;
        num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
        if (rc > 0)
                shm->num_pages = rc;
        if (rc != num_pages) {
                if (rc >= 0)
                        rc = -ENOMEM;
                ret = ERR_PTR(rc);
                goto err;
        }

        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);

        if (shm->id < 0) {
                ret = ERR_PTR(shm->id);
                goto err;
        }

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err;
        }

        mutex_lock(&teedev->mutex);
        list_add_tail(&shm->link, &ctx->list_shm);
        mutex_unlock(&teedev->mutex);

        return shm;
err:
        if (shm) {
                size_t n;

                if (shm->id >= 0) {
                        mutex_lock(&teedev->mutex);
                        idr_remove(&teedev->idr, shm->id);
                        mutex_unlock(&teedev->mutex);
                }
                if (shm->pages) {
                        for (n = 0; n < shm->num_pages; n++)
                                put_page(shm->pages[n]);
                        kfree(shm->pages);
                }
        }
        kfree(shm);
        teedev_ctx_put(ctx);
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
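
/*
 * Usage sketch (illustrative, not part of this file): registering a
 * buffer supplied by user space, where "uaddr" and "len" are assumed to
 * come from an ioctl argument:
 *
 *        shm = tee_shm_register(ctx, uaddr, len,
 *                               TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 */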

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
        tee_shm_put(filp->private_data);
        return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct tee_shm *shm = filp->private_data;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
        .owner = THIS_MODULE,
        .release = tee_shm_fop_release,
        .mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:        Shared memory handle
 * @returns user space file descriptor to shared memory on success, else a
 *      negative error code
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

        /* matched by tee_shm_put() in tee_shm_fop_release() */
        refcount_inc(&shm->refcount);
        fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
                tee_shm_put(shm);
        return fd;
}
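
/*
 * Usage sketch (illustrative, not part of this file): exporting a
 * dma-buf flagged shm to user space; the reference taken in
 * tee_shm_get_fd() is dropped again by tee_shm_fop_release() when the
 * descriptor is closed:
 *
 *        int fd = tee_shm_get_fd(shm);
 *
 *        if (fd < 0)
 *                return fd;
 */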

/**
 * tee_shm_free() - Free shared memory
 * @shm:        Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:        Shared memory handle
 * @va:         Virtual address to translate
 * @pa:         Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if ((char *)va < (char *)shm->kaddr)
                return -EINVAL;
        if ((char *)va >= ((char *)shm->kaddr + shm->size))
                return -EINVAL;

        return tee_shm_get_pa(
                        shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:        Shared memory handle
 * @pa:         Physical address to translate
 * @va:         Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if (pa < shm->paddr)
                return -EINVAL;
        if (pa >= (shm->paddr + shm->size))
                return -EINVAL;

        if (va) {
                void *v = tee_shm_get_va(shm, pa - shm->paddr);

                if (IS_ERR(v))
                        return PTR_ERR(v);
                *va = v;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);
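
/*
 * Usage sketch (illustrative, not part of this file): round-tripping the
 * start of a mapped shm through the two translation helpers above; on
 * success "va2" ends up equal to "va":
 *
 *        phys_addr_t pa;
 *        void *va2;
 *        void *va = tee_shm_get_va(shm, 0);
 *
 *        if (IS_ERR(va))
 *                return PTR_ERR(va);
 *        if (tee_shm_va2pa(shm, va, &pa))
 *                return -EINVAL;
 *        if (tee_shm_pa2va(shm, pa, &va2))
 *                return -EINVAL;
 */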

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *      the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @pa:         Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *      error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
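
/*
 * Usage sketch (illustrative, not part of this file): writing a message
 * at a buffer offset and fetching the physical address to hand to the
 * secure world; "offs" and "msg" are hypothetical:
 *
 *        phys_addr_t pa;
 *        void *p = tee_shm_get_va(shm, offs);
 *
 *        if (IS_ERR(p))
 *                return PTR_ERR(p);
 *        memcpy(p, msg, sizeof(msg));
 *        if (tee_shm_get_pa(shm, offs, &pa))
 *                return -EINVAL;
 */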

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:        Context owning the shared memory
 * @id:         Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        /*
         * If the tee_shm was found in the IDR it must have a refcount
         * larger than 0 due to the guarantee in tee_shm_put() below. So
         * it's safe to use refcount_inc().
         */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else
                refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:        Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        struct tee_device *teedev = shm->teedev;
        bool do_release = false;

        mutex_lock(&teedev->mutex);
        if (refcount_dec_and_test(&shm->refcount)) {
                /*
                 * refcount has reached 0, we must now remove it from the
                 * IDR before releasing the mutex. This will guarantee that
                 * the refcount_inc() in tee_shm_get_from_id() never starts
                 * from 0.
                 */
                idr_remove(&teedev->idr, shm->id);
                if (shm->ctx)
                        list_del(&shm->link);
                do_release = true;
        }
        mutex_unlock(&teedev->mutex);

        if (do_release)
                tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
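
/*
 * Usage sketch (illustrative, not part of this file): resolving an id
 * received from user space and dropping the reference when done;
 * tee_shm_put() here pairs with the refcount_inc() in
 * tee_shm_get_from_id():
 *
 *        struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *        if (IS_ERR(shm))
 *                return PTR_ERR(shm);
 *        ...
 *        tee_shm_put(shm);
 */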