/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14 #include <linux/anon_inodes.h>
15 #include <linux/device.h>
16 #include <linux/idr.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/tee_drv.h>
21 #include "tee_private.h"
23 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
25 struct tee_shm_pool_mgr *poolm;
27 if (shm->flags & TEE_SHM_DMA_BUF)
28 poolm = &teedev->pool->dma_buf_mgr;
30 poolm = &teedev->pool->private_mgr;
32 poolm->ops->free(poolm, shm);
35 tee_device_put(teedev);
/*
 * NOTE(review): this chunk is extraction-mangled — each line still carries
 * its original file line number and several lines are missing (braces, the
 * declarations of shm/ret/rc, the error-path labels that the ERR_PTR
 * assignments below unwind to, and the final "return shm"). Code bytes are
 * left untouched; only review comments are added. Reconstruct from the
 * original file before compiling.
 */
39 * tee_shm_alloc() - Allocate shared memory
40 * @ctx: Context that allocates the shared memory
41 * @size: Requested size of shared memory
42 * @flags: Flags setting properties for the requested shared memory.
44 * Memory allocated as global shared memory is automatically freed when the
45 * TEE file pointer is closed. The @flags field uses the bits defined by
46 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
47 * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
48 * associated with a dma-buf handle, else driver private memory.
50 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
52 struct tee_device *teedev = ctx->teedev;
53 struct tee_shm_pool_mgr *poolm = NULL;
/* Reject unmappable allocations and any unknown flag bits up front */
58 if (!(flags & TEE_SHM_MAPPED)) {
59 dev_err(teedev->dev.parent,
60 "only mapped allocations supported\n");
61 return ERR_PTR(-EINVAL);
64 if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
65 dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
66 return ERR_PTR(-EINVAL);
/*
 * Pin the driver for the lifetime of the allocation; dropped in
 * tee_shm_release() and on the error unwind (line 117 below).
 */
69 if (!tee_device_get(teedev))
70 return ERR_PTR(-EINVAL);
/*
 * NOTE(review): the condition guarding this error path (presumably a
 * "!teedev->pool" check per the comment) is among the missing lines —
 * confirm against the original file.
 */
73 /* teedev has been detached from driver */
74 ret = ERR_PTR(-EINVAL);
78 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
80 ret = ERR_PTR(-ENOMEM);
/* Initial reference; dropped through tee_shm_put() -> tee_shm_release() */
84 refcount_set(&shm->refcount, 1);
/* Pool-manager selection mirrors tee_shm_release() */
88 if (flags & TEE_SHM_DMA_BUF)
89 poolm = &teedev->pool->dma_buf_mgr;
91 poolm = &teedev->pool->private_mgr;
93 rc = poolm->ops->alloc(poolm, shm, size);
/*
 * Publish the shm in the per-device IDR so it can be looked up by id
 * (see tee_shm_get_from_id()); teedev->mutex serializes IDR access.
 */
99 mutex_lock(&teedev->mutex);
100 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
101 mutex_unlock(&teedev->mutex);
/* idr_alloc() stores a negative errno in shm->id on failure */
103 ret = ERR_PTR(shm->id);
/* Track the allocation on the owning context; undone in tee_shm_put() */
107 mutex_lock(&teedev->mutex);
108 list_add_tail(&shm->link, &ctx->list_shm);
109 mutex_unlock(&teedev->mutex);
/* Error unwind: free pool memory, then drop the device reference */
113 poolm->ops->free(poolm, shm);
117 tee_device_put(teedev);
120 EXPORT_SYMBOL_GPL(tee_shm_alloc);
122 static int tee_shm_fop_release(struct inode *inode, struct file *filp)
124 tee_shm_put(filp->private_data);
128 static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
130 struct tee_shm *shm = filp->private_data;
131 size_t size = vma->vm_end - vma->vm_start;
133 /* check for overflowing the buffer's size */
134 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
137 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
138 size, vma->vm_page_prot);
141 static const struct file_operations tee_shm_fops = {
142 .owner = THIS_MODULE,
143 .release = tee_shm_fop_release,
144 .mmap = tee_shm_fop_mmap,
148 * tee_shm_get_fd() - Increase reference count and return file descriptor
149 * @shm: Shared memory handle
150 * @returns user space file descriptor to shared memory
152 int tee_shm_get_fd(struct tee_shm *shm)
154 u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
157 if ((shm->flags & req_flags) != req_flags)
160 /* matched by tee_shm_put() in tee_shm_op_release() */
161 refcount_inc(&shm->refcount);
162 fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 *
 * The shm is refcounted (initial reference set in tee_shm_alloc()), so
 * freeing is just dropping that reference; the backing memory is released
 * by tee_shm_release() when the last user is gone.
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
179 * tee_shm_va2pa() - Get physical address of a virtual address
180 * @shm: Shared memory handle
181 * @va: Virtual address to tranlsate
182 * @pa: Returned physical address
183 * @returns 0 on success and < 0 on failure
185 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
187 /* Check that we're in the range of the shm */
188 if ((char *)va < (char *)shm->kaddr)
190 if ((char *)va >= ((char *)shm->kaddr + shm->size))
193 return tee_shm_get_pa(
194 shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
196 EXPORT_SYMBOL_GPL(tee_shm_va2pa);
199 * tee_shm_pa2va() - Get virtual address of a physical address
200 * @shm: Shared memory handle
201 * @pa: Physical address to tranlsate
202 * @va: Returned virtual address
203 * @returns 0 on success and < 0 on failure
205 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
207 /* Check that we're in the range of the shm */
210 if (pa >= (shm->paddr + shm->size))
214 void *v = tee_shm_get_va(shm, pa - shm->paddr);
222 EXPORT_SYMBOL_GPL(tee_shm_pa2va);
225 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
226 * @shm: Shared memory handle
227 * @offs: Offset from start of this shared memory
228 * @returns virtual address of the shared memory + offs if offs is within
229 * the bounds of this shared memory, else an ERR_PTR
231 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
233 if (offs >= shm->size)
234 return ERR_PTR(-EINVAL);
235 return (char *)shm->kaddr + offs;
237 EXPORT_SYMBOL_GPL(tee_shm_get_va);
240 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
241 * @shm: Shared memory handle
242 * @offs: Offset from start of this shared memory
243 * @pa: Physical address to return
244 * @returns 0 if offs is within the bounds of this shared memory, else an
247 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
249 if (offs >= shm->size)
252 *pa = shm->paddr + offs;
255 EXPORT_SYMBOL_GPL(tee_shm_get_pa);
258 * tee_shm_get_from_id() - Find shared memory object and increase reference
260 * @ctx: Context owning the shared memory
261 * @id: Id of shared memory object
262 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
264 struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
266 struct tee_device *teedev;
270 return ERR_PTR(-EINVAL);
272 teedev = ctx->teedev;
273 mutex_lock(&teedev->mutex);
274 shm = idr_find(&teedev->idr, id);
276 * If the tee_shm was found in the IDR it must have a refcount
277 * larger than 0 due to the guarantee in tee_shm_put() below. So
278 * it's safe to use refcount_inc().
280 if (!shm || shm->ctx != ctx)
281 shm = ERR_PTR(-EINVAL);
283 refcount_inc(&shm->refcount);
284 mutex_unlock(&teedev->mutex);
287 EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
290 * tee_shm_get_id() - Get id of a shared memory object
291 * @shm: Shared memory handle
294 int tee_shm_get_id(struct tee_shm *shm)
298 EXPORT_SYMBOL_GPL(tee_shm_get_id);
/*
 * NOTE(review): this chunk is extraction-mangled — each line still carries
 * its original file line number and several lines are missing (the
 * kernel-doc opener, braces, and — per the surviving comments — the
 * idr_remove() call plus the do_release bookkeeping). Code bytes are left
 * untouched; only review comments are added. Reconstruct from the
 * original file before compiling.
 */
301 * tee_shm_put() - Decrease reference count on a shared memory handle
302 * @shm: Shared memory handle
304 void tee_shm_put(struct tee_shm *shm)
306 struct tee_device *teedev = shm->teedev;
307 bool do_release = false;
/* teedev->mutex serializes against tee_shm_get_from_id() */
309 mutex_lock(&teedev->mutex);
310 if (refcount_dec_and_test(&shm->refcount)) {
/*
 * NOTE(review): the comment below implies the shm is removed from the
 * IDR here, under the mutex; the idr_remove() line itself (and
 * presumably "do_release = true;") is among the missing lines —
 * confirm against the original file.
 */
312 * refcount has reached 0, we must now remove it from the
313 * IDR before releasing the mutex. This will guarantee
314 * that the refcount_inc() in tee_shm_get_from_id() never
/* Unlinks the shm from the owning context's list_shm */
318 list_del(&shm->link);
321 mutex_unlock(&teedev->mutex);
/*
 * NOTE(review): given "do_release" above, this call is presumably
 * guarded by "if (do_release)" — the guard line is not visible here.
 */
324 tee_shm_release(teedev, shm);
326 EXPORT_SYMBOL_GPL(tee_shm_put);