// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF       0
#endif

struct gntdev_dmabuf {
        struct gntdev_dmabuf_priv *priv;
        struct dma_buf *dmabuf;
        struct list_head next;
        int fd;

        union {
                struct {
                        /* Exported buffers are reference counted. */
                        struct kref refcount;

                        struct gntdev_priv *priv;
                        struct gntdev_grant_map *map;
                } exp;
                struct {
                        /* Granted references of the imported buffer. */
                        grant_ref_t *refs;
                        /* Scatter-gather table of the imported buffer. */
                        struct sg_table *sgt;
                        /* dma-buf attachment of the imported buffer. */
                        struct dma_buf_attachment *attach;
                } imp;
        } u;

        /* Number of pages this buffer has. */
        int nr_pages;
        /* Pages of this buffer. */
        struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
        struct list_head next;
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct completion completion;
};

struct gntdev_dmabuf_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
        /* List of exported DMA buffers. */
        struct list_head exp_list;
        /* List of wait objects. */
        struct list_head exp_wait_list;
        /* List of imported DMA buffers. */
        struct list_head imp_list;
        /* This is the lock which protects dma_buf_xxx lists. */
        struct mutex lock;
        /*
         * We reference this file while exporting dma-bufs, so
         * the grant device context is not destroyed while there are
         * external users alive.
         */
        struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

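/*
 * User space may ask to wait until an exported dma-buf is released by all
 * of its users: a wait object holding a struct completion is added to
 * exp_wait_list and completed from dmabuf_exp_release() once the last
 * reference to the exported buffer is dropped.
 */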
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
                        struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_completion(&obj->completion);
        obj->gntdev_dmabuf = gntdev_dmabuf;

        mutex_lock(&priv->lock);
        list_add(&obj->next, &priv->exp_wait_list);
        /* Put our reference and wait for gntdev_dmabuf's release to fire. */
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
        return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
                                     struct gntdev_dmabuf_wait_obj *obj)
{
        mutex_lock(&priv->lock);
        list_del(&obj->next);
        mutex_unlock(&priv->lock);
        kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
                                    u32 wait_to_ms)
{
        if (wait_for_completion_timeout(&obj->completion,
                        msecs_to_jiffies(wait_to_ms)) <= 0)
                return -ETIMEDOUT;

        return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
                                       struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        list_for_each_entry(obj, &priv->exp_wait_list, next)
                if (obj->gntdev_dmabuf == gntdev_dmabuf) {
                        pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
                        complete_all(&obj->completion);
                        break;
                }
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the wait list\n");
                        kref_get(&gntdev_dmabuf->u.exp.refcount);
                        ret = gntdev_dmabuf;
                        break;
                }
        mutex_unlock(&priv->lock);
        return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
                                    int wait_to_ms)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct gntdev_dmabuf_wait_obj *obj;
        int ret;

        pr_debug("Will wait for dma-buf with fd %d\n", fd);
        /*
         * Try to find the DMA buffer: if it is not found then either the
         * buffer has already been released or the provided file descriptor
         * is wrong.
         */
        gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        /*
         * gntdev_dmabuf still exists and we now hold a reference on it,
         * so prepare to wait: allocate a wait object and add it to the
         * wait list, so we can find it on release.
         */
        obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
        dmabuf_exp_wait_obj_free(priv, obj);
        return ret;
}

/* DMA buffer export support. */

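/*
 * Wrap an array of pages into a scatter-gather table so the buffer can be
 * handed to dma_map_sg_attrs() on behalf of an attached device.
 */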
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sgt;
        int ret;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
                                 struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

        gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf_attach)
                return -ENOMEM;

        gntdev_dmabuf_attach->dir = DMA_NONE;
        attach->priv = gntdev_dmabuf_attach;
        return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

        if (gntdev_dmabuf_attach) {
                struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

                if (sgt) {
                        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                                dma_unmap_sg_attrs(attach->dev, sgt->sgl,
                                                   sgt->nents,
                                                   gntdev_dmabuf_attach->dir,
                                                   DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }

                kfree(sgt);
                kfree(gntdev_dmabuf_attach);
                attach->priv = NULL;
        }
}

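/*
 * Map the buffer for an attached device. The resulting sg table is cached
 * in the per-attachment state: repeated calls with the same direction
 * return the cached table, while a second mapping with a different
 * direction is rejected with -EBUSY. The actual unmap is deferred to
 * dmabuf_exp_ops_detach().
 */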
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
        struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
        struct sg_table *sgt;

        pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
                 attach->dev);

        if (dir == DMA_NONE || !gntdev_dmabuf_attach)
                return ERR_PTR(-EINVAL);

        /* Return the cached mapping when possible. */
        if (gntdev_dmabuf_attach->dir == dir)
                return gntdev_dmabuf_attach->sgt;

        /*
         * Two mappings with different directions for the same attachment are
         * not allowed.
         */
        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                return ERR_PTR(-EBUSY);

        sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                  gntdev_dmabuf->nr_pages);
        if (!IS_ERR(sgt)) {
                if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
                                      DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        gntdev_dmabuf_attach->sgt = sgt;
                        gntdev_dmabuf_attach->dir = dir;
                }
        }
        if (IS_ERR(sgt))
                pr_debug("Failed to map sg table for dev %p\n", attach->dev);
        return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
{
        /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

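/*
 * Final kref release: wake any waiters registered for this buffer, unlink
 * it from the export list and drop the file reference taken at export
 * time. Both kref_put() callers hold priv->lock, so the list manipulation
 * here is serialized.
 */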
static void dmabuf_exp_release(struct kref *kref)
{
        struct gntdev_dmabuf *gntdev_dmabuf =
                container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

        dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
        list_del(&gntdev_dmabuf->next);
        fput(gntdev_dmabuf->priv->filp);
        kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
                                  struct gntdev_grant_map *map)
{
        mutex_lock(&priv->lock);
        list_del(&map->next);
        gntdev_put_map(NULL /* already removed */, map);
        mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
        struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
        struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

        dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
                              gntdev_dmabuf->u.exp.map);
        mutex_lock(&priv->lock);
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
}

static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        /* Not implemented. */
        return NULL;
}

static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{
        /* Not implemented. */
}

static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        /* Not implemented. */
        return 0;
}

static const struct dma_buf_ops dmabuf_exp_ops = {
        .attach = dmabuf_exp_ops_attach,
        .detach = dmabuf_exp_ops_detach,
        .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
        .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
        .release = dmabuf_exp_ops_release,
        .map = dmabuf_exp_ops_kmap,
        .unmap = dmabuf_exp_ops_kunmap,
        .mmap = dmabuf_exp_ops_mmap,
};

struct gntdev_dmabuf_export_args {
        struct gntdev_priv *priv;
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_priv *dmabuf_priv;
        struct device *dev;
        int count;
        struct page **pages;
        u32 fd;
};

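/*
 * Wrap the grant-mapped pages into a new dma-buf, install a file
 * descriptor for it and publish the buffer on the export list. On
 * success the exported buffer holds a reference to the gntdev file so
 * the device context outlives external users.
 */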
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct gntdev_dmabuf *gntdev_dmabuf;
        int ret;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                return -ENOMEM;

        kref_init(&gntdev_dmabuf->u.exp.refcount);

        gntdev_dmabuf->priv = args->dmabuf_priv;
        gntdev_dmabuf->nr_pages = args->count;
        gntdev_dmabuf->pages = args->pages;
        gntdev_dmabuf->u.exp.priv = args->priv;
        gntdev_dmabuf->u.exp.map = args->map;

        exp_info.exp_name = KBUILD_MODNAME;
        if (args->dev->driver && args->dev->driver->owner)
                exp_info.owner = args->dev->driver->owner;
        else
                exp_info.owner = THIS_MODULE;
        exp_info.ops = &dmabuf_exp_ops;
        exp_info.size = args->count << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = gntdev_dmabuf;

        gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(gntdev_dmabuf->dmabuf)) {
                ret = PTR_ERR(gntdev_dmabuf->dmabuf);
                gntdev_dmabuf->dmabuf = NULL;
                goto fail;
        }

        ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
        if (ret < 0)
                goto fail;

        gntdev_dmabuf->fd = ret;
        args->fd = ret;

        pr_debug("Exporting DMA buffer with fd %d\n", ret);

        mutex_lock(&args->dmabuf_priv->lock);
        list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
        mutex_unlock(&args->dmabuf_priv->lock);
        get_file(gntdev_dmabuf->priv->filp);
        return 0;

fail:
        if (gntdev_dmabuf->dmabuf)
                dma_buf_put(gntdev_dmabuf->dmabuf);
        kfree(gntdev_dmabuf);
        return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
                                 int count)
{
        struct gntdev_grant_map *map;

        if (unlikely(count <= 0))
                return ERR_PTR(-EINVAL);

        if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
            (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
                pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
                return ERR_PTR(-EINVAL);
        }

        map = gntdev_alloc_map(priv, count, dmabuf_flags);
        if (!map)
                return ERR_PTR(-ENOMEM);

        if (unlikely(gntdev_account_mapped_pages(count))) {
                pr_debug("can't map %d pages: over limit\n", count);
                gntdev_put_map(NULL, map);
                return ERR_PTR(-ENOMEM);
        }
        return map;
}

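/*
 * Export path entry point: allocate backing storage, fill in the grant
 * references provided by user space, map them into the local domain and
 * export the resulting pages as a dma-buf, returning its file descriptor
 * in @fd.
 */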
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
                                int count, u32 domid, u32 *refs, u32 *fd)
{
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_export_args args;
        int i, ret;

        map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
        if (IS_ERR(map))
                return PTR_ERR(map);

        for (i = 0; i < count; i++) {
                map->grants[i].domid = domid;
                map->grants[i].ref = refs[i];
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        mutex_unlock(&priv->lock);

        map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
        map->flags |= GNTMAP_device_map;
#endif

        ret = gntdev_map_grant_pages(map);
        if (ret < 0)
                goto out;

        args.priv = priv;
        args.map = map;
        args.dev = priv->dma_dev;
        args.dmabuf_priv = priv->dmabuf_priv;
        args.count = map->count;
        args.pages = map->pages;
        args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

        ret = dmabuf_exp_from_pages(&args);
        if (ret < 0)
                goto out;

        *fd = args.fd;
        return 0;

out:
        dmabuf_exp_remove_map(priv, map);
        return ret;
}

/* DMA buffer import support. */

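/*
 * Grant @domid access to each page of the imported buffer: claim one
 * grant reference per page from a pre-allocated batch and store the
 * resulting references in @refs for user space to hand over to the
 * foreign domain.
 */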
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
                                int count, int domid)
{
        grant_ref_t priv_gref_head;
        int i, ret;

        ret = gnttab_alloc_grant_references(count, &priv_gref_head);
        if (ret < 0) {
                pr_debug("Cannot allocate grant references, ret %d\n", ret);
                return ret;
        }

        for (i = 0; i < count; i++) {
                int cur_ref;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0) {
                        ret = cur_ref;
                        pr_debug("Cannot claim grant reference, ret %d\n", ret);
                        goto out;
                }

                gnttab_grant_foreign_access_ref(cur_ref, domid,
                                                xen_page_to_gfn(pages[i]), 0);
                refs[i] = cur_ref;
        }

        return 0;

out:
        gnttab_free_grant_references(priv_gref_head);
        return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (refs[i] != GRANT_INVALID_REF)
                        gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
        kfree(gntdev_dmabuf->pages);
        kfree(gntdev_dmabuf->u.imp.refs);
        kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        int i;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                goto fail_no_free;

        gntdev_dmabuf->u.imp.refs = kcalloc(count,
                                            sizeof(gntdev_dmabuf->u.imp.refs[0]),
                                            GFP_KERNEL);
        if (!gntdev_dmabuf->u.imp.refs)
                goto fail;

        gntdev_dmabuf->pages = kcalloc(count,
                                       sizeof(gntdev_dmabuf->pages[0]),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf->pages)
                goto fail;

        gntdev_dmabuf->nr_pages = count;

        for (i = 0; i < count; i++)
                gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

        return gntdev_dmabuf;

fail:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
        return ERR_PTR(-ENOMEM);
}

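/*
 * Import path entry point: attach to the dma-buf behind @fd on behalf of
 * @dev, map it into an sg table, validate that the buffer starts at a
 * zero offset, matches the page count expected by user space and is
 * backed by system pages, then grant @domid access to every page.
 */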
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
                   int fd, int count, int domid)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct sg_page_iter sg_iter;
        int i;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return ERR_CAST(dma_buf);

        gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
        if (IS_ERR(gntdev_dmabuf)) {
                ret = gntdev_dmabuf;
                goto fail_put;
        }

        gntdev_dmabuf->priv = priv;
        gntdev_dmabuf->fd = fd;

        attach = dma_buf_attach(dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = ERR_CAST(attach);
                goto fail_free_obj;
        }

        gntdev_dmabuf->u.imp.attach = attach;

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = ERR_CAST(sgt);
                goto fail_detach;
        }

        /* Check that we have zero offset. */
        if (sgt->sgl->offset) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
                         sgt->sgl->offset);
                goto fail_unmap;
        }

        /* Check the number of pages the imported buffer has. */
        if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
                         attach->dmabuf->size, gntdev_dmabuf->nr_pages);
                goto fail_unmap;
        }

        gntdev_dmabuf->u.imp.sgt = sgt;

        /* Now convert sgt to array of pages and check for page validity. */
        i = 0;
        for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
                /*
                 * Check if the page is valid: an invalid page can occur if
                 * we are given a page from VRAM or another resource which
                 * is not backed by a struct page.
                 */
                if (!pfn_valid(page_to_pfn(page))) {
                        ret = ERR_PTR(-EINVAL);
                        goto fail_unmap;
                }

                gntdev_dmabuf->pages[i++] = page;
        }

        ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
                                                      gntdev_dmabuf->u.imp.refs,
                                                      count, domid));
        if (IS_ERR(ret))
                goto fail_end_access;

        pr_debug("Imported DMA buffer with fd %d\n", fd);

        mutex_lock(&priv->lock);
        list_add(&gntdev_dmabuf->next, &priv->imp_list);
        mutex_unlock(&priv->lock);

        return gntdev_dmabuf;

fail_end_access:
        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
fail_free_obj:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
        dma_buf_put(dma_buf);
        return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the list of imported buffers.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the import list\n");
                        ret = gntdev_dmabuf;
                        list_del(&gntdev_dmabuf->next);
                        break;
                }
        }
        mutex_unlock(&priv->lock);
        return ret;
}

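/*
 * Tear down an imported buffer: revoke the foreign grants, unmap and
 * detach from the dma-buf, drop the reference taken by dma_buf_get() at
 * import time and free the bookkeeping storage.
 */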
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        pr_debug("Releasing DMA buffer with fd %d\n", fd);

        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
                                      gntdev_dmabuf->nr_pages);

        attach = gntdev_dmabuf->u.imp.attach;

        if (gntdev_dmabuf->u.imp.sgt)
                dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
                                         DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        dma_buf_put(dma_buf);

        dmabuf_imp_free_storage(gntdev_dmabuf);
        return 0;
}

/* DMA buffer IOCTL support. */

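/*
 * A minimal user-space sketch of the export path, assuming the ioctl
 * definitions from include/uapi/xen/gntdev.h and that /dev/xen/gntdev is
 * available; error handling and the actual grant references are elided:
 *
 *      int fd = open("/dev/xen/gntdev", O_RDWR);
 *      struct ioctl_gntdev_dmabuf_exp_from_refs *op =
 *              malloc(sizeof(*op) + (count - 1) * sizeof(op->refs[0]));
 *
 *      op->flags = GNTDEV_DMA_FLAG_WC;
 *      op->count = count;
 *      op->domid = domid;
 *      memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *      ioctl(fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *      // op->fd now holds the exported dma-buf file descriptor.
 */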
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
                                       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs op;
        u32 *refs;
        long ret;

        if (use_ptemod) {
                pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
                         use_ptemod);
                return -EINVAL;
        }

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(op.count <= 0))
                return -EINVAL;

        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
        if (!refs)
                return -ENOMEM;

        if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out;
        }

        ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
                                   op.domid, refs, &op.fd);
        if (ret)
                goto out;

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                ret = -EFAULT;

out:
        kfree(refs);
        return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
                                           struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_wait_released op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
                                        op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_to_refs op;
        struct gntdev_dmabuf *gntdev_dmabuf;
        long ret;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(op.count <= 0))
                return -EINVAL;

        gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
                                           priv->dma_dev, op.fd,
                                           op.count, op.domid);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
                         sizeof(*u->refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out_release;
        }
        return 0;

out_release:
        dmabuf_imp_release(priv->dmabuf_priv, op.fd);
        return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_release op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
        struct gntdev_dmabuf_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        mutex_init(&priv->lock);
        INIT_LIST_HEAD(&priv->exp_list);
        INIT_LIST_HEAD(&priv->exp_wait_list);
        INIT_LIST_HEAD(&priv->imp_list);

        priv->filp = filp;

        return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
        kfree(priv);
}