/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

struct udl_drm_dmabuf_attachment {
        struct sg_table sgt;            /* per-attachment scatter/gather table */
        enum dma_data_direction dir;    /* direction of the cached mapping, or DMA_NONE */
        bool is_mapped;                 /* true once udl_map_dma_buf() has populated sgt */
};

/*
 * Called back from dma_buf_attach(): allocate per-attachment state and
 * start out unmapped (DMA_NONE) until the importer asks for a mapping.
 */
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
        if (!udl_attach)
                return -ENOMEM;

        udl_attach->dir = DMA_NONE;
        attach->priv = udl_attach;

        return 0;
}

/*
 * Called back from dma_buf_detach(): undo any DMA mapping left behind by
 * udl_map_dma_buf() and free the per-attachment state.  sg_free_table()
 * is a no-op if the table was never allocated.
 */
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct sg_table *sgt;

        if (!udl_attach)
                return;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        sgt = &udl_attach->sgt;

        if (udl_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                udl_attach->dir);

        sg_free_table(sgt);
        kfree(udl_attach);
        attach->priv = NULL;
}

/*
 * Called back from dma_buf_map_attachment(): hand the importer a
 * scatter/gather table for the object.  The object's page list is turned
 * into an sg table, cloned into the per-attachment table, and (unless dir
 * is DMA_NONE) DMA-mapped for the importing device.  The result is cached
 * so repeat requests in the same direction return the same table.
 */
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
        struct udl_device *udl = dev->dev_private;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int page_count;
        int nents, ret;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);

        /* just return current sgt if already requested. */
        if (udl_attach->dir == dir && udl_attach->is_mapped)
                return &udl_attach->sgt;

        if (!obj->pages) {
                ret = udl_gem_get_pages(obj);
                if (ret) {
                        DRM_ERROR("failed to map pages.\n");
                        return ERR_PTR(ret);
                }
        }

        page_count = obj->base.size / PAGE_SIZE;
        obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
        if (IS_ERR(obj->sg)) {
                DRM_ERROR("failed to allocate sgt.\n");
                return ERR_CAST(obj->sg);
        }

        sgt = &udl_attach->sgt;

        ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&udl->gem_lock);

        /* clone the object's sg table into the per-attachment copy */
        rd = obj->sg->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        udl_attach->is_mapped = true;
        udl_attach->dir = dir;
        attach->priv = udl_attach;

        /* the success path also exits through this label */
err_unlock:
        mutex_unlock(&udl->gem_lock);
        return sgt;
}
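
/*
 * Illustrative sketch (not part of this file): roughly how an importing
 * driver would exercise the attach/map callbacks above through the
 * dma-buf core.  The function name and the choice of DMA_TO_DEVICE are
 * hypothetical; error handling is trimmed to the essentials.
 */
#if 0
static struct sg_table *example_importer_map(struct dma_buf *buf,
                                             struct device *importer)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(buf, importer);   /* -> udl_attach_dma_buf() */
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        /* -> udl_map_dma_buf(); a repeat call with the same dir reuses sgt */
        sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(sgt))
                dma_buf_detach(buf, attach);      /* -> udl_detach_dma_buf() */

        return sgt;
}
#endif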

/*
 * Nothing to do here: the actual dma_unmap_sg() is deferred to detach
 * time, because the mapping is cached in the attachment and may be
 * handed out again.
 */
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
                              struct sg_table *sgt,
                              enum dma_data_direction dir)
{
        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);
}

static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
{
        /* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
{
        /* TODO */

        return -EINVAL;
}

static const struct dma_buf_ops udl_dmabuf_ops = {
        .attach                 = udl_attach_dma_buf,
        .detach                 = udl_detach_dma_buf,
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .map                    = udl_dmabuf_kmap,
        .unmap                  = udl_dmabuf_kunmap,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
};
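
/*
 * For reference (per the 4.19 dma-buf API): .attach/.detach back
 * dma_buf_attach()/dma_buf_detach(), .map_dma_buf/.unmap_dma_buf back
 * dma_buf_map_attachment()/dma_buf_unmap_attachment(), .map/.unmap back
 * dma_buf_kmap()/dma_buf_kunmap(), .mmap backs dma_buf_mmap(), and
 * .release runs when the last reference taken with get_dma_buf() or
 * returned by the export is dropped via dma_buf_put().
 */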

/*
 * Export a UDL GEM object as a dma-buf, substituting the ops table above
 * for the DRM core defaults.
 */
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &udl_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return drm_gem_dmabuf_export(dev, &exp_info);
}
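
/*
 * Sketch of how these hooks are wired into the driver (this mirrors the
 * PRIME setup in udl_drv.c elsewhere in this tree; shown here only for
 * context, with unrelated fields omitted):
 *
 *      static struct drm_driver driver = {
 *              ...
 *              .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export       = udl_gem_prime_export,
 *              .gem_prime_import       = udl_gem_prime_import,
 *              ...
 *      };
 */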

/*
 * Wrap an imported scatter/gather table in a fresh UDL GEM object and
 * build the page array the rest of the driver expects.  The page array
 * is released again when the object is torn down (see udl_gem.c).
 */
static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}

/*
 * Import a foreign dma-buf as a UDL GEM object.  The attachment and
 * mapping taken here live for the lifetime of the object; the matching
 * unmap/detach/put calls happen when the object is freed, via
 * drm_prime_gem_destroy() (see udl_gem.c).
 */
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach)) {
                put_device(dev->dev);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        uobj->flags = UDL_BO_WC;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
        put_device(dev->dev);
        return ERR_PTR(ret);
}
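
/*
 * Illustrative sketch (not part of this file): the userspace round trip
 * that lands in the export/import hooks above.  The libdrm wrappers and
 * DRM_CLOEXEC flag are from the stock DRM uapi; the two descriptors are
 * assumed to be already-open DRM device nodes, and the function name is
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static int example_share(int exporter_fd, int importer_fd, uint32_t handle)
{
        int buf_fd;
        uint32_t imported_handle;

        /* exporter side: ends up in the driver's gem_prime_export hook */
        if (drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &buf_fd))
                return -1;

        /* importer side: ends up in the driver's gem_prime_import hook */
        if (drmPrimeFDToHandle(importer_fd, buf_fd, &imported_handle)) {
                close(buf_fd);
                return -1;
        }

        close(buf_fd);  /* the importer now holds its own reference */
        return 0;
}
#endif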