GNU Linux-libre 4.14.266-gnu1
drivers/gpu/drm/virtio/virtgpu_ioctl.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"

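/* Byte-swap a userspace drm_virtgpu_3d_box into the little-endian
 * virtio_gpu_box layout the host expects. */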
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

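/* VIRTGPU_MAP: return the mmap offset for the GEM object named by
 * virtio_gpu_map->handle, for use with mmap() on the DRM fd. */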
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}

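/* Reserve all buffers on @head under @ticket and validate each one against
 * its current placement; on any failure the reservation is backed off. */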
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
                                           struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
        if (ret != 0)
                return ret;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
                ret = ttm_bo_validate(bo, &qobj->placement, false, false);
                if (ret) {
                        ttm_eu_backoff_reservation(ticket, head);
                        return ret;
                }
        }
        return 0;
}

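/* Drop the GEM references taken when the objects were added to a
 * validate list. */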
static void virtio_gpu_unref_list(struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);

                drm_gem_object_unreference_unlocked(&qobj->gem_base);
        }
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
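/*
 * VIRTGPU_EXECBUFFER: look up and validate the referenced BOs, copy the
 * command stream from userspace, submit it to the host with a fence, and
 * attach that fence to the validated buffers before releasing them.
 */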
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *drm_file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
        struct drm_gem_object *gobj;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_object *qobj;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct list_head validate_list;
        struct ttm_validate_buffer *buflist = NULL;
        int i;
        struct ww_acquire_ctx ticket;
        void *buf;

        if (vgdev->has_virgl_3d == false)
                return -ENOSYS;

        INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {

                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                           sizeof(uint32_t), GFP_KERNEL);
                buflist = kvmalloc_array(exbuf->num_bo_handles,
                                           sizeof(struct ttm_validate_buffer),
                                           GFP_KERNEL | __GFP_ZERO);
                if (!bo_handles || !buflist) {
                        kvfree(bo_handles);
                        kvfree(buflist);
                        return -ENOMEM;
                }

                user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
                        kvfree(bo_handles);
                        kvfree(buflist);
                        return ret;
                }

                for (i = 0; i < exbuf->num_bo_handles; i++) {
                        gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                        if (!gobj) {
                                kvfree(bo_handles);
                                kvfree(buflist);
                                return -ENOENT;
                        }

                        qobj = gem_to_virtio_gpu_obj(gobj);
                        buflist[i].bo = &qobj->tbo;

                        list_add(&buflist[i].head, &validate_list);
                }
                kvfree(bo_handles);
        }

        ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
        if (ret)
                goto out_free;

        buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
                          exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unresv;
        }
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, &fence);

        ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

        /* fence the command bo */
        virtio_gpu_unref_list(&validate_list);
        kvfree(buflist);
        dma_fence_put(&fence->f);
        return 0;

out_unresv:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
        virtio_gpu_unref_list(&validate_list);
        kvfree(buflist);
        return ret;
}

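/* VIRTGPU_GETPARAM: report device capabilities (whether virgl 3D is
 * available, and that the capset-query fix is present) by writing an int to
 * the user pointer held in param->value. */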
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d == true ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user((void __user *)(unsigned long)param->value,
                         &value, sizeof(int))) {
                return -EFAULT;
        }
        return 0;
}

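/*
 * VIRTGPU_RESOURCE_CREATE: allocate a GEM/TTM backing object and create the
 * matching host resource, either as a plain 2D resource or, when virgl is
 * available, as a fenced 3D resource described by rc_3d.
 */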
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        int ret;
        uint32_t res_id;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        uint32_t size;
        struct list_head validate_list;
        struct ttm_validate_buffer mainbuf;
        struct virtio_gpu_fence *fence = NULL;
        struct ww_acquire_ctx ticket;
        struct virtio_gpu_resource_create_3d rc_3d;

        if (vgdev->has_virgl_3d == false) {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        INIT_LIST_HEAD(&validate_list);
        memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

        virtio_gpu_resource_id_get(vgdev, &res_id);

        size = rc->size;

        /* allocate a single page size object */
        if (size == 0)
                size = PAGE_SIZE;

        qobj = virtio_gpu_alloc_object(dev, size, false, false);
        if (IS_ERR(qobj)) {
                ret = PTR_ERR(qobj);
                goto fail_id;
        }
        obj = &qobj->gem_base;

        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
                                               rc->width, rc->height);

                ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
        } else {
                /* use a gem reference since unref list undoes them */
                drm_gem_object_reference(&qobj->gem_base);
                mainbuf.bo = &qobj->tbo;
                list_add(&mainbuf.head, &validate_list);

                ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
                if (ret) {
                        DRM_DEBUG("failed to validate\n");
                        goto fail_unref;
                }

                rc_3d.resource_id = cpu_to_le32(res_id);
                rc_3d.target = cpu_to_le32(rc->target);
                rc_3d.format = cpu_to_le32(rc->format);
                rc_3d.bind = cpu_to_le32(rc->bind);
                rc_3d.width = cpu_to_le32(rc->width);
                rc_3d.height = cpu_to_le32(rc->height);
                rc_3d.depth = cpu_to_le32(rc->depth);
                rc_3d.array_size = cpu_to_le32(rc->array_size);
                rc_3d.last_level = cpu_to_le32(rc->last_level);
                rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
                rc_3d.flags = cpu_to_le32(rc->flags);

                virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
                ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
                if (ret) {
                        ttm_eu_backoff_reservation(&ticket, &validate_list);
                        goto fail_unref;
                }
                ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
        }

        qobj->hw_res_handle = res_id;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {

                drm_gem_object_release(obj);
                if (vgdev->has_virgl_3d) {
                        virtio_gpu_unref_list(&validate_list);
                        dma_fence_put(&fence->f);
                }
                return ret;
        }
        drm_gem_object_unreference_unlocked(obj);

        rc->res_handle = res_id; /* similar to a VM address */
        rc->bo_handle = handle;

        if (vgdev->has_virgl_3d) {
                virtio_gpu_unref_list(&validate_list);
                dma_fence_put(&fence->f);
        }
        return 0;
fail_unref:
        if (vgdev->has_virgl_3d) {
                virtio_gpu_unref_list(&validate_list);
                dma_fence_put(&fence->f);
        }
//fail_obj:
//      drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
        virtio_gpu_resource_id_put(vgdev, res_id);
        return ret;
}

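/* VIRTGPU_RESOURCE_INFO: return the size and host resource handle of the
 * GEM object named by ri->bo_handle. */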
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->gem_base.size;
        ri->res_handle = qobj->hw_res_handle;
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

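/* VIRTGPU_TRANSFER_FROM_HOST: queue a fenced 3D transfer from the host
 * resource into the guest BO; only available when virgl 3D is enabled. */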
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;
        struct virtio_gpu_box box;

        if (vgdev->has_virgl_3d == false)
                return -ENOSYS;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                              true, false);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
                 &box, &fence);
        reservation_object_add_excl_fence(qobj->tbo.resv,
                                          &fence->f);

        dma_fence_put(&fence->f);
out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

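/* VIRTGPU_TRANSFER_TO_HOST: copy a region of the guest BO to the host
 * resource, as a plain 2D transfer without virgl or as a fenced 3D transfer
 * with it. */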
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                              true, false);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, qobj->hw_res_handle, offset,
                         box.w, box.h, box.x, box.y, NULL);
        } else {
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev, qobj->hw_res_handle,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &box, &fence);
                reservation_object_add_excl_fence(qobj->tbo.resv,
                                                  &fence->f);
                dma_fence_put(&fence->f);
        }

out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

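/* VIRTGPU_WAIT: wait (or poll, with VIRTGPU_WAIT_NOWAIT) for pending
 * operations on the given BO to complete. */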
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        int ret;
        bool nowait = false;

        gobj = drm_gem_object_lookup(file, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        if (args->flags & VIRTGPU_WAIT_NOWAIT)
                nowait = true;
        ret = virtio_gpu_object_wait(qobj, nowait);

        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

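/* VIRTGPU_GET_CAPS: copy a capability set to userspace, serving it from the
 * capset cache when possible and otherwise fetching it from the host. */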
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;
        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* only copy to user the minimum of the host caps size or the guest caps size */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        ptr = cache_ent->caps_cache;
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);

        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);

        /* the is_valid check must precede the copy of the cache entry. */
        smp_rmb();

        ptr = cache_ent->caps_cache;

copy_exit:
        if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
                return -EFAULT;

        return 0;
}

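/* Ioctl dispatch table referenced from the driver's struct drm_driver;
 * every entry is allowed on render nodes. */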
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
           thread these in the underlying GL */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
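
/*
 * Illustrative userspace sketch (not part of the original file): a minimal
 * example of driving the GETPARAM ioctl above, assuming the uapi definitions
 * from <drm/virtgpu_drm.h> and an already-opened virtio-gpu DRM fd.
 *
 *     struct drm_virtgpu_getparam gp = { 0 };
 *     int has_3d = 0;
 *
 *     gp.param = VIRTGPU_PARAM_3D_FEATURES;
 *     gp.value = (uint64_t)(uintptr_t)&has_3d;  // the ioctl copies an int here
 *     if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && has_3d)
 *             use_virgl_3d();                   // hypothetical application hook
 */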