GNU Linux-libre 4.14.266-gnu1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                                int alignment, u32 initial_domain,
                                u64 flags, bool kernel,
                                struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, 0, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;

        mutex_lock(&ddev->filelist_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_put_unlocked(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new
 * (create) and open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
                           struct drm_file *file_priv)
{
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(abo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, abo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(abo);
        return 0;
}

static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
        /* if anything is swapped out don't swap it in here,
         * just abort and wait for the next CS
         */
        if (!amdgpu_bo_gpu_accessible(bo))
                return -ERESTARTSYS;

        if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
                return -ERESTARTSYS;

        return 0;
}

static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct amdgpu_bo *bo =
                        container_of(entry->bo, struct amdgpu_bo, tbo);
                if (amdgpu_gem_vm_check(NULL, bo))
                        return false;
        }

        return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;

        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        int r;

        INIT_LIST_HEAD(&list);

        tv.bo = &bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (bo_va && --bo_va->ref_count == 0) {
                amdgpu_vm_bo_rmv(adev, bo_va);

                if (amdgpu_gem_vm_ready(adev, vm, &list)) {
                        struct dma_fence *fence = NULL;

                        r = amdgpu_vm_clear_freed(adev, vm, &fence);
                        if (unlikely(r)) {
                                dev_err(adev->dev, "failed to clear page "
                                        "tables on GEM object close (%d)\n", r);
                        }

                        if (fence) {
                                amdgpu_bo_fence(bo, fence, true);
                                dma_fence_put(fence);
                        }
                }
        }
        ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* reject invalid gem flags */
        if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                      AMDGPU_GEM_CREATE_VRAM_CLEARED))
                return -EINVAL;

        /* reject invalid gem domains */
        if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
                                 AMDGPU_GEM_DOMAIN_GTT |
                                 AMDGPU_GEM_DOMAIN_VRAM |
                                 AMDGPU_GEM_DOMAIN_GDS |
                                 AMDGPU_GEM_DOMAIN_GWS |
                                 AMDGPU_GEM_DOMAIN_OA))
                return -EINVAL;

        /* create a GEM object to contain this buffer object */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else
                        return -EINVAL;
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                return r;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r)
                return r;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;
}
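
/*
 * Illustrative userspace sketch (not part of the original file): allocating a
 * 1 MiB CPU-visible VRAM buffer through DRM_IOCTL_AMDGPU_GEM_CREATE, which
 * lands in the ioctl above. The already-open "fd" and the libdrm headers are
 * assumptions; error handling is elided.
 *
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <xf86drm.h>
 *      #include <amdgpu_drm.h>
 *
 *      union drm_amdgpu_gem_create req;
 *
 *      memset(&req, 0, sizeof(req));
 *      req.in.bo_size = 1 << 20;       // rounded up to PAGE_SIZE by the kernel
 *      req.in.alignment = 0;           // anything below PAGE_SIZE means page alignment
 *      req.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
 *      req.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *      if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req) == 0)
 *              printf("GEM handle %u\n", req.out.handle);
 */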

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must install an MMU notifier */
                return -EACCES;
        }

        /* create a GEM object to contain this buffer object */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                return r;

        bo = gem_to_amdgpu_bo(gobj);
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);

                r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                 bo->tbo.ttm->pages);
                if (r)
                        goto unlock_mmap_sem;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto free_pages;

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto free_pages;

                up_read(&current->mm->mmap_sem);
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r)
                return r;

        args->handle = handle;
        return 0;

free_pages:
        release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
        up_read(&current->mm->mmap_sem);

release_object:
        drm_gem_object_put_unlocked(gobj);

        return r;
}
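
/*
 * Illustrative userspace sketch (not part of the original file): wrapping an
 * anonymous, page-aligned allocation in a GEM object via
 * DRM_IOCTL_AMDGPU_GEM_USERPTR. The open "fd" and the libdrm headers are
 * assumptions; error handling is elided.
 *
 *      #include <stdint.h>
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <string.h>
 *      #include <xf86drm.h>
 *      #include <amdgpu_drm.h>
 *
 *      struct drm_amdgpu_gem_userptr up;
 *      size_t size = 1 << 20;
 *      void *cpu = aligned_alloc(4096, size);  // addr and size must be page aligned
 *
 *      memset(&up, 0, sizeof(up));
 *      up.addr = (uintptr_t)cpu;
 *      up.size = size;
 *      up.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
 *                 AMDGPU_GEM_USERPTR_VALIDATE; // writable access requires REGISTER, see above
 *      if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &up) == 0)
 *              printf("userptr handle %u\n", up.handle);
 */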

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_put_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_put_unlocked(gobj);
        return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}
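
/*
 * Worked example (illustrative): timeout_ns is an absolute deadline on the
 * monotonic clock, not a duration. If ktime_get() currently reads
 * 5 000 000 000 ns and userspace passes timeout_ns = 5 250 000 000 ns, the
 * remaining time is 250 000 000 ns, so this returns
 * nsecs_to_jiffies(250 000 000): 25 jiffies at HZ=100, 250 at HZ=1000. A
 * deadline already in the past yields 0 (poll once), and a value with the
 * top bit set (e.g. ~0ULL) means "wait forever" via MAX_SCHEDULE_TIMEOUT.
 */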

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
                                                  timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_put_unlocked(gobj);
        return r;
}
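
/*
 * Illustrative userspace sketch (not part of the original file): waiting up
 * to one second for all fences on a BO. The deadline is absolute (see
 * amdgpu_gem_timeout() above), and out.status follows the inverted convention
 * from the handler: 0 means the BO went idle, 1 means the wait timed out.
 * The open "fd", the "handle" and the libdrm headers are assumptions.
 *
 *      #include <stdint.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <time.h>
 *      #include <xf86drm.h>
 *      #include <amdgpu_drm.h>
 *
 *      union drm_amdgpu_gem_wait_idle wait;
 *      struct timespec ts;
 *
 *      clock_gettime(CLOCK_MONOTONIC, &ts);
 *      memset(&wait, 0, sizeof(wait));
 *      wait.in.handle = handle;
 *      wait.in.timeout = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec +
 *                        1000000000ull;        // now + 1 s
 *      if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &wait) == 0 &&
 *          wait.out.status == 0)
 *              puts("buffer is idle");
 */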

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
                                    struct list_head *list,
                                    uint32_t operation)
{
        int r = -ERESTARTSYS;

        if (!amdgpu_gem_vm_ready(adev, vm, list))
                goto error;

        r = amdgpu_vm_update_directories(adev, vm);
        if (r)
                goto error;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                goto error;

        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE)
                r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
        const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_PRT;

        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo_list_entry vm_pd;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        uint64_t va_flags;
        uint64_t vm_size;
        int r = 0;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
        vm_size -= AMDGPU_VA_RESERVED_SIZE;
        if (args->va_address + args->map_size > vm_size) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%llx is in top reserved area 0x%llx\n",
                        args->va_address + args->map_size, vm_size);
                return -EINVAL;
        }

        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
                        args->flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
        case AMDGPU_VA_OP_CLEAR:
        case AMDGPU_VA_OP_REPLACE:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }
        if ((args->operation == AMDGPU_VA_OP_MAP) ||
            (args->operation == AMDGPU_VA_OP_REPLACE)) {
                if (amdgpu_kms_vram_lost(adev, fpriv))
                        return -ENODEV;
        }

        INIT_LIST_HEAD(&list);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
                if (gobj == NULL)
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
                tv.shared = false;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
                abo = NULL;
        }

        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_unref;

        if (abo) {
                bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                if (!bo_va) {
                        r = -ENOENT;
                        goto error_backoff;
                }
        } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                bo_va = fpriv->prt_va;
        } else {
                bo_va = NULL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
                                        args->map_size);
                if (r)
                        goto error_backoff;

                va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;

        case AMDGPU_VA_OP_CLEAR:
                r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
                                                args->va_address,
                                                args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
                r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
                                        args->map_size);
                if (r)
                        goto error_backoff;

                va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
                break;
        default:
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
                                        args->operation);

error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
        drm_gem_object_put_unlocked(gobj);
        return r;
}
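
/*
 * Illustrative userspace sketch (not part of the original file): mapping an
 * existing GEM handle at a fixed GPU virtual address with
 * DRM_IOCTL_AMDGPU_GEM_VA. The open "fd", the "handle" and the chosen address
 * are assumptions; the address merely has to clear the reserved ranges
 * checked above and stay page aligned.
 *
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <xf86drm.h>
 *      #include <amdgpu_drm.h>
 *
 *      struct drm_amdgpu_gem_va va;
 *
 *      memset(&va, 0, sizeof(va));
 *      va.handle = handle;
 *      va.operation = AMDGPU_VA_OP_MAP;
 *      va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *      va.va_address = 1ull << 30;     // 1 GiB, above the low reserved area
 *      va.offset_in_bo = 0;
 *      va.map_size = 1 << 20;          // must not exceed the BO size
 *      if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va) != 0)
 *              perror("GEM_VA map");
 */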

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = u64_to_user_ptr(args->value);

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
                        r = -EINVAL;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->preferred_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;

        unsigned domain;
        const char *placement;
        unsigned pin_count;
        uint64_t offset;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);

        offset = ACCESS_ONCE(bo->tbo.mem.start);
        if (offset != AMDGPU_BO_INVALID_OFFSET)
                seq_printf(m, " @ 0x%010Lx", offset);

        pin_count = ACCESS_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->filelist_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->filelist_mutex);
        return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}