GNU Linux-libre 4.14.266-gnu1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

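/*
 * amdgpu_job_timedout - scheduler callback for a job that ran past its timeout
 *
 * Logs the ring name along with the last signaled and last emitted fence
 * sequence numbers, then kicks off a GPU reset (the SR-IOV variant when
 * running as a virtual function).
 */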
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
                  job->base.sched->name,
                  atomic_read(&job->ring->fence_drv.last_seq),
                  job->ring->fence_drv.sync_seq);

        if (amdgpu_sriov_vf(job->adev))
                amdgpu_sriov_gpu_reset(job->adev, job);
        else
                amdgpu_gpu_reset(job->adev);
}

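/*
 * amdgpu_job_alloc - allocate a job together with its IB array
 *
 * The job struct and @num_ibs IB structs are carved out of a single
 * kzalloc'ed block, with the IB array placed immediately after the job.
 * The three sync objects used for dependency tracking are initialized
 * as well.
 */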
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        (*job)->adev = adev;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->dep_sync);
        amdgpu_sync_create(&(*job)->sched_sync);

        return 0;
}

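/*
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB of @size bytes
 *
 * Convenience wrapper around amdgpu_job_alloc() for jobs that need exactly
 * one IB and no VM; the page directory address is taken from the GART table.
 */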
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
        else
                (*job)->vm_pd_addr = adev->gart.table_addr;

        return r;
}

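/*
 * amdgpu_job_free_resources - release the IBs backing a job
 *
 * Each IB is freed against the scheduler's "finished" fence when one
 * exists (the hardware fence otherwise), so the IB memory is not reused
 * before the job is done with it.
 */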
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
}

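/*
 * amdgpu_job_free_cb - scheduler callback that frees the job once the
 * scheduler is done with it
 */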
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->dep_sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

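/*
 * amdgpu_job_free - free a job that was never pushed to the scheduler
 *
 * Counterpart to amdgpu_job_free_cb() for submission paths that fail
 * before amdgpu_job_submit().
 */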
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->dep_sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

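/*
 * amdgpu_job_submit - hand a job over to the GPU scheduler
 *
 * Initializes the scheduler job, hands the "finished" fence back to the
 * caller through @f, queues the IB memory for release once that fence
 * signals, and pushes the job onto @entity's queue.
 */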
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct dma_fence **f)
{
        int r;

        if (!f)
                return -EINVAL;

        job->ring = ring;

        r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);

        return 0;
}

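/*
 * amdgpu_job_dependency - scheduler callback returning the next fence to
 * wait on before the job can run
 *
 * Explicit dependencies from dep_sync are consumed first; fences the
 * scheduler could optimize away are additionally recorded in sched_sync.
 * Once all other dependencies are satisfied, a VM ID is grabbed for jobs
 * that still need one, which may add further fences to wait on.
 */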
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
        int r;

        if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
                r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
                if (r)
                        DRM_ERROR("Error adding fence to sync (%d)\n", r);
        }
        if (!fence)
                fence = amdgpu_sync_get_fence(&job->sync);
        while (fence == NULL && vm && !job->vm_id) {
                struct amdgpu_ring *ring = job->ring;

                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                                      &job->base.s_fence->finished,
                                      job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync);
        }

        return fence;
}

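/*
 * amdgpu_job_run - scheduler callback that actually submits the job's IBs
 *
 * Submission is skipped when VRAM contents were lost since the file
 * private was created; otherwise the IBs are scheduled on the job's ring
 * and the resulting hardware fence is returned (and cached in job->fence,
 * where a GPU reset may later replace it).
 */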
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_job *job;
        struct amdgpu_fpriv *fpriv = NULL;
        int r;

        if (!sched_job) {
                DRM_ERROR("job is null\n");
                return NULL;
        }
        job = to_amdgpu_job(sched_job);

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);
        if (job->vm)
                fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
        /* skip IB scheduling when the VRAM contents have been lost */
        if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv)) {
                DRM_ERROR("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                                       job, &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* after a GPU reset, the hardware fence is replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);
        amdgpu_job_free_resources(job);
        return fence;
}

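/* Scheduler backend callbacks that wire amdgpu jobs into the GPU scheduler. */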
const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};