GNU Linux-libre 4.19.286-gnu1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

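/**
 * amdgpu_job_timedout - scheduler timeout handler
 * @s_job: scheduler job that ran into a timeout
 *
 * Called by the DRM GPU scheduler when @s_job did not finish within the
 * ring's timeout. Logs the last signaled and emitted fence sequence
 * numbers of the ring and triggers GPU recovery on the job's device.
 */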
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);

        amdgpu_device_gpu_recover(ring->adev, job, false);
}

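/**
 * amdgpu_job_alloc - allocate a job with space for IBs
 * @adev: amdgpu device the job belongs to
 * @num_ibs: number of IBs to reserve behind the job structure, must not
 *	be zero
 * @job: resulting job, only valid on success
 * @vm: optional VM the job will run in, may be NULL
 *
 * Allocates the job and its IB array in one block, points base.sched at
 * some ring so that adev is always reachable through the job, creates
 * the sync objects and snapshots the current VRAM lost counter.
 *
 * Returns 0 on success or a negative error code.
 */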
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
}

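/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job belongs to
 * @size: size of the IB in bytes
 * @job: resulting job, only valid on success
 *
 * Convenience wrapper around amdgpu_job_alloc() for VM-less jobs with
 * exactly one IB; also points vm_pd_addr at the GART table.
 *
 * Returns 0 on success or a negative error code.
 */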
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
        else
                (*job)->vm_pd_addr = adev->gart.table_addr;

        return r;
}

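/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job to clean up
 *
 * Frees all IBs of @job, keeping them alive until the scheduler's
 * finished fence, or the hardware fence if no scheduler fence exists
 * yet, has signaled.
 */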
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

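/**
 * amdgpu_job_free_cb - scheduler free_job callback
 * @s_job: scheduler job to free
 *
 * Called by the scheduler when the job is no longer needed; drops the
 * ring priority reference taken in amdgpu_job_submit(), releases the
 * fences and sync objects and frees the job itself.
 */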
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

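/**
 * amdgpu_job_free - free a job not owned by the scheduler
 * @job: job to free
 *
 * Counterpart of amdgpu_job_free_cb() for jobs that were never pushed
 * to the scheduler, e.g. on submission errors or after direct
 * submission; frees the IBs, fences and sync objects immediately.
 */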
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

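/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit, owned by the scheduler afterwards
 * @entity: scheduler entity to push the job into
 * @owner: owner of the job, used for dependency handling
 * @f: resulting finished fence, must not be NULL
 *
 * Initializes the scheduler job, hands the caller a reference to the
 * finished fence, frees the job's IB resources and pushes the job to
 * @entity. Also takes a ring priority reference which
 * amdgpu_job_free_cb() drops again.
 *
 * A minimal usage sketch; error handling is elided and the surrounding
 * objects (adev, entity, owner) are illustrative only:
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *fence;
 *
 *	amdgpu_job_alloc_with_ib(adev, 64, &job);
 *	// ... fill job->ibs[0] with packets ...
 *	amdgpu_job_submit(job, &entity, owner, &fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 *
 * Returns 0 on success or a negative error code.
 */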
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        enum drm_sched_priority priority;
        struct amdgpu_ring *ring;
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        return 0;
}

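/**
 * amdgpu_job_submit_direct - submit a job directly to a ring
 * @job: job to submit, freed on success
 * @ring: ring the IBs are scheduled on
 * @fence: resulting hardware fence
 *
 * Bypasses the scheduler and runs the job's IBs on @ring right away,
 * for submissions that cannot or should not go through a scheduler
 * entity.
 *
 * Returns 0 on success or a negative error code.
 */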
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        if (r)
                return r;

        /* only dereference *fence after amdgpu_ib_schedule() succeeded */
        job->fence = dma_fence_get(*fence);

        amdgpu_job_free(job);
        return 0;
}

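/**
 * amdgpu_job_dependency - scheduler dependency callback
 * @sched_job: job to ask about
 * @s_entity: entity the job is queued in
 *
 * Returns the next fence the job still has to wait on before it can
 * run, or NULL when it is ready. Explicit dependencies that the
 * scheduler optimized away are remembered in sched_sync so that the IB
 * submission can still emit a pipeline sync for them. Once all sync
 * fences are consumed a VMID is grabbed, which may in turn add new
 * fences to wait for.
 */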
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        bool explicit = false;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync, &explicit);
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
                        r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
                                              fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence (%d)\n", r);
                }
        }

        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync, NULL);
        }

        return fence;
}

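/**
 * amdgpu_job_run - scheduler run_job callback
 * @sched_job: job to run
 *
 * Schedules the job's IBs on its ring. If VRAM was lost since the job
 * was created, or if an error is already set on the finished fence, the
 * IBs are skipped and the error (e.g. -ECANCELED) is propagated instead.
 *
 * Returns the hardware fence of the submission, or an ERR_PTR on
 * failure.
 */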
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r = 0;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        /* skip IB as well if VRAM lost */
        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);

        return r ? ERR_PTR(r) : fence;
}

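/*
 * Backend operations the DRM GPU scheduler uses to drive amdgpu jobs:
 * dependency resolution, execution, timeout handling and cleanup.
 */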
const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};