GNU Linux-libre 4.14.266-gnu1
[releases.git] / drivers / gpu / drm / i915 / intel_breadcrumbs.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/kthread.h>
26 #include <uapi/linux/sched/types.h>
27
28 #include "i915_drv.h"
29
30 static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
31 {
32         struct intel_wait *wait;
33         unsigned int result = 0;
34
35         lockdep_assert_held(&b->irq_lock);
36
37         wait = b->irq_wait;
38         if (wait) {
39                 result = ENGINE_WAKEUP_WAITER;
40                 if (wake_up_process(wait->tsk))
41                         result |= ENGINE_WAKEUP_ASLEEP;
42         }
43
44         return result;
45 }
46
47 unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
48 {
49         struct intel_breadcrumbs *b = &engine->breadcrumbs;
50         unsigned long flags;
51         unsigned int result;
52
53         spin_lock_irqsave(&b->irq_lock, flags);
54         result = __intel_breadcrumbs_wakeup(b);
55         spin_unlock_irqrestore(&b->irq_lock, flags);
56
57         return result;
58 }
59
60 static unsigned long wait_timeout(void)
61 {
62         return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
63 }
64
65 static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
66 {
67         DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n",
68                          engine->name, __builtin_return_address(0),
69                          yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
70                                         &engine->irq_posted)),
71                          intel_engine_get_seqno(engine),
72                          intel_engine_last_submit(engine));
73
74         set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
75 }
76
77 static void intel_breadcrumbs_hangcheck(unsigned long data)
78 {
79         struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
80         struct intel_breadcrumbs *b = &engine->breadcrumbs;
81
82         if (!b->irq_armed)
83                 return;
84
85         if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
86                 b->hangcheck_interrupts = atomic_read(&engine->irq_count);
87                 mod_timer(&b->hangcheck, wait_timeout());
88                 return;
89         }
90
91         /* We keep the hangcheck timer alive until we disarm the irq, even
92          * if there are no waiters at present.
93          *
94          * If the waiter was currently running, assume it hasn't had a chance
95          * to process the pending interrupt (e.g, low priority task on a loaded
96          * system) and wait until it sleeps before declaring a missed interrupt.
97          *
98          * If the waiter was asleep (and not even pending a wakeup), then we
99          * must have missed an interrupt as the GPU has stopped advancing
100          * but we still have a waiter. Assuming all batches complete within
101          * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
102          */
103         if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
104                 missed_breadcrumb(engine);
105                 mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
106         } else {
107                 mod_timer(&b->hangcheck, wait_timeout());
108         }
109 }
110
111 static void intel_breadcrumbs_fake_irq(unsigned long data)
112 {
113         struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
114         struct intel_breadcrumbs *b = &engine->breadcrumbs;
115
116         /* The timer persists in case we cannot enable interrupts,
117          * or if we have previously seen seqno/interrupt incoherency
118          * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
119          * Here the worker will wake up every jiffie in order to kick the
120          * oldest waiter to do the coherent seqno check.
121          */
122
123         spin_lock_irq(&b->irq_lock);
124         if (!__intel_breadcrumbs_wakeup(b))
125                 __intel_engine_disarm_breadcrumbs(engine);
126         spin_unlock_irq(&b->irq_lock);
127         if (!b->irq_armed)
128                 return;
129
130         mod_timer(&b->fake_irq, jiffies + 1);
131
132         /* Ensure that even if the GPU hangs, we get woken up.
133          *
134          * However, note that if no one is waiting, we never notice
135          * a gpu hang. Eventually, we will have to wait for a resource
136          * held by the GPU and so trigger a hangcheck. In the most
137          * pathological case, this will be upon memory starvation! To
138          * prevent this, we also queue the hangcheck from the retire
139          * worker.
140          */
141         i915_queue_hangcheck(engine->i915);
142 }
143
144 static void irq_enable(struct intel_engine_cs *engine)
145 {
146         /* Enabling the IRQ may miss the generation of the interrupt, but
147          * we still need to force the barrier before reading the seqno,
148          * just in case.
149          */
150         set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
151
152         /* Caller disables interrupts */
153         spin_lock(&engine->i915->irq_lock);
154         engine->irq_enable(engine);
155         spin_unlock(&engine->i915->irq_lock);
156 }
157
158 static void irq_disable(struct intel_engine_cs *engine)
159 {
160         /* Caller disables interrupts */
161         spin_lock(&engine->i915->irq_lock);
162         engine->irq_disable(engine);
163         spin_unlock(&engine->i915->irq_lock);
164 }
165
166 void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
167 {
168         struct intel_breadcrumbs *b = &engine->breadcrumbs;
169
170         lockdep_assert_held(&b->irq_lock);
171         GEM_BUG_ON(b->irq_wait);
172
173         if (b->irq_enabled) {
174                 irq_disable(engine);
175                 b->irq_enabled = false;
176         }
177
178         b->irq_armed = false;
179 }
180
181 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
182 {
183         struct intel_breadcrumbs *b = &engine->breadcrumbs;
184         struct intel_wait *wait, *n, *first;
185
186         if (!b->irq_armed)
187                 return;
188
189         /* We only disarm the irq when we are idle (all requests completed),
190          * so if the bottom-half remains asleep, it missed the request
191          * completion.
192          */
193
194         spin_lock_irq(&b->rb_lock);
195
196         spin_lock(&b->irq_lock);
197         first = fetch_and_zero(&b->irq_wait);
198         __intel_engine_disarm_breadcrumbs(engine);
199         spin_unlock(&b->irq_lock);
200
201         rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
202                 RB_CLEAR_NODE(&wait->node);
203                 if (wake_up_process(wait->tsk) && wait == first)
204                         missed_breadcrumb(engine);
205         }
206         b->waiters = RB_ROOT;
207
208         spin_unlock_irq(&b->rb_lock);
209 }
210
211 static bool use_fake_irq(const struct intel_breadcrumbs *b)
212 {
213         const struct intel_engine_cs *engine =
214                 container_of(b, struct intel_engine_cs, breadcrumbs);
215
216         if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
217                 return false;
218
219         /* Only start with the heavy weight fake irq timer if we have not
220          * seen any interrupts since enabling it the first time. If the
221          * interrupts are still arriving, it means we made a mistake in our
222          * engine->seqno_barrier(), a timing error that should be transient
223          * and unlikely to reoccur.
224          */
225         return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
226 }
227
228 static void enable_fake_irq(struct intel_breadcrumbs *b)
229 {
230         /* Ensure we never sleep indefinitely */
231         if (!b->irq_enabled || use_fake_irq(b))
232                 mod_timer(&b->fake_irq, jiffies + 1);
233         else
234                 mod_timer(&b->hangcheck, wait_timeout());
235 }
236
237 static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
238 {
239         struct intel_engine_cs *engine =
240                 container_of(b, struct intel_engine_cs, breadcrumbs);
241         struct drm_i915_private *i915 = engine->i915;
242
243         lockdep_assert_held(&b->irq_lock);
244         if (b->irq_armed)
245                 return false;
246
247         /* The breadcrumb irq will be disarmed on the interrupt after the
248          * waiters are signaled. This gives us a single interrupt window in
249          * which we can add a new waiter and avoid the cost of re-enabling
250          * the irq.
251          */
252         b->irq_armed = true;
253         GEM_BUG_ON(b->irq_enabled);
254
255         if (I915_SELFTEST_ONLY(b->mock)) {
256                 /* For our mock objects we want to avoid interaction
257                  * with the real hardware (which is not set up). So
258                  * we simply pretend we have enabled the powerwell
259                  * and the irq, and leave it up to the mock
260                  * implementation to call intel_engine_wakeup()
261                  * itself when it wants to simulate a user interrupt,
262                  */
263                 return true;
264         }
265
266         /* Since we are waiting on a request, the GPU should be busy
267          * and should have its own rpm reference. This is tracked
268          * by i915->gt.awake, we can forgo holding our own wakref
269          * for the interrupt as before i915->gt.awake is released (when
270          * the driver is idle) we disarm the breadcrumbs.
271          */
272
273         /* No interrupts? Kick the waiter every jiffie! */
274         if (intel_irqs_enabled(i915)) {
275                 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
276                         irq_enable(engine);
277                 b->irq_enabled = true;
278         }
279
280         enable_fake_irq(b);
281         return true;
282 }
283
284 static inline struct intel_wait *to_wait(struct rb_node *node)
285 {
286         return rb_entry(node, struct intel_wait, node);
287 }
288
289 static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
290                                               struct intel_wait *wait)
291 {
292         lockdep_assert_held(&b->rb_lock);
293         GEM_BUG_ON(b->irq_wait == wait);
294
295         /* This request is completed, so remove it from the tree, mark it as
296          * complete, and *then* wake up the associated task. N.B. when the
297          * task wakes up, it will find the empty rb_node, discern that it
298          * has already been removed from the tree and skip the serialisation
299          * of the b->rb_lock and b->irq_lock. This means that the destruction
300          * of the intel_wait is not serialised with the interrupt handler
301          * by the waiter - it must instead be serialised by the caller.
302          */
303         rb_erase(&wait->node, &b->waiters);
304         RB_CLEAR_NODE(&wait->node);
305
306         wake_up_process(wait->tsk); /* implicit smp_wmb() */
307 }
308
309 static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
310                                             struct rb_node *next)
311 {
312         struct intel_breadcrumbs *b = &engine->breadcrumbs;
313
314         spin_lock(&b->irq_lock);
315         GEM_BUG_ON(!b->irq_armed);
316         GEM_BUG_ON(!b->irq_wait);
317         b->irq_wait = to_wait(next);
318         spin_unlock(&b->irq_lock);
319
320         /* We always wake up the next waiter that takes over as the bottom-half
321          * as we may delegate not only the irq-seqno barrier to the next waiter
322          * but also the task of waking up concurrent waiters.
323          */
324         if (next)
325                 wake_up_process(to_wait(next)->tsk);
326 }
327
328 static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
329                                     struct intel_wait *wait)
330 {
331         struct intel_breadcrumbs *b = &engine->breadcrumbs;
332         struct rb_node **p, *parent, *completed;
333         bool first, armed;
334         u32 seqno;
335
336         /* Insert the request into the retirement ordered list
337          * of waiters by walking the rbtree. If we are the oldest
338          * seqno in the tree (the first to be retired), then
339          * set ourselves as the bottom-half.
340          *
341          * As we descend the tree, prune completed branches since we hold the
342          * spinlock we know that the first_waiter must be delayed and can
343          * reduce some of the sequential wake up latency if we take action
344          * ourselves and wake up the completed tasks in parallel. Also, by
345          * removing stale elements in the tree, we may be able to reduce the
346          * ping-pong between the old bottom-half and ourselves as first-waiter.
347          */
348         armed = false;
349         first = true;
350         parent = NULL;
351         completed = NULL;
352         seqno = intel_engine_get_seqno(engine);
353
354          /* If the request completed before we managed to grab the spinlock,
355           * return now before adding ourselves to the rbtree. We let the
356           * current bottom-half handle any pending wakeups and instead
357           * try and get out of the way quickly.
358           */
359         if (i915_seqno_passed(seqno, wait->seqno)) {
360                 RB_CLEAR_NODE(&wait->node);
361                 return first;
362         }
363
364         p = &b->waiters.rb_node;
365         while (*p) {
366                 parent = *p;
367                 if (wait->seqno == to_wait(parent)->seqno) {
368                         /* We have multiple waiters on the same seqno, select
369                          * the highest priority task (that with the smallest
370                          * task->prio) to serve as the bottom-half for this
371                          * group.
372                          */
373                         if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
374                                 p = &parent->rb_right;
375                                 first = false;
376                         } else {
377                                 p = &parent->rb_left;
378                         }
379                 } else if (i915_seqno_passed(wait->seqno,
380                                              to_wait(parent)->seqno)) {
381                         p = &parent->rb_right;
382                         if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
383                                 completed = parent;
384                         else
385                                 first = false;
386                 } else {
387                         p = &parent->rb_left;
388                 }
389         }
390         rb_link_node(&wait->node, parent, p);
391         rb_insert_color(&wait->node, &b->waiters);
392
393         if (first) {
394                 spin_lock(&b->irq_lock);
395                 b->irq_wait = wait;
396                 /* After assigning ourselves as the new bottom-half, we must
397                  * perform a cursory check to prevent a missed interrupt.
398                  * Either we miss the interrupt whilst programming the hardware,
399                  * or if there was a previous waiter (for a later seqno) they
400                  * may be woken instead of us (due to the inherent race
401                  * in the unlocked read of b->irq_seqno_bh in the irq handler)
402                  * and so we miss the wake up.
403                  */
404                 armed = __intel_breadcrumbs_enable_irq(b);
405                 spin_unlock(&b->irq_lock);
406         }
407
408         if (completed) {
409                 /* Advance the bottom-half (b->irq_wait) before we wake up
410                  * the waiters who may scribble over their intel_wait
411                  * just as the interrupt handler is dereferencing it via
412                  * b->irq_wait.
413                  */
414                 if (!first) {
415                         struct rb_node *next = rb_next(completed);
416                         GEM_BUG_ON(next == &wait->node);
417                         __intel_breadcrumbs_next(engine, next);
418                 }
419
420                 do {
421                         struct intel_wait *crumb = to_wait(completed);
422                         completed = rb_prev(completed);
423                         __intel_breadcrumbs_finish(b, crumb);
424                 } while (completed);
425         }
426
427         GEM_BUG_ON(!b->irq_wait);
428         GEM_BUG_ON(!b->irq_armed);
429         GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
430
431         return armed;
432 }
433
434 bool intel_engine_add_wait(struct intel_engine_cs *engine,
435                            struct intel_wait *wait)
436 {
437         struct intel_breadcrumbs *b = &engine->breadcrumbs;
438         bool armed;
439
440         spin_lock_irq(&b->rb_lock);
441         armed = __intel_engine_add_wait(engine, wait);
442         spin_unlock_irq(&b->rb_lock);
443         if (armed)
444                 return armed;
445
446         /* Make the caller recheck if its request has already started. */
447         return i915_seqno_passed(intel_engine_get_seqno(engine),
448                                  wait->seqno - 1);
449 }
450
451 static inline bool chain_wakeup(struct rb_node *rb, int priority)
452 {
453         return rb && to_wait(rb)->tsk->prio <= priority;
454 }
455
456 static inline int wakeup_priority(struct intel_breadcrumbs *b,
457                                   struct task_struct *tsk)
458 {
459         if (tsk == b->signaler)
460                 return INT_MIN;
461         else
462                 return tsk->prio;
463 }
464
465 static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
466                                        struct intel_wait *wait)
467 {
468         struct intel_breadcrumbs *b = &engine->breadcrumbs;
469
470         lockdep_assert_held(&b->rb_lock);
471
472         if (RB_EMPTY_NODE(&wait->node))
473                 goto out;
474
475         if (b->irq_wait == wait) {
476                 const int priority = wakeup_priority(b, wait->tsk);
477                 struct rb_node *next;
478
479                 /* We are the current bottom-half. Find the next candidate,
480                  * the first waiter in the queue on the remaining oldest
481                  * request. As multiple seqnos may complete in the time it
482                  * takes us to wake up and find the next waiter, we have to
483                  * wake up that waiter for it to perform its own coherent
484                  * completion check.
485                  */
486                 next = rb_next(&wait->node);
487                 if (chain_wakeup(next, priority)) {
488                         /* If the next waiter is already complete,
489                          * wake it up and continue onto the next waiter. So
490                          * if have a small herd, they will wake up in parallel
491                          * rather than sequentially, which should reduce
492                          * the overall latency in waking all the completed
493                          * clients.
494                          *
495                          * However, waking up a chain adds extra latency to
496                          * the first_waiter. This is undesirable if that
497                          * waiter is a high priority task.
498                          */
499                         u32 seqno = intel_engine_get_seqno(engine);
500
501                         while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
502                                 struct rb_node *n = rb_next(next);
503
504                                 __intel_breadcrumbs_finish(b, to_wait(next));
505                                 next = n;
506                                 if (!chain_wakeup(next, priority))
507                                         break;
508                         }
509                 }
510
511                 __intel_breadcrumbs_next(engine, next);
512         } else {
513                 GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
514         }
515
516         GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
517         rb_erase(&wait->node, &b->waiters);
518
519 out:
520         GEM_BUG_ON(b->irq_wait == wait);
521         GEM_BUG_ON(rb_first(&b->waiters) !=
522                    (b->irq_wait ? &b->irq_wait->node : NULL));
523 }
524
525 void intel_engine_remove_wait(struct intel_engine_cs *engine,
526                               struct intel_wait *wait)
527 {
528         struct intel_breadcrumbs *b = &engine->breadcrumbs;
529
530         /* Quick check to see if this waiter was already decoupled from
531          * the tree by the bottom-half to avoid contention on the spinlock
532          * by the herd.
533          */
534         if (RB_EMPTY_NODE(&wait->node)) {
535                 GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
536                 return;
537         }
538
539         spin_lock_irq(&b->rb_lock);
540         __intel_engine_remove_wait(engine, wait);
541         spin_unlock_irq(&b->rb_lock);
542 }
543
544 static bool signal_complete(const struct drm_i915_gem_request *request)
545 {
546         if (!request)
547                 return false;
548
549         /*
550          * Carefully check if the request is complete, giving time for the
551          * seqno to be visible or if the GPU hung.
552          */
553         return __i915_request_irq_complete(request);
554 }
555
556 static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
557 {
558         return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
559 }
560
561 static void signaler_set_rtpriority(void)
562 {
563          struct sched_param param = { .sched_priority = 1 };
564
565          sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
566 }
567
568 static int intel_breadcrumbs_signaler(void *arg)
569 {
570         struct intel_engine_cs *engine = arg;
571         struct intel_breadcrumbs *b = &engine->breadcrumbs;
572         struct drm_i915_gem_request *request;
573
574         /* Install ourselves with high priority to reduce signalling latency */
575         signaler_set_rtpriority();
576
577         do {
578                 bool do_schedule = true;
579
580                 set_current_state(TASK_INTERRUPTIBLE);
581
582                 /* We are either woken up by the interrupt bottom-half,
583                  * or by a client adding a new signaller. In both cases,
584                  * the GPU seqno may have advanced beyond our oldest signal.
585                  * If it has, propagate the signal, remove the waiter and
586                  * check again with the next oldest signal. Otherwise we
587                  * need to wait for a new interrupt from the GPU or for
588                  * a new client.
589                  */
590                 rcu_read_lock();
591                 request = rcu_dereference(b->first_signal);
592                 if (request)
593                         request = i915_gem_request_get_rcu(request);
594                 rcu_read_unlock();
595                 if (signal_complete(request)) {
596                         if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
597                                       &request->fence.flags)) {
598                                 local_bh_disable();
599                                 dma_fence_signal(&request->fence);
600                                 GEM_BUG_ON(!i915_gem_request_completed(request));
601                                 local_bh_enable(); /* kick start the tasklets */
602                         }
603
604                         spin_lock_irq(&b->rb_lock);
605
606                         /* Wake up all other completed waiters and select the
607                          * next bottom-half for the next user interrupt.
608                          */
609                         __intel_engine_remove_wait(engine,
610                                                    &request->signaling.wait);
611
612                         /* Find the next oldest signal. Note that as we have
613                          * not been holding the lock, another client may
614                          * have installed an even older signal than the one
615                          * we just completed - so double check we are still
616                          * the oldest before picking the next one.
617                          */
618                         if (request == rcu_access_pointer(b->first_signal)) {
619                                 struct rb_node *rb =
620                                         rb_next(&request->signaling.node);
621                                 rcu_assign_pointer(b->first_signal,
622                                                    rb ? to_signaler(rb) : NULL);
623                         }
624                         rb_erase(&request->signaling.node, &b->signals);
625                         RB_CLEAR_NODE(&request->signaling.node);
626
627                         spin_unlock_irq(&b->rb_lock);
628
629                         i915_gem_request_put(request);
630
631                         /* If the engine is saturated we may be continually
632                          * processing completed requests. This angers the
633                          * NMI watchdog if we never let anything else
634                          * have access to the CPU. Let's pretend to be nice
635                          * and relinquish the CPU if we burn through the
636                          * entire RT timeslice!
637                          */
638                         do_schedule = need_resched();
639                 }
640
641                 if (unlikely(do_schedule)) {
642                         DEFINE_WAIT(exec);
643
644                         if (kthread_should_park())
645                                 kthread_parkme();
646
647                         if (kthread_should_stop()) {
648                                 GEM_BUG_ON(request);
649                                 break;
650                         }
651
652                         if (request)
653                                 add_wait_queue(&request->execute, &exec);
654
655                         schedule();
656
657                         if (request)
658                                 remove_wait_queue(&request->execute, &exec);
659                 }
660                 i915_gem_request_put(request);
661         } while (1);
662         __set_current_state(TASK_RUNNING);
663
664         return 0;
665 }
666
667 void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
668                                    bool wakeup)
669 {
670         struct intel_engine_cs *engine = request->engine;
671         struct intel_breadcrumbs *b = &engine->breadcrumbs;
672         u32 seqno;
673
674         /* Note that we may be called from an interrupt handler on another
675          * device (e.g. nouveau signaling a fence completion causing us
676          * to submit a request, and so enable signaling). As such,
677          * we need to make sure that all other users of b->rb_lock protect
678          * against interrupts, i.e. use spin_lock_irqsave.
679          */
680
681         /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
682         GEM_BUG_ON(!irqs_disabled());
683         lockdep_assert_held(&request->lock);
684
685         seqno = i915_gem_request_global_seqno(request);
686         if (!seqno)
687                 return;
688
689         request->signaling.wait.tsk = b->signaler;
690         request->signaling.wait.request = request;
691         request->signaling.wait.seqno = seqno;
692         i915_gem_request_get(request);
693
694         spin_lock(&b->rb_lock);
695
696         /* First add ourselves into the list of waiters, but register our
697          * bottom-half as the signaller thread. As per usual, only the oldest
698          * waiter (not just signaller) is tasked as the bottom-half waking
699          * up all completed waiters after the user interrupt.
700          *
701          * If we are the oldest waiter, enable the irq (after which we
702          * must double check that the seqno did not complete).
703          */
704         wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
705
706         if (!__i915_gem_request_completed(request, seqno)) {
707                 struct rb_node *parent, **p;
708                 bool first;
709
710                 /* Now insert ourselves into the retirement ordered list of
711                  * signals on this engine. We track the oldest seqno as that
712                  * will be the first signal to complete.
713                  */
714                 parent = NULL;
715                 first = true;
716                 p = &b->signals.rb_node;
717                 while (*p) {
718                         parent = *p;
719                         if (i915_seqno_passed(seqno,
720                                               to_signaler(parent)->signaling.wait.seqno)) {
721                                 p = &parent->rb_right;
722                                 first = false;
723                         } else {
724                                 p = &parent->rb_left;
725                         }
726                 }
727                 rb_link_node(&request->signaling.node, parent, p);
728                 rb_insert_color(&request->signaling.node, &b->signals);
729                 if (first)
730                         rcu_assign_pointer(b->first_signal, request);
731         } else {
732                 __intel_engine_remove_wait(engine, &request->signaling.wait);
733                 i915_gem_request_put(request);
734                 wakeup = false;
735         }
736
737         spin_unlock(&b->rb_lock);
738
739         if (wakeup)
740                 wake_up_process(b->signaler);
741 }
742
743 void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
744 {
745         struct intel_engine_cs *engine = request->engine;
746         struct intel_breadcrumbs *b = &engine->breadcrumbs;
747
748         GEM_BUG_ON(!irqs_disabled());
749         lockdep_assert_held(&request->lock);
750         GEM_BUG_ON(!request->signaling.wait.seqno);
751
752         spin_lock(&b->rb_lock);
753
754         if (!RB_EMPTY_NODE(&request->signaling.node)) {
755                 if (request == rcu_access_pointer(b->first_signal)) {
756                         struct rb_node *rb =
757                                 rb_next(&request->signaling.node);
758                         rcu_assign_pointer(b->first_signal,
759                                            rb ? to_signaler(rb) : NULL);
760                 }
761                 rb_erase(&request->signaling.node, &b->signals);
762                 RB_CLEAR_NODE(&request->signaling.node);
763                 i915_gem_request_put(request);
764         }
765
766         __intel_engine_remove_wait(engine, &request->signaling.wait);
767
768         spin_unlock(&b->rb_lock);
769
770         request->signaling.wait.seqno = 0;
771 }
772
773 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
774 {
775         struct intel_breadcrumbs *b = &engine->breadcrumbs;
776         struct task_struct *tsk;
777
778         spin_lock_init(&b->rb_lock);
779         spin_lock_init(&b->irq_lock);
780
781         setup_timer(&b->fake_irq,
782                     intel_breadcrumbs_fake_irq,
783                     (unsigned long)engine);
784         setup_timer(&b->hangcheck,
785                     intel_breadcrumbs_hangcheck,
786                     (unsigned long)engine);
787
788         /* Spawn a thread to provide a common bottom-half for all signals.
789          * As this is an asynchronous interface we cannot steal the current
790          * task for handling the bottom-half to the user interrupt, therefore
791          * we create a thread to do the coherent seqno dance after the
792          * interrupt and then signal the waitqueue (via the dma-buf/fence).
793          */
794         tsk = kthread_run(intel_breadcrumbs_signaler, engine,
795                           "i915/signal:%d", engine->id);
796         if (IS_ERR(tsk))
797                 return PTR_ERR(tsk);
798
799         b->signaler = tsk;
800
801         return 0;
802 }
803
804 static void cancel_fake_irq(struct intel_engine_cs *engine)
805 {
806         struct intel_breadcrumbs *b = &engine->breadcrumbs;
807
808         del_timer_sync(&b->hangcheck);
809         del_timer_sync(&b->fake_irq);
810         clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
811 }
812
813 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
814 {
815         struct intel_breadcrumbs *b = &engine->breadcrumbs;
816
817         cancel_fake_irq(engine);
818         spin_lock_irq(&b->irq_lock);
819
820         if (b->irq_enabled)
821                 irq_enable(engine);
822         else
823                 irq_disable(engine);
824
825         /* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
826          * GPU is active and may have already executed the MI_USER_INTERRUPT
827          * before the CPU is ready to receive. However, the engine is currently
828          * idle (we haven't started it yet), there is no possibility for a
829          * missed interrupt as we enabled the irq and so we can clear the
830          * immediate wakeup (until a real interrupt arrives for the waiter).
831          */
832         clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
833
834         if (b->irq_armed)
835                 enable_fake_irq(b);
836
837         spin_unlock_irq(&b->irq_lock);
838 }
839
840 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
841 {
842         struct intel_breadcrumbs *b = &engine->breadcrumbs;
843
844         /* The engines should be idle and all requests accounted for! */
845         WARN_ON(READ_ONCE(b->irq_wait));
846         WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
847         WARN_ON(rcu_access_pointer(b->first_signal));
848         WARN_ON(!RB_EMPTY_ROOT(&b->signals));
849
850         if (!IS_ERR_OR_NULL(b->signaler))
851                 kthread_stop(b->signaler);
852
853         cancel_fake_irq(engine);
854 }
855
856 bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
857 {
858         struct intel_breadcrumbs *b = &engine->breadcrumbs;
859         bool busy = false;
860
861         spin_lock_irq(&b->rb_lock);
862
863         if (b->irq_wait) {
864                 wake_up_process(b->irq_wait->tsk);
865                 busy = true;
866         }
867
868         if (rcu_access_pointer(b->first_signal)) {
869                 wake_up_process(b->signaler);
870                 busy = true;
871         }
872
873         spin_unlock_irq(&b->rb_lock);
874
875         return busy;
876 }
877
878 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
879 #include "selftests/intel_breadcrumbs.c"
880 #endif