1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <uapi/linux/sched/types.h>
22 #include <linux/task_work.h>
23
24 #include "internals.h"
25
26 #ifdef CONFIG_IRQ_FORCED_THREADING
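/*
 * Set when the kernel is booted with "threadirqs" on the command
 * line (see the early_param() below); irq_setup_forced_threading()
 * then moves most primary handlers into threads.
 */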
27 __read_mostly bool force_irqthreads;
28
29 static int __init setup_forced_irqthreads(char *arg)
30 {
31         force_irqthreads = true;
32         return 0;
33 }
34 early_param("threadirqs", setup_forced_irqthreads);
35 #endif
36
37 static void __synchronize_hardirq(struct irq_desc *desc)
38 {
39         bool inprogress;
40
41         do {
42                 unsigned long flags;
43
44                 /*
45                  * Wait until we're out of the critical section.  This might
46                  * give the wrong answer due to the lack of memory barriers.
47                  */
48                 while (irqd_irq_inprogress(&desc->irq_data))
49                         cpu_relax();
50
51                 /* Ok, that indicated we're done: double-check carefully. */
52                 raw_spin_lock_irqsave(&desc->lock, flags);
53                 inprogress = irqd_irq_inprogress(&desc->irq_data);
54                 raw_spin_unlock_irqrestore(&desc->lock, flags);
55
56                 /* Oops, that failed? */
57         } while (inprogress);
58 }
59
60 /**
61  *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
62  *      @irq: interrupt number to wait for
63  *
64  *      This function waits for any pending hard IRQ handlers for this
65  *      interrupt to complete before returning. If you use this
66  *      function while holding a resource the IRQ handler may need you
67  *      will deadlock. It does not take associated threaded handlers
68  *      into account.
69  *
70  *      Do not use this for shutdown scenarios where you must be sure
71  *      that all parts (hardirq and threaded handler) have completed.
72  *
73  *      Returns: false if a threaded handler is active.
74  *
75  *      This function may be called - with care - from IRQ context.
76  */
77 bool synchronize_hardirq(unsigned int irq)
78 {
79         struct irq_desc *desc = irq_to_desc(irq);
80
81         if (desc) {
82                 __synchronize_hardirq(desc);
83                 return !atomic_read(&desc->threads_active);
84         }
85
86         return true;
87 }
88 EXPORT_SYMBOL(synchronize_hardirq);
89
90 /**
91  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
92  *      @irq: interrupt number to wait for
93  *
94  *      This function waits for any pending IRQ handlers for this interrupt
95  *      to complete before returning. If you use this function while
96  *      holding a resource the IRQ handler may need you will deadlock.
97  *
98  *      This function may be called - with care - from IRQ context.
99  */
100 void synchronize_irq(unsigned int irq)
101 {
102         struct irq_desc *desc = irq_to_desc(irq);
103
104         if (desc) {
105                 __synchronize_hardirq(desc);
106                 /*
107                  * We made sure that no hardirq handler is
108                  * running. Now verify that no threaded handlers are
109                  * active.
110                  */
111                 wait_event(desc->wait_for_threads,
112                            !atomic_read(&desc->threads_active));
113         }
114 }
115 EXPORT_SYMBOL(synchronize_irq);
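/*
 * Hypothetical usage sketch (illustrative names, not part of this
 * file): quiesce an interrupt before tearing down state that its
 * handlers dereference.
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		disable_irq_nosync(dev->irq);
 *		synchronize_irq(dev->irq);	// waits for hard and threaded handlers
 *		free_irq(dev->irq, dev);	// no handler instance can run anymore
 *	}
 */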
116
117 #ifdef CONFIG_SMP
118 cpumask_var_t irq_default_affinity;
119
120 static bool __irq_can_set_affinity(struct irq_desc *desc)
121 {
122         if (!desc || !irqd_can_balance(&desc->irq_data) ||
123             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
124                 return false;
125         return true;
126 }
127
128 /**
129  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
130  *      @irq:           Interrupt to check
131  *
132  */
133 int irq_can_set_affinity(unsigned int irq)
134 {
135         return __irq_can_set_affinity(irq_to_desc(irq));
136 }
137
138 /**
139  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
140  * @irq:        Interrupt to check
141  *
142  * Like irq_can_set_affinity() above, but additionally checks for the
143  * AFFINITY_MANAGED flag.
144  */
145 bool irq_can_set_affinity_usr(unsigned int irq)
146 {
147         struct irq_desc *desc = irq_to_desc(irq);
148
149         return __irq_can_set_affinity(desc) &&
150                 !irqd_affinity_is_managed(&desc->irq_data);
151 }
152
153 /**
154  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
155  *      @desc:          irq descriptor which has its affinity changed
156  *
157  *      We just set IRQTF_AFFINITY and delegate the affinity setting
158  *      to the interrupt thread itself. We cannot call
159  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
160  *      code can be called from hard interrupt context.
161  */
162 void irq_set_thread_affinity(struct irq_desc *desc)
163 {
164         struct irqaction *action;
165
166         for_each_action_of_desc(desc, action)
167                 if (action->thread)
168                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
169 }
170
171 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
172 static void irq_validate_effective_affinity(struct irq_data *data)
173 {
174         const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
175         struct irq_chip *chip = irq_data_get_irq_chip(data);
176
177         if (!cpumask_empty(m))
178                 return;
179         pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
180                      chip->name, data->irq);
181 }
182
183 static inline void irq_init_effective_affinity(struct irq_data *data,
184                                                const struct cpumask *mask)
185 {
186         cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
187 }
188 #else
189 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
190 static inline void irq_init_effective_affinity(struct irq_data *data,
191                                                const struct cpumask *mask) { }
192 #endif
193
194 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
195                         bool force)
196 {
197         struct irq_desc *desc = irq_data_to_desc(data);
198         struct irq_chip *chip = irq_data_get_irq_chip(data);
199         int ret;
200
201         if (!chip || !chip->irq_set_affinity)
202                 return -EINVAL;
203
204         ret = chip->irq_set_affinity(data, mask, force);
205         switch (ret) {
206         case IRQ_SET_MASK_OK:
207         case IRQ_SET_MASK_OK_DONE:
208                 cpumask_copy(desc->irq_common_data.affinity, mask);
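                /* fall through */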
209         case IRQ_SET_MASK_OK_NOCOPY:
210                 irq_validate_effective_affinity(data);
211                 irq_set_thread_affinity(desc);
212                 ret = 0;
213         }
214
215         return ret;
216 }
217
218 static bool irq_set_affinity_deactivated(struct irq_data *data,
219                                          const struct cpumask *mask, bool force)
220 {
221         struct irq_desc *desc = irq_data_to_desc(data);
222
223         /*
224          * Handle irq chips which can handle affinity only in activated
225          * state correctly
226          *
227          * If the interrupt is not yet activated, just store the affinity
228          * mask and do not call the chip driver at all. On activation the
229          * driver has to make sure anyway that the interrupt is in a
231          * usable state so startup works.
231          */
232         if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
233             irqd_is_activated(data) || !irqd_affinity_on_activate(data))
234                 return false;
235
236         cpumask_copy(desc->irq_common_data.affinity, mask);
237         irq_init_effective_affinity(data, mask);
238         irqd_set(data, IRQD_AFFINITY_SET);
239         return true;
240 }
241
242 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
243                             bool force)
244 {
245         struct irq_chip *chip = irq_data_get_irq_chip(data);
246         struct irq_desc *desc = irq_data_to_desc(data);
247         int ret = 0;
248
249         if (!chip || !chip->irq_set_affinity)
250                 return -EINVAL;
251
252         if (irq_set_affinity_deactivated(data, mask, force))
253                 return 0;
254
255         if (irq_can_move_pcntxt(data)) {
256                 ret = irq_do_set_affinity(data, mask, force);
257         } else {
258                 irqd_set_move_pending(data);
259                 irq_copy_pending(desc, mask);
260         }
261
262         if (desc->affinity_notify) {
263                 kref_get(&desc->affinity_notify->kref);
264                 if (!schedule_work(&desc->affinity_notify->work)) {
265                         /* Work was already scheduled, drop our extra ref */
266                         kref_put(&desc->affinity_notify->kref,
267                                  desc->affinity_notify->release);
268                 }
269         }
270         irqd_set(data, IRQD_AFFINITY_SET);
271
272         return ret;
273 }
274
275 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
276 {
277         struct irq_desc *desc = irq_to_desc(irq);
278         unsigned long flags;
279         int ret;
280
281         if (!desc)
282                 return -EINVAL;
283
284         raw_spin_lock_irqsave(&desc->lock, flags);
285         ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
286         raw_spin_unlock_irqrestore(&desc->lock, flags);
287         return ret;
288 }
289
290 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
291 {
292         unsigned long flags;
293         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
294
295         if (!desc)
296                 return -EINVAL;
297         desc->affinity_hint = m;
298         irq_put_desc_unlock(desc, flags);
299         /* set the initial affinity to prevent every interrupt being on CPU0 */
300         if (m)
301                 __irq_set_affinity(irq, m, false);
302         return 0;
303 }
304 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
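/*
 * Hypothetical usage sketch (illustrative names): a multiqueue driver
 * spreading its vectors and letting irqbalance follow the hints.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec[i], cpumask_of(i % nr_cpu_ids));
 *
 * The hint must be cleared again before the interrupt is freed:
 *
 *	irq_set_affinity_hint(vec[i], NULL);
 */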
305
306 static void irq_affinity_notify(struct work_struct *work)
307 {
308         struct irq_affinity_notify *notify =
309                 container_of(work, struct irq_affinity_notify, work);
310         struct irq_desc *desc = irq_to_desc(notify->irq);
311         cpumask_var_t cpumask;
312         unsigned long flags;
313
314         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
315                 goto out;
316
317         raw_spin_lock_irqsave(&desc->lock, flags);
318         if (irq_move_pending(&desc->irq_data))
319                 irq_get_pending(cpumask, desc);
320         else
321                 cpumask_copy(cpumask, desc->irq_common_data.affinity);
322         raw_spin_unlock_irqrestore(&desc->lock, flags);
323
324         notify->notify(notify, cpumask);
325
326         free_cpumask_var(cpumask);
327 out:
328         kref_put(&notify->kref, notify->release);
329 }
330
331 /**
332  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
333  *      @irq:           Interrupt for which to enable/disable notification
334  *      @notify:        Context for notification, or %NULL to disable
335  *                      notification.  Function pointers must be initialised;
336  *                      the other fields will be initialised by this function.
337  *
338  *      Must be called in process context.  Notification may only be enabled
339  *      after the IRQ is allocated and must be disabled before the IRQ is
340  *      freed using free_irq().
341  */
342 int
343 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
344 {
345         struct irq_desc *desc = irq_to_desc(irq);
346         struct irq_affinity_notify *old_notify;
347         unsigned long flags;
348
349         /* The release function is promised process context */
350         might_sleep();
351
352         if (!desc)
353                 return -EINVAL;
354
355         /* Complete initialisation of *notify */
356         if (notify) {
357                 notify->irq = irq;
358                 kref_init(&notify->kref);
359                 INIT_WORK(&notify->work, irq_affinity_notify);
360         }
361
362         raw_spin_lock_irqsave(&desc->lock, flags);
363         old_notify = desc->affinity_notify;
364         desc->affinity_notify = notify;
365         raw_spin_unlock_irqrestore(&desc->lock, flags);
366
367         if (old_notify) {
368                 if (cancel_work_sync(&old_notify->work)) {
369                         /* Pending work had a ref, put that one too */
370                         kref_put(&old_notify->kref, old_notify->release);
371                 }
372                 kref_put(&old_notify->kref, old_notify->release);
373         }
374
375         return 0;
376 }
377 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
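/*
 * Hypothetical usage sketch (my_notify/my_release and struct my_queue
 * are illustrative): only the two function pointers need to be filled
 * in before registering.
 *
 *	q->notify.notify = my_notify;		// re-steer queue to *mask
 *	q->notify.release = my_release;		// kref release, frees q
 *	irq_set_affinity_notifier(irq, &q->notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// required before free_irq()
 */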
378
379 #ifndef CONFIG_AUTO_IRQ_AFFINITY
380 /*
381  * Generic version of the affinity autoselector.
382  */
383 int irq_setup_affinity(struct irq_desc *desc)
384 {
385         struct cpumask *set = irq_default_affinity;
386         int ret, node = irq_desc_get_node(desc);
387         static DEFINE_RAW_SPINLOCK(mask_lock);
388         static struct cpumask mask;
389
390         /* Excludes PER_CPU and NO_BALANCE interrupts */
391         if (!__irq_can_set_affinity(desc))
392                 return 0;
393
394         raw_spin_lock(&mask_lock);
395         /*
396          * Preserve the managed affinity setting and a userspace affinity
397          * setup, but make sure that one of the targets is online.
398          */
399         if (irqd_affinity_is_managed(&desc->irq_data) ||
400             irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
401                 if (cpumask_intersects(desc->irq_common_data.affinity,
402                                        cpu_online_mask))
403                         set = desc->irq_common_data.affinity;
404                 else
405                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
406         }
407
408         cpumask_and(&mask, cpu_online_mask, set);
409         if (cpumask_empty(&mask))
410                 cpumask_copy(&mask, cpu_online_mask);
411
412         if (node != NUMA_NO_NODE) {
413                 const struct cpumask *nodemask = cpumask_of_node(node);
414
415                 /* make sure at least one of the cpus in nodemask is online */
416                 if (cpumask_intersects(&mask, nodemask))
417                         cpumask_and(&mask, &mask, nodemask);
418         }
419         ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
420         raw_spin_unlock(&mask_lock);
421         return ret;
422 }
423 #else
424 /* Wrapper for ALPHA specific affinity selector magic */
425 int irq_setup_affinity(struct irq_desc *desc)
426 {
427         return irq_select_affinity(irq_desc_get_irq(desc));
428 }
429 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
430 #endif /* CONFIG_SMP */
431
432
433 /**
434  *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
435  *      @irq: interrupt number to set affinity
436  *      @vcpu_info: vCPU specific data
437  *
438  *      This function uses the vCPU specific data to set the vCPU
439  *      affinity for an irq. The vCPU specific data is passed from
440  *      outside, such as KVM. One example code path is as below:
441  *      KVM -> IOMMU -> irq_set_vcpu_affinity().
442  */
443 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
444 {
445         unsigned long flags;
446         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
447         struct irq_data *data;
448         struct irq_chip *chip;
449         int ret = -ENOSYS;
450
451         if (!desc)
452                 return -EINVAL;
453
454         data = irq_desc_get_irq_data(desc);
455         do {
456                 chip = irq_data_get_irq_chip(data);
457                 if (chip && chip->irq_set_vcpu_affinity)
458                         break;
459 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
460                 data = data->parent_data;
461 #else
462                 data = NULL;
463 #endif
464         } while (data);
465
466         if (data)
467                 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
468         irq_put_desc_unlock(desc, flags);
469
470         return ret;
471 }
472 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
473
474 void __disable_irq(struct irq_desc *desc)
475 {
476         if (!desc->depth++)
477                 irq_disable(desc);
478 }
479
480 static int __disable_irq_nosync(unsigned int irq)
481 {
482         unsigned long flags;
483         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
484
485         if (!desc)
486                 return -EINVAL;
487         __disable_irq(desc);
488         irq_put_desc_busunlock(desc, flags);
489         return 0;
490 }
491
492 /**
493  *      disable_irq_nosync - disable an irq without waiting
494  *      @irq: Interrupt to disable
495  *
496  *      Disable the selected interrupt line.  Disables and Enables are
497  *      nested.
498  *      Unlike disable_irq(), this function does not ensure existing
499  *      instances of the IRQ handler have completed before returning.
500  *
501  *      This function may be called from IRQ context.
502  */
503 void disable_irq_nosync(unsigned int irq)
504 {
505         __disable_irq_nosync(irq);
506 }
507 EXPORT_SYMBOL(disable_irq_nosync);
508
509 /**
510  *      disable_irq - disable an irq and wait for completion
511  *      @irq: Interrupt to disable
512  *
513  *      Disable the selected interrupt line.  Enables and Disables are
514  *      nested.
515  *      This function waits for any pending IRQ handlers for this interrupt
516  *      to complete before returning. If you use this function while
517  *      holding a resource the IRQ handler may need you will deadlock.
518  *
519  *      This function may be called - with care - from IRQ context.
520  */
521 void disable_irq(unsigned int irq)
522 {
523         if (!__disable_irq_nosync(irq))
524                 synchronize_irq(irq);
525 }
526 EXPORT_SYMBOL(disable_irq);
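/*
 * Nesting sketch: disables and enables are counted, so each
 * disable_irq() needs a matching enable_irq(), and the line is only
 * unmasked again when the depth drops back to zero.
 *
 *	disable_irq(irq);	// depth 0 -> 1, line disabled
 *	disable_irq(irq);	// depth 1 -> 2
 *	enable_irq(irq);	// depth 2 -> 1, still disabled
 *	enable_irq(irq);	// depth 1 -> 0, line enabled again
 */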
527
528 /**
529  *      disable_hardirq - disables an irq and waits for hardirq completion
530  *      @irq: Interrupt to disable
531  *
532  *      Disable the selected interrupt line.  Enables and Disables are
533  *      nested.
534  *      This function waits for any pending hard IRQ handlers for this
535  *      interrupt to complete before returning. If you use this function while
536  *      holding a resource the hard IRQ handler may need you will deadlock.
537  *
538  *      When used to optimistically disable an interrupt from atomic context
539  *      the return value must be checked.
540  *
541  *      Returns: false if a threaded handler is active.
542  *
543  *      This function may be called - with care - from IRQ context.
544  */
545 bool disable_hardirq(unsigned int irq)
546 {
547         if (!__disable_irq_nosync(irq))
548                 return synchronize_hardirq(irq);
549
550         return false;
551 }
552 EXPORT_SYMBOL_GPL(disable_hardirq);
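/*
 * Hypothetical usage sketch (my_poll_locked() is illustrative): the
 * optimistic atomic-context pattern mentioned above; poll only when
 * no hard irq handler can still be running.
 *
 *	if (disable_hardirq(irq))
 *		my_poll_locked(dev);	// hardirq handlers have completed
 *	enable_irq(irq);
 */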
553
554 void __enable_irq(struct irq_desc *desc)
555 {
556         switch (desc->depth) {
557         case 0:
558  err_out:
559                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
560                      irq_desc_get_irq(desc));
561                 break;
562         case 1: {
563                 if (desc->istate & IRQS_SUSPENDED)
564                         goto err_out;
565                 /* Prevent probing on this irq: */
566                 irq_settings_set_noprobe(desc);
567                 /*
568                  * Call irq_startup() not irq_enable() here because the
569                  * interrupt might be marked NOAUTOEN. So irq_startup()
570                  * needs to be invoked when it gets enabled the first
571                  * time. If it was already started up, then irq_startup()
572                  * will invoke irq_enable() under the hood.
573                  */
574                 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
575                 break;
576         }
577         default:
578                 desc->depth--;
579         }
580 }
581
582 /**
583  *      enable_irq - enable handling of an irq
584  *      @irq: Interrupt to enable
585  *
586  *      Undoes the effect of one call to disable_irq().  If this
587  *      matches the last disable, processing of interrupts on this
588  *      IRQ line is re-enabled.
589  *
590  *      This function may be called from IRQ context only when
591  *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
592  */
593 void enable_irq(unsigned int irq)
594 {
595         unsigned long flags;
596         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
597
598         if (!desc)
599                 return;
600         if (WARN(!desc->irq_data.chip,
601                  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
602                 goto out;
603
604         __enable_irq(desc);
605 out:
606         irq_put_desc_busunlock(desc, flags);
607 }
608 EXPORT_SYMBOL(enable_irq);
609
610 static int set_irq_wake_real(unsigned int irq, unsigned int on)
611 {
612         struct irq_desc *desc = irq_to_desc(irq);
613         int ret = -ENXIO;
614
615         if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
616                 return 0;
617
618         if (desc->irq_data.chip->irq_set_wake)
619                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
620
621         return ret;
622 }
623
624 /**
625  *      irq_set_irq_wake - control irq power management wakeup
626  *      @irq:   interrupt to control
627  *      @on:    enable/disable power management wakeup
628  *
629  *      Enable/disable power management wakeup mode, which is
630  *      disabled by default.  Enables and disables must match,
631  *      just as they match for non-wakeup mode support.
632  *
633  *      Wakeup mode lets this IRQ wake the system from sleep
634  *      states like "suspend to RAM".
635  */
636 int irq_set_irq_wake(unsigned int irq, unsigned int on)
637 {
638         unsigned long flags;
639         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
640         int ret = 0;
641
642         if (!desc)
643                 return -EINVAL;
644
645         /* wakeup-capable irqs can be shared between drivers that
646          * don't need to have the same sleep mode behaviors.
647          */
648         if (on) {
649                 if (desc->wake_depth++ == 0) {
650                         ret = set_irq_wake_real(irq, on);
651                         if (ret)
652                                 desc->wake_depth = 0;
653                         else
654                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
655                 }
656         } else {
657                 if (desc->wake_depth == 0) {
658                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
659                 } else if (--desc->wake_depth == 0) {
660                         ret = set_irq_wake_real(irq, on);
661                         if (ret)
662                                 desc->wake_depth = 1;
663                         else
664                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
665                 }
666         }
667         irq_put_desc_busunlock(desc, flags);
668         return ret;
669 }
670 EXPORT_SYMBOL(irq_set_irq_wake);
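/*
 * Hypothetical usage sketch (illustrative names): a driver arming its
 * line as a wakeup source across suspend, keeping the calls balanced.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_dev->irq, 1);
 *		return 0;
 *	}
 *
 * The matching resume callback calls irq_set_irq_wake(my_dev->irq, 0).
 */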
671
672 /*
673  * Internal function that tells the architecture code whether a
674  * particular irq has been exclusively allocated or is available
675  * for driver use.
676  */
677 int can_request_irq(unsigned int irq, unsigned long irqflags)
678 {
679         unsigned long flags;
680         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
681         int canrequest = 0;
682
683         if (!desc)
684                 return 0;
685
686         if (irq_settings_can_request(desc)) {
687                 if (!desc->action ||
688                     irqflags & desc->action->flags & IRQF_SHARED)
689                         canrequest = 1;
690         }
691         irq_put_desc_unlock(desc, flags);
692         return canrequest;
693 }
694
695 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
696 {
697         struct irq_chip *chip = desc->irq_data.chip;
698         int ret, unmask = 0;
699
700         if (!chip || !chip->irq_set_type) {
701                 /*
702                  * IRQF_TRIGGER_* but the PIC does not support multiple
703                  * flow-types?
704                  */
705                 pr_debug("No set_type function for IRQ %d (%s)\n",
706                          irq_desc_get_irq(desc),
707                          chip ? (chip->name ? : "unknown") : "unknown");
708                 return 0;
709         }
710
711         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
712                 if (!irqd_irq_masked(&desc->irq_data))
713                         mask_irq(desc);
714                 if (!irqd_irq_disabled(&desc->irq_data))
715                         unmask = 1;
716         }
717
718         /* Mask all flags except trigger mode */
719         flags &= IRQ_TYPE_SENSE_MASK;
720         ret = chip->irq_set_type(&desc->irq_data, flags);
721
722         switch (ret) {
723         case IRQ_SET_MASK_OK:
724         case IRQ_SET_MASK_OK_DONE:
725                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
726                 irqd_set(&desc->irq_data, flags);
727
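                /* fall through */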
728         case IRQ_SET_MASK_OK_NOCOPY:
729                 flags = irqd_get_trigger_type(&desc->irq_data);
730                 irq_settings_set_trigger_mask(desc, flags);
731                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
732                 irq_settings_clr_level(desc);
733                 if (flags & IRQ_TYPE_LEVEL_MASK) {
734                         irq_settings_set_level(desc);
735                         irqd_set(&desc->irq_data, IRQD_LEVEL);
736                 }
737
738                 ret = 0;
739                 break;
740         default:
741                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
742                        flags, irq_desc_get_irq(desc), chip->irq_set_type);
743         }
744         if (unmask)
745                 unmask_irq(desc);
746         return ret;
747 }
748
749 #ifdef CONFIG_HARDIRQS_SW_RESEND
750 int irq_set_parent(int irq, int parent_irq)
751 {
752         unsigned long flags;
753         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
754
755         if (!desc)
756                 return -EINVAL;
757
758         desc->parent_irq = parent_irq;
759
760         irq_put_desc_unlock(desc, flags);
761         return 0;
762 }
763 EXPORT_SYMBOL_GPL(irq_set_parent);
764 #endif
765
766 /*
767  * Default primary interrupt handler for threaded interrupts. Is
768  * assigned as primary handler when request_threaded_irq is called
769  * with handler == NULL. Useful for oneshot interrupts.
770  */
771 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
772 {
773         return IRQ_WAKE_THREAD;
774 }
775
776 /*
777  * Primary handler for nested threaded interrupts. Should never be
778  * called.
779  */
780 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
781 {
782         WARN(1, "Primary handler called for nested irq %d\n", irq);
783         return IRQ_NONE;
784 }
785
786 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
787 {
788         WARN(1, "Secondary action handler called for irq %d\n", irq);
789         return IRQ_NONE;
790 }
791
792 static int irq_wait_for_interrupt(struct irqaction *action)
793 {
794         set_current_state(TASK_INTERRUPTIBLE);
795
796         while (!kthread_should_stop()) {
797
798                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
799                                        &action->thread_flags)) {
800                         __set_current_state(TASK_RUNNING);
801                         return 0;
802                 }
803                 schedule();
804                 set_current_state(TASK_INTERRUPTIBLE);
805         }
806         __set_current_state(TASK_RUNNING);
807         return -1;
808 }
809
810 /*
811  * Oneshot interrupts keep the irq line masked until the threaded
812  * handler has finished. Unmask it if the interrupt has not been
813  * disabled and is marked MASKED.
814  */
815 static void irq_finalize_oneshot(struct irq_desc *desc,
816                                  struct irqaction *action)
817 {
818         if (!(desc->istate & IRQS_ONESHOT) ||
819             action->handler == irq_forced_secondary_handler)
820                 return;
821 again:
822         chip_bus_lock(desc);
823         raw_spin_lock_irq(&desc->lock);
824
825         /*
826          * Implausible though it may be, we need to protect ourselves
827          * against the following scenario:
828          *
829          * The thread finishes before the hard interrupt handler on
830          * the other CPU. If we unmask the irq line, the interrupt can
831          * come in again, mask the line, and then bail out due to
832          * IRQS_INPROGRESS, leaving the irq line masked forever.
833          *
834          * This also serializes the state of shared oneshot handlers
835          * versus "desc->threads_oneshot |= action->thread_mask;" in
836          * irq_wake_thread(). See the comment there which explains the
837          * serialization.
838          */
839         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
840                 raw_spin_unlock_irq(&desc->lock);
841                 chip_bus_sync_unlock(desc);
842                 cpu_relax();
843                 goto again;
844         }
845
846         /*
847          * Now check again, whether the thread should run. Otherwise
848          * we would clear the threads_oneshot bit of this thread which
849          * was just set.
850          */
851         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
852                 goto out_unlock;
853
854         desc->threads_oneshot &= ~action->thread_mask;
855
856         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
857             irqd_irq_masked(&desc->irq_data))
858                 unmask_threaded_irq(desc);
859
860 out_unlock:
861         raw_spin_unlock_irq(&desc->lock);
862         chip_bus_sync_unlock(desc);
863 }
864
865 #ifdef CONFIG_SMP
866 /*
867  * Check whether we need to change the affinity of the interrupt thread.
868  */
869 static void
870 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
871 {
872         cpumask_var_t mask;
873         bool valid = true;
874
875         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
876                 return;
877
878         /*
879          * In case we are out of memory, we set IRQTF_AFFINITY again and
880          * retry on the next invocation.
881          */
882         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
883                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
884                 return;
885         }
886
887         raw_spin_lock_irq(&desc->lock);
888         /*
889          * This code is triggered unconditionally. Check the affinity
890          * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
891          */
892         if (cpumask_available(desc->irq_common_data.affinity))
893                 cpumask_copy(mask, desc->irq_common_data.affinity);
894         else
895                 valid = false;
896         raw_spin_unlock_irq(&desc->lock);
897
898         if (valid)
899                 set_cpus_allowed_ptr(current, mask);
900         free_cpumask_var(mask);
901 }
902 #else
903 static inline void
904 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
905 #endif
906
907 /*
908  * Interrupts which are not explicitly requested as threaded
909  * interrupts rely on the implicit bh/preempt disable of the hard irq
910  * context. So we need to disable bh here to avoid deadlocks and other
911  * side effects.
912  */
913 static irqreturn_t
914 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
915 {
916         irqreturn_t ret;
917
918         local_bh_disable();
919         if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
920                 local_irq_disable();
921         ret = action->thread_fn(action->irq, action->dev_id);
922         if (ret == IRQ_HANDLED)
923                 atomic_inc(&desc->threads_handled);
924
925         irq_finalize_oneshot(desc, action);
926         if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
927                 local_irq_enable();
928         local_bh_enable();
929         return ret;
930 }
931
932 /*
933  * Interrupts explicitly requested as threaded interrupts want to be
934  * preemptible - many of them need to sleep and wait for slow buses to
935  * complete.
936  */
937 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
938                 struct irqaction *action)
939 {
940         irqreturn_t ret;
941
942         ret = action->thread_fn(action->irq, action->dev_id);
943         if (ret == IRQ_HANDLED)
944                 atomic_inc(&desc->threads_handled);
945
946         irq_finalize_oneshot(desc, action);
947         return ret;
948 }
949
950 static void wake_threads_waitq(struct irq_desc *desc)
951 {
952         if (atomic_dec_and_test(&desc->threads_active))
953                 wake_up(&desc->wait_for_threads);
954 }
955
956 static void irq_thread_dtor(struct callback_head *unused)
957 {
958         struct task_struct *tsk = current;
959         struct irq_desc *desc;
960         struct irqaction *action;
961
962         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
963                 return;
964
965         action = kthread_data(tsk);
966
967         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
968                tsk->comm, tsk->pid, action->irq);
969
970
971         desc = irq_to_desc(action->irq);
972         /*
973          * If IRQTF_RUNTHREAD is set, we need to decrement
974          * desc->threads_active and wake possible waiters.
975          */
976         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
977                 wake_threads_waitq(desc);
978
979         /* Prevent a stale desc->threads_oneshot */
980         irq_finalize_oneshot(desc, action);
981 }
982
983 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
984 {
985         struct irqaction *secondary = action->secondary;
986
987         if (WARN_ON_ONCE(!secondary))
988                 return;
989
990         raw_spin_lock_irq(&desc->lock);
991         __irq_wake_thread(desc, secondary);
992         raw_spin_unlock_irq(&desc->lock);
993 }
994
995 /*
996  * Interrupt handler thread
997  */
998 static int irq_thread(void *data)
999 {
1000         struct callback_head on_exit_work;
1001         struct irqaction *action = data;
1002         struct irq_desc *desc = irq_to_desc(action->irq);
1003         irqreturn_t (*handler_fn)(struct irq_desc *desc,
1004                         struct irqaction *action);
1005
1006         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1007                                         &action->thread_flags))
1008                 handler_fn = irq_forced_thread_fn;
1009         else
1010                 handler_fn = irq_thread_fn;
1011
1012         init_task_work(&on_exit_work, irq_thread_dtor);
1013         task_work_add(current, &on_exit_work, false);
1014
1015         irq_thread_check_affinity(desc, action);
1016
1017         while (!irq_wait_for_interrupt(action)) {
1018                 irqreturn_t action_ret;
1019
1020                 irq_thread_check_affinity(desc, action);
1021
1022                 action_ret = handler_fn(desc, action);
1023                 if (action_ret == IRQ_WAKE_THREAD)
1024                         irq_wake_secondary(desc, action);
1025
1026                 wake_threads_waitq(desc);
1027         }
1028
1029         /*
1030          * This is the regular exit path. __free_irq() is stopping the
1031          * thread via kthread_stop() after calling
1032          * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
1033          * oneshot mask bit can be set. We cannot verify that as we
1034          * cannot touch the oneshot mask at this point anymore as
1035          * __setup_irq() might have given out current's thread_mask
1036          * again.
1037          */
1038         task_work_cancel(current, irq_thread_dtor);
1039         return 0;
1040 }
1041
1042 /**
1043  *      irq_wake_thread - wake the irq thread for the action identified by dev_id
1044  *      @irq:           Interrupt line
1045  *      @dev_id:        Device identity for which the thread should be woken
1046  *
1047  */
1048 void irq_wake_thread(unsigned int irq, void *dev_id)
1049 {
1050         struct irq_desc *desc = irq_to_desc(irq);
1051         struct irqaction *action;
1052         unsigned long flags;
1053
1054         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1055                 return;
1056
1057         raw_spin_lock_irqsave(&desc->lock, flags);
1058         for_each_action_of_desc(desc, action) {
1059                 if (action->dev_id == dev_id) {
1060                         if (action->thread)
1061                                 __irq_wake_thread(desc, action);
1062                         break;
1063                 }
1064         }
1065         raw_spin_unlock_irqrestore(&desc->lock, flags);
1066 }
1067 EXPORT_SYMBOL_GPL(irq_wake_thread);
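/*
 * Hypothetical usage sketch (illustrative names): kicking the threaded
 * handler from a watchdog path when the hardware failed to raise the
 * interrupt; dev must match the dev_id used when requesting the irq.
 *
 *	if (!my_irq_seen(dev))
 *		irq_wake_thread(dev->irq, dev);
 */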
1068
1069 static int irq_setup_forced_threading(struct irqaction *new)
1070 {
1071         if (!force_irqthreads)
1072                 return 0;
1073         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1074                 return 0;
1075
1076         /*
1077          * No further action required for interrupts which are requested as
1078          * threaded interrupts already
1079          */
1080         if (new->handler == irq_default_primary_handler)
1081                 return 0;
1082
1083         new->flags |= IRQF_ONESHOT;
1084
1085         /*
1086          * Handle the case where we have a real primary handler and a
1087          * thread handler. We force thread them as well by creating a
1088          * secondary action.
1089          */
1090         if (new->handler && new->thread_fn) {
1091                 /* Allocate the secondary action */
1092                 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1093                 if (!new->secondary)
1094                         return -ENOMEM;
1095                 new->secondary->handler = irq_forced_secondary_handler;
1096                 new->secondary->thread_fn = new->thread_fn;
1097                 new->secondary->dev_id = new->dev_id;
1098                 new->secondary->irq = new->irq;
1099                 new->secondary->name = new->name;
1100         }
1101         /* Deal with the primary handler */
1102         set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1103         new->thread_fn = new->handler;
1104         new->handler = irq_default_primary_handler;
1105         return 0;
1106 }
1107
1108 static int irq_request_resources(struct irq_desc *desc)
1109 {
1110         struct irq_data *d = &desc->irq_data;
1111         struct irq_chip *c = d->chip;
1112
1113         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1114 }
1115
1116 static void irq_release_resources(struct irq_desc *desc)
1117 {
1118         struct irq_data *d = &desc->irq_data;
1119         struct irq_chip *c = d->chip;
1120
1121         if (c->irq_release_resources)
1122                 c->irq_release_resources(d);
1123 }
1124
1125 static int
1126 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1127 {
1128         struct task_struct *t;
1129         struct sched_param param = {
1130                 .sched_priority = MAX_USER_RT_PRIO/2,
1131         };
1132
1133         if (!secondary) {
1134                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1135                                    new->name);
1136         } else {
1137                 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1138                                    new->name);
1139                 param.sched_priority -= 1;
1140         }
1141
1142         if (IS_ERR(t))
1143                 return PTR_ERR(t);
1144
1145         sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1146
1147         /*
1148          * We keep the reference to the task struct even if
1149          * the thread dies to avoid that the interrupt code
1150          * references an already freed task_struct.
1151          */
1152         get_task_struct(t);
1153         new->thread = t;
1154         /*
1155          * Tell the thread to set its affinity. This is
1156          * important for shared interrupt handlers as we do
1157          * not invoke setup_affinity() for the secondary
1158          * handlers as everything is already set up. Even for
1159          * interrupts marked with IRQF_NOBALANCING this is
1160          * correct as we want the thread to move to the cpu(s)
1161          * on which the requesting code placed the interrupt.
1162          */
1163         set_bit(IRQTF_AFFINITY, &new->thread_flags);
1164         return 0;
1165 }
1166
1167 /*
1168  * Internal function to register an irqaction - typically used to
1169  * allocate special interrupts that are part of the architecture.
1170  *
1171  * Locking rules:
1172  *
1173  * desc->request_mutex  Provides serialization against a concurrent free_irq()
1174  *   chip_bus_lock      Provides serialization for slow bus operations
1175  *     desc->lock       Provides serialization against hard interrupts
1176  *
1177  * chip_bus_lock and desc->lock are sufficient for all other management and
1178  * interrupt related functions. desc->request_mutex solely serializes
1179  * request/free_irq().
1180  */
1181 static int
1182 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1183 {
1184         struct irqaction *old, **old_ptr;
1185         unsigned long flags, thread_mask = 0;
1186         int ret, nested, shared = 0;
1187
1188         if (!desc)
1189                 return -EINVAL;
1190
1191         if (desc->irq_data.chip == &no_irq_chip)
1192                 return -ENOSYS;
1193         if (!try_module_get(desc->owner))
1194                 return -ENODEV;
1195
1196         new->irq = irq;
1197
1198         /*
1199          * If the trigger type is not specified by the caller,
1200          * then use the default for this interrupt.
1201          */
1202         if (!(new->flags & IRQF_TRIGGER_MASK))
1203                 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1204
1205         /*
1206          * Check whether the interrupt nests into another interrupt
1207          * thread.
1208          */
1209         nested = irq_settings_is_nested_thread(desc);
1210         if (nested) {
1211                 if (!new->thread_fn) {
1212                         ret = -EINVAL;
1213                         goto out_mput;
1214                 }
1215                 /*
1216                  * Replace the primary handler which was provided from
1217                  * the driver for non nested interrupt handling by the
1218                  * dummy function which warns when called.
1219                  */
1220                 new->handler = irq_nested_primary_handler;
1221         } else {
1222                 if (irq_settings_can_thread(desc)) {
1223                         ret = irq_setup_forced_threading(new);
1224                         if (ret)
1225                                 goto out_mput;
1226                 }
1227         }
1228
1229         /*
1230          * Create a handler thread when a thread function is supplied
1231          * and the interrupt does not nest into another interrupt
1232          * thread.
1233          */
1234         if (new->thread_fn && !nested) {
1235                 ret = setup_irq_thread(new, irq, false);
1236                 if (ret)
1237                         goto out_mput;
1238                 if (new->secondary) {
1239                         ret = setup_irq_thread(new->secondary, irq, true);
1240                         if (ret)
1241                                 goto out_thread;
1242                 }
1243         }
1244
1245         /*
1246          * Drivers are often written to work w/o knowledge about the
1247          * underlying irq chip implementation, so a request for a
1248          * threaded irq without a primary hard irq context handler
1249          * requires the ONESHOT flag to be set. Some irq chips like
1250          * MSI based interrupts are per se one shot safe. Check the
1251          * chip flags, so we can avoid the unmask dance at the end of
1252          * the threaded handler for those.
1253          */
1254         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1255                 new->flags &= ~IRQF_ONESHOT;
1256
1257         /*
1258          * Protects against a concurrent __free_irq() call which might wait
1259          * for synchronize_irq() to complete without holding the optional
1260          * chip bus lock and desc->lock.
1261          */
1262         mutex_lock(&desc->request_mutex);
1263
1264         /*
1265          * Acquire bus lock as the irq_request_resources() callback below
1266          * might rely on the serialization or the magic power management
1267          * functions which are abusing the irq_bus_lock() callback,
1268          */
1269         chip_bus_lock(desc);
1270
1271         /* First installed action requests resources. */
1272         if (!desc->action) {
1273                 ret = irq_request_resources(desc);
1274                 if (ret) {
1275                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1276                                new->name, irq, desc->irq_data.chip->name);
1277                         goto out_bus_unlock;
1278                 }
1279         }
1280
1281         /*
1282          * The following block of code has to be executed atomically
1283          * protected against a concurrent interrupt and any of the other
1284          * management calls which are not serialized via
1285          * desc->request_mutex or the optional bus lock.
1286          */
1287         raw_spin_lock_irqsave(&desc->lock, flags);
1288         old_ptr = &desc->action;
1289         old = *old_ptr;
1290         if (old) {
1291                 /*
1292                  * Can't share interrupts unless both agree to and are
1293                  * the same type (level, edge, polarity). So both flag
1294                  * fields must have IRQF_SHARED set and the bits which
1295                  * set the trigger type must match. Also all must
1296                  * agree on ONESHOT.
1297                  */
1298                 unsigned int oldtype;
1299
1300                 /*
1301                  * If nobody set the trigger configuration before, inherit
1302                  * the one provided by the requester.
1303                  */
1304                 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1305                         oldtype = irqd_get_trigger_type(&desc->irq_data);
1306                 } else {
1307                         oldtype = new->flags & IRQF_TRIGGER_MASK;
1308                         irqd_set_trigger_type(&desc->irq_data, oldtype);
1309                 }
1310
1311                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1312                     (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1313                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1314                         goto mismatch;
1315
1316                 /* All handlers must agree on per-cpuness */
1317                 if ((old->flags & IRQF_PERCPU) !=
1318                     (new->flags & IRQF_PERCPU))
1319                         goto mismatch;
1320
1321                 /* add new interrupt at end of irq queue */
1322                 do {
1323                         /*
1324                          * Or all existing action->thread_mask bits,
1325                          * so we can find the next zero bit for this
1326                          * new action.
1327                          */
1328                         thread_mask |= old->thread_mask;
1329                         old_ptr = &old->next;
1330                         old = *old_ptr;
1331                 } while (old);
1332                 shared = 1;
1333         }
1334
1335         /*
1336          * Setup the thread mask for this irqaction for ONESHOT. For
1337          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1338          * conditional in irq_wake_thread().
1339          */
1340         if (new->flags & IRQF_ONESHOT) {
1341                 /*
1342                  * Unlikely to have 32 (or 64 on 64-bit) irqs sharing one line,
1343                  * but who knows.
1344                  */
1345                 if (thread_mask == ~0UL) {
1346                         ret = -EBUSY;
1347                         goto out_unlock;
1348                 }
1349                 /*
1350                  * The thread_mask for the action is or'ed to
1351                  * desc->threads_oneshot to indicate that the
1352                  * IRQF_ONESHOT thread handler has been woken, but not
1353                  * yet finished. The bit is cleared when a thread
1354                  * completes. When all threads of a shared interrupt
1355                  * line have completed desc->threads_oneshot becomes
1356                  * zero and the interrupt line is unmasked. See
1357                  * handle.c:irq_wake_thread() for further information.
1358                  *
1359                  * If no thread is woken by primary (hard irq context)
1360                  * interrupt handlers, then desc->threads_active is
1361                  * also checked for zero to unmask the irq line in the
1362                  * affected hard irq flow handlers
1363                  * (handle_[fasteoi|level]_irq).
1364                  *
1365                  * The new action gets the first zero bit of
1366                  * thread_mask assigned. See the loop above which or's
1367                  * all existing action->thread_mask bits.
1368                  */
1369                 new->thread_mask = 1 << ffz(thread_mask);
1370
1371         } else if (new->handler == irq_default_primary_handler &&
1372                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1373                 /*
1374                  * The interrupt was requested with handler = NULL, so
1375                  * we use the default primary handler for it. But it
1376                  * does not have the oneshot flag set. In combination
1377                  * with level interrupts this is deadly, because the
1378                  * default primary handler just wakes the thread, then
1379                  * the irq line is reenabled, but the device still
1380                  * has the level irq asserted. Rinse and repeat....
1381                  *
1382                  * While this works for edge type interrupts, we play
1383                  * it safe and reject unconditionally because we can't
1384                  * say for sure which type this interrupt really
1385                  * has. The type flags are unreliable as the
1386                  * underlying chip implementation can override them.
1387                  */
1388                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1389                        irq);
1390                 ret = -EINVAL;
1391                 goto out_unlock;
1392         }
1393
1394         if (!shared) {
1395                 init_waitqueue_head(&desc->wait_for_threads);
1396
1397                 /* Setup the type (level, edge polarity) if configured: */
1398                 if (new->flags & IRQF_TRIGGER_MASK) {
1399                         ret = __irq_set_trigger(desc,
1400                                                 new->flags & IRQF_TRIGGER_MASK);
1401
1402                         if (ret)
1403                                 goto out_unlock;
1404                 }
1405
1406                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1407                                   IRQS_ONESHOT | IRQS_WAITING);
1408                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1409
1410                 if (new->flags & IRQF_PERCPU) {
1411                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1412                         irq_settings_set_per_cpu(desc);
1413                 }
1414
1415                 if (new->flags & IRQF_ONESHOT)
1416                         desc->istate |= IRQS_ONESHOT;
1417
1418                 /* Exclude IRQ from balancing if requested */
1419                 if (new->flags & IRQF_NOBALANCING) {
1420                         irq_settings_set_no_balancing(desc);
1421                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1422                 }
1423
1424                 if (irq_settings_can_autoenable(desc)) {
1425                         irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1426                 } else {
1427                         /*
1428                          * Shared interrupts do not go well with disabling
1429                          * auto enable. The sharing interrupt might request
1430                          * it while it's still disabled and then wait for
1431                          * interrupts forever.
1432                          */
1433                         WARN_ON_ONCE(new->flags & IRQF_SHARED);
1434                         /* Undo nested disables: */
1435                         desc->depth = 1;
1436                 }
1437
1438         } else if (new->flags & IRQF_TRIGGER_MASK) {
1439                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1440                 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1441
1442                 if (nmsk != omsk)
1443                         /* hope the handler works with the current trigger mode */
1444                         pr_warn("irq %d uses trigger mode %u; requested %u\n",
1445                                 irq, omsk, nmsk);
1446         }
1447
1448         *old_ptr = new;
1449
1450         irq_pm_install_action(desc, new);
1451
1452         /* Reset broken irq detection when installing new handler */
1453         desc->irq_count = 0;
1454         desc->irqs_unhandled = 0;
1455
1456         /*
1457          * Check whether we disabled the irq via the spurious handler
1458          * before. Reenable it and give it another chance.
1459          */
1460         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1461                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1462                 __enable_irq(desc);
1463         }
1464
1465         raw_spin_unlock_irqrestore(&desc->lock, flags);
1466         chip_bus_sync_unlock(desc);
1467         mutex_unlock(&desc->request_mutex);
1468
1469         irq_setup_timings(desc, new);
1470
1471         /*
1472          * Strictly no need to wake it up, but hung_task complains
1473          * when no hard interrupt wakes the thread up.
1474          */
1475         if (new->thread)
1476                 wake_up_process(new->thread);
1477         if (new->secondary)
1478                 wake_up_process(new->secondary->thread);
1479
1480         register_irq_proc(irq, desc);
1481         irq_add_debugfs_entry(irq, desc);
1482         new->dir = NULL;
1483         register_handler_proc(irq, new);
1484         return 0;
1485
1486 mismatch:
1487         if (!(new->flags & IRQF_PROBE_SHARED)) {
1488                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1489                        irq, new->flags, new->name, old->flags, old->name);
1490 #ifdef CONFIG_DEBUG_SHIRQ
1491                 dump_stack();
1492 #endif
1493         }
1494         ret = -EBUSY;
1495
1496 out_unlock:
1497         raw_spin_unlock_irqrestore(&desc->lock, flags);
1498
1499         if (!desc->action)
1500                 irq_release_resources(desc);
1501 out_bus_unlock:
1502         chip_bus_sync_unlock(desc);
1503         mutex_unlock(&desc->request_mutex);
1504
1505 out_thread:
1506         if (new->thread) {
1507                 struct task_struct *t = new->thread;
1508
1509                 new->thread = NULL;
1510                 kthread_stop(t);
1511                 put_task_struct(t);
1512         }
1513         if (new->secondary && new->secondary->thread) {
1514                 struct task_struct *t = new->secondary->thread;
1515
1516                 new->secondary->thread = NULL;
1517                 kthread_stop(t);
1518                 put_task_struct(t);
1519         }
1520 out_mput:
1521         module_put(desc->owner);
1522         return ret;
1523 }
1524
1525 /**
1526  *      setup_irq - setup an interrupt
1527  *      @irq: Interrupt line to setup
1528  *      @act: irqaction for the interrupt
1529  *
1530  * Used to statically set up interrupts in the early boot process.
1531  */
1532 int setup_irq(unsigned int irq, struct irqaction *act)
1533 {
1534         int retval;
1535         struct irq_desc *desc = irq_to_desc(irq);
1536
1537         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1538                 return -EINVAL;
1539
1540         retval = irq_chip_pm_get(&desc->irq_data);
1541         if (retval < 0)
1542                 return retval;
1543
1544         retval = __setup_irq(irq, desc, act);
1545
1546         if (retval)
1547                 irq_chip_pm_put(&desc->irq_data);
1548
1549         return retval;
1550 }
1551 EXPORT_SYMBOL_GPL(setup_irq);
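
/*
 * Illustrative sketch (not part of this file): early boot code can use
 * setup_irq() with a statically allocated irqaction before the slab
 * allocator is available. The IRQ number, handler and names below are
 * hypothetical; real users live in architecture timer code.
 */
static irqreturn_t demo_timer_interrupt(int irq, void *dev_id)
{
        /* Acknowledge the (hypothetical) timer hardware here. */
        return IRQ_HANDLED;
}

static struct irqaction demo_timer_irqaction = {
        .handler = demo_timer_interrupt,
        .flags   = IRQF_TIMER | IRQF_NOBALANCING,
        .name    = "demo-timer",
};

static void __init demo_time_init(void)
{
        /* No allocation needed; the irqaction lives in .data. */
        setup_irq(0, &demo_timer_irqaction);
}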
1552
1553 /*
1554  * Internal function to unregister an irqaction - used to free
1555  * regular and special interrupts that are part of the architecture.
1556  */
1557 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1558 {
1559         struct irq_desc *desc = irq_to_desc(irq);
1560         struct irqaction *action, **action_ptr;
1561         unsigned long flags;
1562
1563         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1564
1565         if (!desc)
1566                 return NULL;
1567
1568         mutex_lock(&desc->request_mutex);
1569         chip_bus_lock(desc);
1570         raw_spin_lock_irqsave(&desc->lock, flags);
1571
1572         /*
1573          * There can be multiple actions per IRQ descriptor, find the right
1574          * one based on the dev_id:
1575          */
1576         action_ptr = &desc->action;
1577         for (;;) {
1578                 action = *action_ptr;
1579
1580                 if (!action) {
1581                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1582                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1583                         chip_bus_sync_unlock(desc);
1584                         mutex_unlock(&desc->request_mutex);
1585                         return NULL;
1586                 }
1587
1588                 if (action->dev_id == dev_id)
1589                         break;
1590                 action_ptr = &action->next;
1591         }
1592
1593         /* Found it - now remove it from the list of entries: */
1594         *action_ptr = action->next;
1595
1596         irq_pm_remove_action(desc, action);
1597
1598         /* If this was the last handler, shut down the IRQ line: */
1599         if (!desc->action) {
1600                 irq_settings_clr_disable_unlazy(desc);
1601                 irq_shutdown(desc);
1602         }
1603
1604 #ifdef CONFIG_SMP
1605         /* make sure affinity_hint is cleaned up */
1606         if (WARN_ON_ONCE(desc->affinity_hint))
1607                 desc->affinity_hint = NULL;
1608 #endif
1609
1610         raw_spin_unlock_irqrestore(&desc->lock, flags);
1611         /*
1612          * Drop bus_lock here so the changes which were done in the chip
1613          * callbacks above are synced out to the irq chips which hang
1614          * behind a slow bus (I2C, SPI) before calling synchronize_irq().
1615          *
1616          * Aside from that, the bus_lock can also be taken from the threaded
1617          * handler in irq_finalize_oneshot(), which results in a deadlock
1618          * because synchronize_irq() would wait forever for the thread to
1619          * complete, which is blocked on the bus lock.
1620          *
1621          * The still-held desc->request_mutex protects against a
1622          * concurrent request_irq() of this irq so the release of resources
1623          * and timing data is properly serialized.
1624          */
1625         chip_bus_sync_unlock(desc);
1626
1627         unregister_handler_proc(irq, action);
1628
1629         /* Make sure it's not being used on another CPU: */
1630         synchronize_irq(irq);
1631
1632 #ifdef CONFIG_DEBUG_SHIRQ
1633         /*
1634          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1635          * event to happen even now that it's being freed, so let's make
1636          * sure that is so by doing an extra call to the handler ....
1637          *
1638          * ( We do this after actually deregistering it, to make sure that
1639          *   a 'real' IRQ doesn't run in parallel with our fake. )
1640          */
1641         if (action->flags & IRQF_SHARED) {
1642                 local_irq_save(flags);
1643                 action->handler(irq, dev_id);
1644                 local_irq_restore(flags);
1645         }
1646 #endif
1647
1648         if (action->thread) {
1649                 kthread_stop(action->thread);
1650                 put_task_struct(action->thread);
1651                 if (action->secondary && action->secondary->thread) {
1652                         kthread_stop(action->secondary->thread);
1653                         put_task_struct(action->secondary->thread);
1654                 }
1655         }
1656
1657         /* Last action releases resources */
1658         if (!desc->action) {
1659                 /*
1660                  * Reacquire the bus lock as irq_release_resources() might
1661                  * require it to deallocate resources over the slow bus.
1662                  */
1663                 chip_bus_lock(desc);
1664                 irq_release_resources(desc);
1665                 chip_bus_sync_unlock(desc);
1666                 irq_remove_timings(desc);
1667         }
1668
1669         mutex_unlock(&desc->request_mutex);
1670
1671         irq_chip_pm_put(&desc->irq_data);
1672         module_put(desc->owner);
1673         kfree(action->secondary);
1674         return action;
1675 }
1676
1677 /**
1678  *      remove_irq - free an interrupt
1679  *      @irq: Interrupt line to free
1680  *      @act: irqaction for the interrupt
1681  *
1682  * Used to remove interrupts statically set up by the early boot process.
1683  */
1684 void remove_irq(unsigned int irq, struct irqaction *act)
1685 {
1686         struct irq_desc *desc = irq_to_desc(irq);
1687
1688         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1689                 __free_irq(irq, act->dev_id);
1690 }
1691 EXPORT_SYMBOL_GPL(remove_irq);
1692
1693 /**
1694  *      free_irq - free an interrupt allocated with request_irq
1695  *      @irq: Interrupt line to free
1696  *      @dev_id: Device identity to free
1697  *
1698  *      Remove an interrupt handler. The handler is removed and if the
1699  *      interrupt line is no longer in use by any driver it is disabled.
1700  *      On a shared IRQ the caller must ensure the interrupt is disabled
1701  *      on the card it drives before calling this function. The function
1702  *      does not return until any executing interrupts for this IRQ
1703  *      have completed.
1704  *
1705  *      This function must not be called from interrupt context.
1706  *
1707  *      Returns the devname argument passed to request_irq.
1708  */
1709 const void *free_irq(unsigned int irq, void *dev_id)
1710 {
1711         struct irq_desc *desc = irq_to_desc(irq);
1712         struct irqaction *action;
1713         const char *devname;
1714
1715         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1716                 return NULL;
1717
1718 #ifdef CONFIG_SMP
1719         if (WARN_ON(desc->affinity_notify))
1720                 desc->affinity_notify = NULL;
1721 #endif
1722
1723         action = __free_irq(irq, dev_id);
1724
1725         if (!action)
1726                 return NULL;
1727
1728         devname = action->name;
1729         kfree(action);
1730         return devname;
1731 }
1732 EXPORT_SYMBOL(free_irq);
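
/*
 * Illustrative sketch (not part of this file): free_irq() must be given
 * the same dev_id cookie that was passed to request_irq(); on a shared
 * line that cookie is how the right action is found and removed. The
 * structure and names below are hypothetical.
 */
struct demo_dev {
        unsigned int irq;
};

static void demo_teardown(struct demo_dev *demo)
{
        /*
         * Quiesce the device before this point so a shared line stays
         * silent; free_irq() sleeps until running handlers complete.
         */
        free_irq(demo->irq, demo);
}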
1733
1734 /**
1735  *      request_threaded_irq - allocate an interrupt line
1736  *      @irq: Interrupt line to allocate
1737  *      @handler: Function to be called when the IRQ occurs.
1738  *                Primary handler for threaded interrupts
1739  *                If NULL and thread_fn != NULL the default
1740  *                primary handler is installed
1741  *      @thread_fn: Function called from the irq handler thread
1742  *                  If NULL, no irq thread is created
1743  *      @irqflags: Interrupt type flags
1744  *      @devname: An ascii name for the claiming device
1745  *      @dev_id: A cookie passed back to the handler function
1746  *
1747  *      This call allocates interrupt resources and enables the
1748  *      interrupt line and IRQ handling. From the point this
1749  *      call is made your handler function may be invoked. Since
1750  *      your handler function must clear any interrupt the board
1751  *      raises, you must take care both to initialise your hardware
1752  *      and to set up the interrupt handler in the right order.
1753  *
1754  *      If you want to set up a threaded irq handler for your device
1755  *      then you need to supply @handler and @thread_fn. @handler is
1756  *      still called in hard interrupt context and has to check
1757  *      whether the interrupt originates from the device. If yes it
1758  *      needs to disable the interrupt on the device and return
1759  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1760  *      @thread_fn. This split handler design is necessary to support
1761  *      shared interrupts.
1762  *
1763  *      Dev_id must be globally unique. Normally the address of the
1764  *      device data structure is used as the cookie. Since the handler
1765  *      receives this value, it makes sense to use it.
1766  *
1767  *      If your interrupt is shared, you must pass a non-NULL dev_id,
1768  *      as this is required when freeing the interrupt.
1769  *
1770  *      Flags:
1771  *
1772  *      IRQF_SHARED             Interrupt is shared
1773  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1774  *
1775  */
1776 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1777                          irq_handler_t thread_fn, unsigned long irqflags,
1778                          const char *devname, void *dev_id)
1779 {
1780         struct irqaction *action;
1781         struct irq_desc *desc;
1782         int retval;
1783
1784         if (irq == IRQ_NOTCONNECTED)
1785                 return -ENOTCONN;
1786
1787         /*
1788          * Sanity-check: shared interrupts must pass in a real dev-ID,
1789          * otherwise we'll have trouble later trying to figure out
1790          * which interrupt is which (messes up the interrupt freeing
1791          * logic etc).
1792          *
1793          * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1794          * it cannot be set along with IRQF_NO_SUSPEND.
1795          */
1796         if (((irqflags & IRQF_SHARED) && !dev_id) ||
1797             (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1798             ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1799                 return -EINVAL;
1800
1801         desc = irq_to_desc(irq);
1802         if (!desc)
1803                 return -EINVAL;
1804
1805         if (!irq_settings_can_request(desc) ||
1806             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1807                 return -EINVAL;
1808
1809         if (!handler) {
1810                 if (!thread_fn)
1811                         return -EINVAL;
1812                 handler = irq_default_primary_handler;
1813         }
1814
1815         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1816         if (!action)
1817                 return -ENOMEM;
1818
1819         action->handler = handler;
1820         action->thread_fn = thread_fn;
1821         action->flags = irqflags;
1822         action->name = devname;
1823         action->dev_id = dev_id;
1824
1825         retval = irq_chip_pm_get(&desc->irq_data);
1826         if (retval < 0) {
1827                 kfree(action);
1828                 return retval;
1829         }
1830
1831         retval = __setup_irq(irq, desc, action);
1832
1833         if (retval) {
1834                 irq_chip_pm_put(&desc->irq_data);
1835                 kfree(action->secondary);
1836                 kfree(action);
1837         }
1838
1839 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1840         if (!retval && (irqflags & IRQF_SHARED)) {
1841                 /*
1842                  * It's a shared IRQ -- the driver ought to be prepared for it
1843                  * to happen immediately, so let's make sure....
1844                  * We disable the irq to make sure that a 'real' IRQ doesn't
1845                  * run in parallel with our fake.
1846                  */
1847                 unsigned long flags;
1848
1849                 disable_irq(irq);
1850                 local_irq_save(flags);
1851
1852                 handler(irq, dev_id);
1853
1854                 local_irq_restore(flags);
1855                 enable_irq(irq);
1856         }
1857 #endif
1858         return retval;
1859 }
1860 EXPORT_SYMBOL(request_threaded_irq);
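
/*
 * Illustrative sketch (not part of this file) of the split handler
 * scheme documented above, assuming the usual driver includes
 * (<linux/interrupt.h>, <linux/io.h>). The register layout, offsets
 * and names are all hypothetical.
 */
#define BAR_STATUS      0x00            /* hypothetical status register */
#define BAR_FIFO        0x04            /* hypothetical event FIFO */
#define BAR_IRQ_ENABLE  0x08            /* hypothetical mask register */
#define BAR_IRQ_PENDING BIT(0)

struct bar_dev {
        unsigned int irq;
        void __iomem *regs;
};

/* Hard interrupt context: check ownership, mask the device, wake the thread. */
static irqreturn_t bar_hardirq(int irq, void *dev_id)
{
        struct bar_dev *bar = dev_id;

        if (!(readl(bar->regs + BAR_STATUS) & BAR_IRQ_PENDING))
                return IRQ_NONE;        /* not ours on a shared line */

        writel(0, bar->regs + BAR_IRQ_ENABLE);
        return IRQ_WAKE_THREAD;
}

/* Thread context: sleepable, so mutexes and slow buses are fine here. */
static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
        struct bar_dev *bar = dev_id;

        while (readl(bar->regs + BAR_STATUS) & BAR_IRQ_PENDING)
                readl(bar->regs + BAR_FIFO);    /* drain events */

        writel(1, bar->regs + BAR_IRQ_ENABLE);  /* unmask the device */
        return IRQ_HANDLED;
}

static int bar_probe(struct bar_dev *bar)
{
        /* dev_id is bar: unique, and handed back to both handlers. */
        return request_threaded_irq(bar->irq, bar_hardirq, bar_thread_fn,
                                    IRQF_SHARED, "bar", bar);
}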
1861
1862 /**
1863  *      request_any_context_irq - allocate an interrupt line
1864  *      @irq: Interrupt line to allocate
1865  *      @handler: Function to be called when the IRQ occurs.
1866  *                Threaded handler for threaded interrupts.
1867  *      @flags: Interrupt type flags
1868  *      @name: An ascii name for the claiming device
1869  *      @dev_id: A cookie passed back to the handler function
1870  *
1871  *      This call allocates interrupt resources and enables the
1872  *      interrupt line and IRQ handling. It selects either a
1873  *      hardirq or threaded handling method depending on the
1874  *      context.
1875  *
1876  *      On failure, it returns a negative value. On success,
1877  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1878  */
1879 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1880                             unsigned long flags, const char *name, void *dev_id)
1881 {
1882         struct irq_desc *desc;
1883         int ret;
1884
1885         if (irq == IRQ_NOTCONNECTED)
1886                 return -ENOTCONN;
1887
1888         desc = irq_to_desc(irq);
1889         if (!desc)
1890                 return -EINVAL;
1891
1892         if (irq_settings_is_nested_thread(desc)) {
1893                 ret = request_threaded_irq(irq, NULL, handler,
1894                                            flags, name, dev_id);
1895                 return !ret ? IRQC_IS_NESTED : ret;
1896         }
1897
1898         ret = request_irq(irq, handler, flags, name, dev_id);
1899         return !ret ? IRQC_IS_HARDIRQ : ret;
1900 }
1901 EXPORT_SYMBOL_GPL(request_any_context_irq);
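
/*
 * Illustrative sketch (not part of this file): a driver that may sit
 * behind a nested-threaded controller (e.g. a GPIO expander on I2C)
 * cannot know whether it gets a hardirq or a nested thread, so it uses
 * request_any_context_irq() and treats both success codes alike. The
 * names are hypothetical.
 */
static irqreturn_t demo_button_handler(int irq, void *dev_id)
{
        /* May run in hardirq or nested-thread context; keep it simple. */
        return IRQ_HANDLED;
}

static int demo_button_probe(unsigned int irq, void *priv)
{
        int ret;

        ret = request_any_context_irq(irq, demo_button_handler,
                                      IRQF_TRIGGER_FALLING,
                                      "demo-button", priv);
        if (ret < 0)
                return ret;

        /* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success. */
        return 0;
}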
1902
1903 void enable_percpu_irq(unsigned int irq, unsigned int type)
1904 {
1905         unsigned int cpu = smp_processor_id();
1906         unsigned long flags;
1907         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1908
1909         if (!desc)
1910                 return;
1911
1912         /*
1913          * If the trigger type is not specified by the caller, then
1914          * use the default for this interrupt.
1915          */
1916         type &= IRQ_TYPE_SENSE_MASK;
1917         if (type == IRQ_TYPE_NONE)
1918                 type = irqd_get_trigger_type(&desc->irq_data);
1919
1920         if (type != IRQ_TYPE_NONE) {
1921                 int ret;
1922
1923                 ret = __irq_set_trigger(desc, type);
1924
1925                 if (ret) {
1926                         WARN(1, "failed to set type for IRQ%d\n", irq);
1927                         goto out;
1928                 }
1929         }
1930
1931         irq_percpu_enable(desc, cpu);
1932 out:
1933         irq_put_desc_unlock(desc, flags);
1934 }
1935 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1936
1937 /**
1938  * irq_percpu_is_enabled - Check whether the per-CPU irq is enabled
1939  * @irq:        Linux irq number to check for
1940  *
1941  * Must be called from a non-migratable context. Returns the enable
1942  * state of a per-CPU interrupt on the current CPU.
1943  */
1944 bool irq_percpu_is_enabled(unsigned int irq)
1945 {
1946         unsigned int cpu = smp_processor_id();
1947         struct irq_desc *desc;
1948         unsigned long flags;
1949         bool is_enabled;
1950
1951         desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1952         if (!desc)
1953                 return false;
1954
1955         is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1956         irq_put_desc_unlock(desc, flags);
1957
1958         return is_enabled;
1959 }
1960 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1961
1962 void disable_percpu_irq(unsigned int irq)
1963 {
1964         unsigned int cpu = smp_processor_id();
1965         unsigned long flags;
1966         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1967
1968         if (!desc)
1969                 return;
1970
1971         irq_percpu_disable(desc, cpu);
1972         irq_put_desc_unlock(desc, flags);
1973 }
1974 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1975
1976 /*
1977  * Internal function to unregister a percpu irqaction.
1978  */
1979 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1980 {
1981         struct irq_desc *desc = irq_to_desc(irq);
1982         struct irqaction *action;
1983         unsigned long flags;
1984
1985         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1986
1987         if (!desc)
1988                 return NULL;
1989
1990         raw_spin_lock_irqsave(&desc->lock, flags);
1991
1992         action = desc->action;
1993         if (!action || action->percpu_dev_id != dev_id) {
1994                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1995                 goto bad;
1996         }
1997
1998         if (!cpumask_empty(desc->percpu_enabled)) {
1999                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2000                      irq, cpumask_first(desc->percpu_enabled));
2001                 goto bad;
2002         }
2003
2004         /* Found it - now remove it from the list of entries: */
2005         desc->action = NULL;
2006
2007         raw_spin_unlock_irqrestore(&desc->lock, flags);
2008
2009         unregister_handler_proc(irq, action);
2010
2011         irq_chip_pm_put(&desc->irq_data);
2012         module_put(desc->owner);
2013         return action;
2014
2015 bad:
2016         raw_spin_unlock_irqrestore(&desc->lock, flags);
2017         return NULL;
2018 }
2019
2020 /**
2021  *      remove_percpu_irq - free a per-cpu interrupt
2022  *      @irq: Interrupt line to free
2023  *      @act: irqaction for the interrupt
2024  *
2025  * Used to remove interrupts statically set up by the early boot process.
2026  */
2027 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2028 {
2029         struct irq_desc *desc = irq_to_desc(irq);
2030
2031         if (desc && irq_settings_is_per_cpu_devid(desc))
2032                 __free_percpu_irq(irq, act->percpu_dev_id);
2033 }
2034
2035 /**
2036  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
2037  *      @irq: Interrupt line to free
2038  *      @dev_id: Device identity to free
2039  *
2040  *      Remove a percpu interrupt handler. The handler is removed, but
2041  *      the interrupt line is not disabled. This must be done on each
2042  *      CPU before calling this function. The function does not return
2043  *      until any executing interrupts for this IRQ have completed.
2044  *
2045  *      This function must not be called from interrupt context.
2046  */
2047 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2048 {
2049         struct irq_desc *desc = irq_to_desc(irq);
2050
2051         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2052                 return;
2053
2054         chip_bus_lock(desc);
2055         kfree(__free_percpu_irq(irq, dev_id));
2056         chip_bus_sync_unlock(desc);
2057 }
2058 EXPORT_SYMBOL_GPL(free_percpu_irq);
2059
2060 /**
2061  *      setup_percpu_irq - setup a per-cpu interrupt
2062  *      @irq: Interrupt line to setup
2063  *      @act: irqaction for the interrupt
2064  *
2065  * Used to statically set up per-cpu interrupts in the early boot process.
2066  */
2067 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2068 {
2069         struct irq_desc *desc = irq_to_desc(irq);
2070         int retval;
2071
2072         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2073                 return -EINVAL;
2074
2075         retval = irq_chip_pm_get(&desc->irq_data);
2076         if (retval < 0)
2077                 return retval;
2078
2079         retval = __setup_irq(irq, desc, act);
2080
2081         if (retval)
2082                 irq_chip_pm_put(&desc->irq_data);
2083
2084         return retval;
2085 }
2086
2087 /**
2088  *      __request_percpu_irq - allocate a percpu interrupt line
2089  *      @irq: Interrupt line to allocate
2090  *      @handler: Function to be called when the IRQ occurs.
2091  *      @flags: Interrupt type flags (IRQF_TIMER only)
2092  *      @devname: An ascii name for the claiming device
2093  *      @dev_id: A percpu cookie passed back to the handler function
2094  *
2095  *      This call allocates interrupt resources and enables the
2096  *      interrupt on the local CPU. If the interrupt is supposed to be
2097  *      enabled on other CPUs, it has to be done on each CPU using
2098  *      enable_percpu_irq().
2099  *
2100  *      Dev_id must be globally unique. It is a per-cpu variable, and
2101  *      the handler gets called with the interrupted CPU's instance of
2102  *      that variable.
2103  */
2104 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2105                          unsigned long flags, const char *devname,
2106                          void __percpu *dev_id)
2107 {
2108         struct irqaction *action;
2109         struct irq_desc *desc;
2110         int retval;
2111
2112         if (!dev_id)
2113                 return -EINVAL;
2114
2115         desc = irq_to_desc(irq);
2116         if (!desc || !irq_settings_can_request(desc) ||
2117             !irq_settings_is_per_cpu_devid(desc))
2118                 return -EINVAL;
2119
2120         if (flags && flags != IRQF_TIMER)
2121                 return -EINVAL;
2122
2123         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2124         if (!action)
2125                 return -ENOMEM;
2126
2127         action->handler = handler;
2128         action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2129         action->name = devname;
2130         action->percpu_dev_id = dev_id;
2131
2132         retval = irq_chip_pm_get(&desc->irq_data);
2133         if (retval < 0) {
2134                 kfree(action);
2135                 return retval;
2136         }
2137
2138         retval = __setup_irq(irq, desc, action);
2139
2140         if (retval) {
2141                 irq_chip_pm_put(&desc->irq_data);
2142                 kfree(action);
2143         }
2144
2145         return retval;
2146 }
2147 EXPORT_SYMBOL_GPL(__request_percpu_irq);
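
/*
 * Illustrative sketch (not part of this file): a per-CPU interrupt is
 * requested once with a __percpu cookie and then enabled on each CPU
 * from that CPU, typically from CPU hotplug callbacks (registration via
 * cpuhp_setup_state() not shown). All names are hypothetical.
 */
struct demo_pcpu_dev {
        u64 count;
};

static DEFINE_PER_CPU(struct demo_pcpu_dev, demo_pcpu_dev);
static unsigned int demo_ppi_irq;       /* hypothetical, mapped elsewhere */

static irqreturn_t demo_percpu_handler(int irq, void *dev_id)
{
        struct demo_pcpu_dev *dev = dev_id;     /* this CPU's instance */

        dev->count++;
        return IRQ_HANDLED;
}

static int demo_starting_cpu(unsigned int cpu)
{
        enable_percpu_irq(demo_ppi_irq, IRQ_TYPE_NONE);
        return 0;
}

static int demo_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(demo_ppi_irq);
        return 0;
}

static int __init demo_ppi_init(void)
{
        /* Requested once; enabling happens per CPU via the callbacks above. */
        return request_percpu_irq(demo_ppi_irq, demo_percpu_handler,
                                  "demo-ppi", &demo_pcpu_dev);
}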
2148
2149 /**
2150  *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
2151  *      @irq: Interrupt line that is forwarded to a VM
2152  *      @which: One of IRQCHIP_STATE_* the caller wants to know about
2153  *      @state: a pointer to a boolean where the state is to be stored
2154  *
2155  *      This call snapshots the internal irqchip state of an
2156  *      interrupt, returning into @state the bit corresponding to
2157  *      state @which.
2158  *
2159  *      This function should be called with preemption disabled if the
2160  *      interrupt controller has per-cpu registers.
2161  */
2162 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2163                           bool *state)
2164 {
2165         struct irq_desc *desc;
2166         struct irq_data *data;
2167         struct irq_chip *chip;
2168         unsigned long flags;
2169         int err = -EINVAL;
2170
2171         desc = irq_get_desc_buslock(irq, &flags, 0);
2172         if (!desc)
2173                 return err;
2174
2175         data = irq_desc_get_irq_data(desc);
2176
2177         do {
2178                 chip = irq_data_get_irq_chip(data);
2179                 if (chip->irq_get_irqchip_state)
2180                         break;
2181 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2182                 data = data->parent_data;
2183 #else
2184                 data = NULL;
2185 #endif
2186         } while (data);
2187
2188         if (data)
2189                 err = chip->irq_get_irqchip_state(data, which, state);
2190
2191         irq_put_desc_busunlock(desc, flags);
2192         return err;
2193 }
2194 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
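
/*
 * Illustrative sketch (not part of this file): code forwarding an
 * interrupt to a VM can snapshot whether the line is pending at the
 * irqchip level, e.g. before saving guest state. 'virq' is a
 * hypothetical Linux interrupt number.
 */
static bool demo_irq_pending(unsigned int virq)
{
        bool pending = false;

        /* A nonzero return means no chip in the hierarchy can tell us. */
        if (irq_get_irqchip_state(virq, IRQCHIP_STATE_PENDING, &pending))
                return false;

        return pending;
}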
2195
2196 /**
2197  *      irq_set_irqchip_state - set the state of a forwarded interrupt.
2198  *      @irq: Interrupt line that is forwarded to a VM
2199  *      @which: State to be restored (one of IRQCHIP_STATE_*)
2200  *      @val: Value corresponding to @which
2201  *
2202  *      This call sets the internal irqchip state of an interrupt,
2203  *      depending on the value of @which.
2204  *
2205  *      This function should be called with preemption disabled if the
2206  *      interrupt controller has per-cpu registers.
2207  */
2208 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2209                           bool val)
2210 {
2211         struct irq_desc *desc;
2212         struct irq_data *data;
2213         struct irq_chip *chip;
2214         unsigned long flags;
2215         int err = -EINVAL;
2216
2217         desc = irq_get_desc_buslock(irq, &flags, 0);
2218         if (!desc)
2219                 return err;
2220
2221         data = irq_desc_get_irq_data(desc);
2222
2223         do {
2224                 chip = irq_data_get_irq_chip(data);
2225                 if (chip->irq_set_irqchip_state)
2226                         break;
2227 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2228                 data = data->parent_data;
2229 #else
2230                 data = NULL;
2231 #endif
2232         } while (data);
2233
2234         if (data)
2235                 err = chip->irq_set_irqchip_state(data, which, val);
2236
2237         irq_put_desc_busunlock(desc, flags);
2238         return err;
2239 }
2240 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
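
/*
 * Illustrative sketch (not part of this file): the counterpart of the
 * snapshot above, re-injecting a pending state when a VM is resumed.
 * 'virq' is again a hypothetical Linux interrupt number.
 */
static void demo_irq_restore(unsigned int virq, bool was_pending)
{
        if (irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, was_pending))
                pr_warn("cannot restore pending state for irq %u\n", virq);
}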