GNU Linux-libre 4.14.290-gnu1
[releases.git] / drivers / base / power / main.c
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/cpuidle.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in the dpm_list list are in a depth first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         pr_debug("PM: Adding info for %s:%s\n",
128                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
129         device_pm_check_callbacks(dev);
130         mutex_lock(&dpm_list_mtx);
131         if (dev->parent && dev->parent->power.is_prepared)
132                 dev_warn(dev, "parent %s should not be sleeping\n",
133                         dev_name(dev->parent));
134         list_add_tail(&dev->power.entry, &dpm_list);
135         dev->power.in_dpm_list = true;
136         mutex_unlock(&dpm_list_mtx);
137 }
138
139 /**
140  * device_pm_remove - Remove a device from the PM core's list of active devices.
141  * @dev: Device to be removed from the list.
142  */
143 void device_pm_remove(struct device *dev)
144 {
145         pr_debug("PM: Removing info for %s:%s\n",
146                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
147         complete_all(&dev->power.completion);
148         mutex_lock(&dpm_list_mtx);
149         list_del_init(&dev->power.entry);
150         dev->power.in_dpm_list = false;
151         mutex_unlock(&dpm_list_mtx);
152         device_wakeup_disable(dev);
153         pm_runtime_remove(dev);
154         device_pm_check_callbacks(dev);
155 }
156
157 /**
158  * device_pm_move_before - Move device in the PM core's list of active devices.
159  * @deva: Device to move in dpm_list.
160  * @devb: Device @deva should come before.
161  */
162 void device_pm_move_before(struct device *deva, struct device *devb)
163 {
164         pr_debug("PM: Moving %s:%s before %s:%s\n",
165                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
166                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
167         /* Delete deva from dpm_list and reinsert before devb. */
168         list_move_tail(&deva->power.entry, &devb->power.entry);
169 }
170
171 /**
172  * device_pm_move_after - Move device in the PM core's list of active devices.
173  * @deva: Device to move in dpm_list.
174  * @devb: Device @deva should come after.
175  */
176 void device_pm_move_after(struct device *deva, struct device *devb)
177 {
178         pr_debug("PM: Moving %s:%s after %s:%s\n",
179                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
180                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
181         /* Delete deva from dpm_list and reinsert after devb. */
182         list_move(&deva->power.entry, &devb->power.entry);
183 }
184
185 /**
186  * device_pm_move_last - Move device to end of the PM core's list of devices.
187  * @dev: Device to move in dpm_list.
188  */
189 void device_pm_move_last(struct device *dev)
190 {
191         pr_debug("PM: Moving %s:%s to end of list\n",
192                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
193         list_move_tail(&dev->power.entry, &dpm_list);
194 }
195
196 static ktime_t initcall_debug_start(struct device *dev)
197 {
198         ktime_t calltime = 0;
199
200         if (pm_print_times_enabled) {
201                 pr_info("calling  %s+ @ %i, parent: %s\n",
202                         dev_name(dev), task_pid_nr(current),
203                         dev->parent ? dev_name(dev->parent) : "none");
204                 calltime = ktime_get();
205         }
206
207         return calltime;
208 }
209
210 static void initcall_debug_report(struct device *dev, ktime_t calltime,
211                                   int error, pm_message_t state,
212                                   const char *info)
213 {
214         ktime_t rettime;
215         s64 nsecs;
216
217         rettime = ktime_get();
218         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
219
220         if (pm_print_times_enabled) {
221                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
222                         error, (unsigned long long)nsecs >> 10);
223         }
224 }
225
226 /**
227  * dpm_wait - Wait for a PM operation to complete.
228  * @dev: Device to wait for.
229  * @async: If unset, wait only if the device's power.async_suspend flag is set.
230  */
231 static void dpm_wait(struct device *dev, bool async)
232 {
233         if (!dev)
234                 return;
235
236         if (async || (pm_async_enabled && dev->power.async_suspend))
237                 wait_for_completion(&dev->power.completion);
238 }
239
240 static int dpm_wait_fn(struct device *dev, void *async_ptr)
241 {
242         dpm_wait(dev, *((bool *)async_ptr));
243         return 0;
244 }
245
246 static void dpm_wait_for_children(struct device *dev, bool async)
247 {
248        device_for_each_child(dev, &async, dpm_wait_fn);
249 }
250
251 static void dpm_wait_for_suppliers(struct device *dev, bool async)
252 {
253         struct device_link *link;
254         int idx;
255
256         idx = device_links_read_lock();
257
258         /*
259          * If the supplier goes away right after we've checked the link to it,
260          * we'll wait for its completion to change the state, but that's fine,
261          * because the only things that will block as a result are the SRCU
262          * callbacks freeing the link objects for the links in the list we're
263          * walking.
264          */
265         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
266                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
267                         dpm_wait(link->supplier, async);
268
269         device_links_read_unlock(idx);
270 }
271
272 static bool dpm_wait_for_superior(struct device *dev, bool async)
273 {
274         struct device *parent;
275
276         /*
277          * If the device is resumed asynchronously and the parent's callback
278          * deletes both the device and the parent itself, the parent object may
279          * be freed while this function is running, so avoid that by reference
280          * counting the parent once more unless the device has been deleted
281          * already (in which case return right away).
282          */
283         mutex_lock(&dpm_list_mtx);
284
285         if (!device_pm_initialized(dev)) {
286                 mutex_unlock(&dpm_list_mtx);
287                 return false;
288         }
289
290         parent = get_device(dev->parent);
291
292         mutex_unlock(&dpm_list_mtx);
293
294         dpm_wait(parent, async);
295         put_device(parent);
296
297         dpm_wait_for_suppliers(dev, async);
298
299         /*
300          * If the parent's callback has deleted the device, attempting to resume
301          * it would be invalid, so avoid doing that then.
302          */
303         return device_pm_initialized(dev);
304 }
305
306 static void dpm_wait_for_consumers(struct device *dev, bool async)
307 {
308         struct device_link *link;
309         int idx;
310
311         idx = device_links_read_lock();
312
313         /*
314          * The status of a device link can only be changed from "dormant" by a
315          * probe, but that cannot happen during system suspend/resume.  In
316          * theory it can change to "dormant" at that time, but then it is
317          * reasonable to wait for the target device anyway (eg. if it goes
318          * away, it's better to wait for it to go away completely and then
319          * continue instead of trying to continue in parallel with its
320          * unregistration).
321          */
322         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
323                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
324                         dpm_wait(link->consumer, async);
325
326         device_links_read_unlock(idx);
327 }
328
329 static void dpm_wait_for_subordinate(struct device *dev, bool async)
330 {
331         dpm_wait_for_children(dev, async);
332         dpm_wait_for_consumers(dev, async);
333 }
334
335 /**
336  * pm_op - Return the PM operation appropriate for given PM event.
337  * @ops: PM operations to choose from.
338  * @state: PM transition of the system being carried out.
339  */
340 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
341 {
342         switch (state.event) {
343 #ifdef CONFIG_SUSPEND
344         case PM_EVENT_SUSPEND:
345                 return ops->suspend;
346         case PM_EVENT_RESUME:
347                 return ops->resume;
348 #endif /* CONFIG_SUSPEND */
349 #ifdef CONFIG_HIBERNATE_CALLBACKS
350         case PM_EVENT_FREEZE:
351         case PM_EVENT_QUIESCE:
352                 return ops->freeze;
353         case PM_EVENT_HIBERNATE:
354                 return ops->poweroff;
355         case PM_EVENT_THAW:
356         case PM_EVENT_RECOVER:
357                 return ops->thaw;
358                 break;
359         case PM_EVENT_RESTORE:
360                 return ops->restore;
361 #endif /* CONFIG_HIBERNATE_CALLBACKS */
362         }
363
364         return NULL;
365 }
366
367 /**
368  * pm_late_early_op - Return the PM operation appropriate for given PM event.
369  * @ops: PM operations to choose from.
370  * @state: PM transition of the system being carried out.
371  *
372  * Runtime PM is disabled for @dev while this function is being executed.
373  */
374 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
375                                       pm_message_t state)
376 {
377         switch (state.event) {
378 #ifdef CONFIG_SUSPEND
379         case PM_EVENT_SUSPEND:
380                 return ops->suspend_late;
381         case PM_EVENT_RESUME:
382                 return ops->resume_early;
383 #endif /* CONFIG_SUSPEND */
384 #ifdef CONFIG_HIBERNATE_CALLBACKS
385         case PM_EVENT_FREEZE:
386         case PM_EVENT_QUIESCE:
387                 return ops->freeze_late;
388         case PM_EVENT_HIBERNATE:
389                 return ops->poweroff_late;
390         case PM_EVENT_THAW:
391         case PM_EVENT_RECOVER:
392                 return ops->thaw_early;
393         case PM_EVENT_RESTORE:
394                 return ops->restore_early;
395 #endif /* CONFIG_HIBERNATE_CALLBACKS */
396         }
397
398         return NULL;
399 }
400
401 /**
402  * pm_noirq_op - Return the PM operation appropriate for given PM event.
403  * @ops: PM operations to choose from.
404  * @state: PM transition of the system being carried out.
405  *
406  * The driver of @dev will not receive interrupts while this function is being
407  * executed.
408  */
409 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
410 {
411         switch (state.event) {
412 #ifdef CONFIG_SUSPEND
413         case PM_EVENT_SUSPEND:
414                 return ops->suspend_noirq;
415         case PM_EVENT_RESUME:
416                 return ops->resume_noirq;
417 #endif /* CONFIG_SUSPEND */
418 #ifdef CONFIG_HIBERNATE_CALLBACKS
419         case PM_EVENT_FREEZE:
420         case PM_EVENT_QUIESCE:
421                 return ops->freeze_noirq;
422         case PM_EVENT_HIBERNATE:
423                 return ops->poweroff_noirq;
424         case PM_EVENT_THAW:
425         case PM_EVENT_RECOVER:
426                 return ops->thaw_noirq;
427         case PM_EVENT_RESTORE:
428                 return ops->restore_noirq;
429 #endif /* CONFIG_HIBERNATE_CALLBACKS */
430         }
431
432         return NULL;
433 }
434
435 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
436 {
437         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
438                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
439                 ", may wakeup" : "");
440 }
441
442 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
443                         int error)
444 {
445         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
446                 dev_name(dev), pm_verb(state.event), info, error);
447 }
448
449 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
450                           const char *info)
451 {
452         ktime_t calltime;
453         u64 usecs64;
454         int usecs;
455
456         calltime = ktime_get();
457         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
458         do_div(usecs64, NSEC_PER_USEC);
459         usecs = usecs64;
460         if (usecs == 0)
461                 usecs = 1;
462
463         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
464                   info ?: "", info ? " " : "", pm_verb(state.event),
465                   error ? "aborted" : "complete",
466                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
467 }
468
469 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
470                             pm_message_t state, const char *info)
471 {
472         ktime_t calltime;
473         int error;
474
475         if (!cb)
476                 return 0;
477
478         calltime = initcall_debug_start(dev);
479
480         pm_dev_dbg(dev, state, info);
481         trace_device_pm_callback_start(dev, info, state.event);
482         error = cb(dev);
483         trace_device_pm_callback_end(dev, error);
484         suspend_report_result(cb, error);
485
486         initcall_debug_report(dev, calltime, error, state, info);
487
488         return error;
489 }
490
491 #ifdef CONFIG_DPM_WATCHDOG
492 struct dpm_watchdog {
493         struct device           *dev;
494         struct task_struct      *tsk;
495         struct timer_list       timer;
496 };
497
498 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
499         struct dpm_watchdog wd
500
501 /**
502  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
503  * @data: Watchdog object address.
504  *
505  * Called when a driver has timed out suspending or resuming.
506  * There's not much we can do here to recover so panic() to
507  * capture a crash-dump in pstore.
508  */
509 static void dpm_watchdog_handler(unsigned long data)
510 {
511         struct dpm_watchdog *wd = (void *)data;
512
513         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
514         show_stack(wd->tsk, NULL);
515         panic("%s %s: unrecoverable failure\n",
516                 dev_driver_string(wd->dev), dev_name(wd->dev));
517 }
518
519 /**
520  * dpm_watchdog_set - Enable pm watchdog for given device.
521  * @wd: Watchdog. Must be allocated on the stack.
522  * @dev: Device to handle.
523  */
524 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
525 {
526         struct timer_list *timer = &wd->timer;
527
528         wd->dev = dev;
529         wd->tsk = current;
530
531         init_timer_on_stack(timer);
532         /* use same timeout value for both suspend and resume */
533         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
534         timer->function = dpm_watchdog_handler;
535         timer->data = (unsigned long)wd;
536         add_timer(timer);
537 }
538
539 /**
540  * dpm_watchdog_clear - Disable suspend/resume watchdog.
541  * @wd: Watchdog to disable.
542  */
543 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
544 {
545         struct timer_list *timer = &wd->timer;
546
547         del_timer_sync(timer);
548         destroy_timer_on_stack(timer);
549 }
550 #else
551 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
552 #define dpm_watchdog_set(x, y)
553 #define dpm_watchdog_clear(x)
554 #endif
555
556 /*------------------------- Resume routines -------------------------*/
557
558 /**
559  * device_resume_noirq - Execute an "early resume" callback for given device.
560  * @dev: Device to handle.
561  * @state: PM transition of the system being carried out.
562  * @async: If true, the device is being resumed asynchronously.
563  *
564  * The driver of @dev will not receive interrupts while this function is being
565  * executed.
566  */
567 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
568 {
569         pm_callback_t callback = NULL;
570         const char *info = NULL;
571         int error = 0;
572
573         TRACE_DEVICE(dev);
574         TRACE_RESUME(0);
575
576         if (dev->power.syscore || dev->power.direct_complete)
577                 goto Out;
578
579         if (!dev->power.is_noirq_suspended)
580                 goto Out;
581
582         if (!dpm_wait_for_superior(dev, async))
583                 goto Out;
584
585         if (dev->pm_domain) {
586                 info = "noirq power domain ";
587                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
588         } else if (dev->type && dev->type->pm) {
589                 info = "noirq type ";
590                 callback = pm_noirq_op(dev->type->pm, state);
591         } else if (dev->class && dev->class->pm) {
592                 info = "noirq class ";
593                 callback = pm_noirq_op(dev->class->pm, state);
594         } else if (dev->bus && dev->bus->pm) {
595                 info = "noirq bus ";
596                 callback = pm_noirq_op(dev->bus->pm, state);
597         }
598
599         if (!callback && dev->driver && dev->driver->pm) {
600                 info = "noirq driver ";
601                 callback = pm_noirq_op(dev->driver->pm, state);
602         }
603
604         error = dpm_run_callback(callback, dev, state, info);
605         dev->power.is_noirq_suspended = false;
606
607  Out:
608         complete_all(&dev->power.completion);
609         TRACE_RESUME(error);
610         return error;
611 }
612
613 static bool is_async(struct device *dev)
614 {
615         return dev->power.async_suspend && pm_async_enabled
616                 && !pm_trace_is_enabled();
617 }
618
619 static void async_resume_noirq(void *data, async_cookie_t cookie)
620 {
621         struct device *dev = (struct device *)data;
622         int error;
623
624         error = device_resume_noirq(dev, pm_transition, true);
625         if (error)
626                 pm_dev_err(dev, pm_transition, " async", error);
627
628         put_device(dev);
629 }
630
631 void dpm_noirq_resume_devices(pm_message_t state)
632 {
633         struct device *dev;
634         ktime_t starttime = ktime_get();
635
636         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
637         mutex_lock(&dpm_list_mtx);
638         pm_transition = state;
639
640         /*
641          * Advanced the async threads upfront,
642          * in case the starting of async threads is
643          * delayed by non-async resuming devices.
644          */
645         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
646                 reinit_completion(&dev->power.completion);
647                 if (is_async(dev)) {
648                         get_device(dev);
649                         async_schedule(async_resume_noirq, dev);
650                 }
651         }
652
653         while (!list_empty(&dpm_noirq_list)) {
654                 dev = to_device(dpm_noirq_list.next);
655                 get_device(dev);
656                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
657                 mutex_unlock(&dpm_list_mtx);
658
659                 if (!is_async(dev)) {
660                         int error;
661
662                         error = device_resume_noirq(dev, state, false);
663                         if (error) {
664                                 suspend_stats.failed_resume_noirq++;
665                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
666                                 dpm_save_failed_dev(dev_name(dev));
667                                 pm_dev_err(dev, state, " noirq", error);
668                         }
669                 }
670
671                 mutex_lock(&dpm_list_mtx);
672                 put_device(dev);
673         }
674         mutex_unlock(&dpm_list_mtx);
675         async_synchronize_full();
676         dpm_show_time(starttime, state, 0, "noirq");
677         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
678 }
679
680 void dpm_noirq_end(void)
681 {
682         resume_device_irqs();
683         device_wakeup_disarm_wake_irqs();
684         cpuidle_resume();
685 }
686
687 /**
688  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
689  * @state: PM transition of the system being carried out.
690  *
691  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
692  * allow device drivers' interrupt handlers to be called.
693  */
694 void dpm_resume_noirq(pm_message_t state)
695 {
696         dpm_noirq_resume_devices(state);
697         dpm_noirq_end();
698 }
699
700 /**
701  * device_resume_early - Execute an "early resume" callback for given device.
702  * @dev: Device to handle.
703  * @state: PM transition of the system being carried out.
704  * @async: If true, the device is being resumed asynchronously.
705  *
706  * Runtime PM is disabled for @dev while this function is being executed.
707  */
708 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
709 {
710         pm_callback_t callback = NULL;
711         const char *info = NULL;
712         int error = 0;
713
714         TRACE_DEVICE(dev);
715         TRACE_RESUME(0);
716
717         if (dev->power.syscore || dev->power.direct_complete)
718                 goto Out;
719
720         if (!dev->power.is_late_suspended)
721                 goto Out;
722
723         if (!dpm_wait_for_superior(dev, async))
724                 goto Out;
725
726         if (dev->pm_domain) {
727                 info = "early power domain ";
728                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
729         } else if (dev->type && dev->type->pm) {
730                 info = "early type ";
731                 callback = pm_late_early_op(dev->type->pm, state);
732         } else if (dev->class && dev->class->pm) {
733                 info = "early class ";
734                 callback = pm_late_early_op(dev->class->pm, state);
735         } else if (dev->bus && dev->bus->pm) {
736                 info = "early bus ";
737                 callback = pm_late_early_op(dev->bus->pm, state);
738         }
739
740         if (!callback && dev->driver && dev->driver->pm) {
741                 info = "early driver ";
742                 callback = pm_late_early_op(dev->driver->pm, state);
743         }
744
745         error = dpm_run_callback(callback, dev, state, info);
746         dev->power.is_late_suspended = false;
747
748  Out:
749         TRACE_RESUME(error);
750
751         pm_runtime_enable(dev);
752         complete_all(&dev->power.completion);
753         return error;
754 }
755
756 static void async_resume_early(void *data, async_cookie_t cookie)
757 {
758         struct device *dev = (struct device *)data;
759         int error;
760
761         error = device_resume_early(dev, pm_transition, true);
762         if (error)
763                 pm_dev_err(dev, pm_transition, " async", error);
764
765         put_device(dev);
766 }
767
768 /**
769  * dpm_resume_early - Execute "early resume" callbacks for all devices.
770  * @state: PM transition of the system being carried out.
771  */
772 void dpm_resume_early(pm_message_t state)
773 {
774         struct device *dev;
775         ktime_t starttime = ktime_get();
776
777         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
778         mutex_lock(&dpm_list_mtx);
779         pm_transition = state;
780
781         /*
782          * Advanced the async threads upfront,
783          * in case the starting of async threads is
784          * delayed by non-async resuming devices.
785          */
786         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
787                 reinit_completion(&dev->power.completion);
788                 if (is_async(dev)) {
789                         get_device(dev);
790                         async_schedule(async_resume_early, dev);
791                 }
792         }
793
794         while (!list_empty(&dpm_late_early_list)) {
795                 dev = to_device(dpm_late_early_list.next);
796                 get_device(dev);
797                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
798                 mutex_unlock(&dpm_list_mtx);
799
800                 if (!is_async(dev)) {
801                         int error;
802
803                         error = device_resume_early(dev, state, false);
804                         if (error) {
805                                 suspend_stats.failed_resume_early++;
806                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
807                                 dpm_save_failed_dev(dev_name(dev));
808                                 pm_dev_err(dev, state, " early", error);
809                         }
810                 }
811                 mutex_lock(&dpm_list_mtx);
812                 put_device(dev);
813         }
814         mutex_unlock(&dpm_list_mtx);
815         async_synchronize_full();
816         dpm_show_time(starttime, state, 0, "early");
817         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
818 }
819
820 /**
821  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
822  * @state: PM transition of the system being carried out.
823  */
824 void dpm_resume_start(pm_message_t state)
825 {
826         dpm_resume_noirq(state);
827         dpm_resume_early(state);
828 }
829 EXPORT_SYMBOL_GPL(dpm_resume_start);
830
831 /**
832  * device_resume - Execute "resume" callbacks for given device.
833  * @dev: Device to handle.
834  * @state: PM transition of the system being carried out.
835  * @async: If true, the device is being resumed asynchronously.
836  */
837 static int device_resume(struct device *dev, pm_message_t state, bool async)
838 {
839         pm_callback_t callback = NULL;
840         const char *info = NULL;
841         int error = 0;
842         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
843
844         TRACE_DEVICE(dev);
845         TRACE_RESUME(0);
846
847         if (dev->power.syscore)
848                 goto Complete;
849
850         if (dev->power.direct_complete) {
851                 /* Match the pm_runtime_disable() in __device_suspend(). */
852                 pm_runtime_enable(dev);
853                 goto Complete;
854         }
855
856         if (!dpm_wait_for_superior(dev, async))
857                 goto Complete;
858
859         dpm_watchdog_set(&wd, dev);
860         device_lock(dev);
861
862         /*
863          * This is a fib.  But we'll allow new children to be added below
864          * a resumed device, even if the device hasn't been completed yet.
865          */
866         dev->power.is_prepared = false;
867
868         if (!dev->power.is_suspended)
869                 goto Unlock;
870
871         if (dev->pm_domain) {
872                 info = "power domain ";
873                 callback = pm_op(&dev->pm_domain->ops, state);
874                 goto Driver;
875         }
876
877         if (dev->type && dev->type->pm) {
878                 info = "type ";
879                 callback = pm_op(dev->type->pm, state);
880                 goto Driver;
881         }
882
883         if (dev->class) {
884                 if (dev->class->pm) {
885                         info = "class ";
886                         callback = pm_op(dev->class->pm, state);
887                         goto Driver;
888                 } else if (dev->class->resume) {
889                         info = "legacy class ";
890                         callback = dev->class->resume;
891                         goto End;
892                 }
893         }
894
895         if (dev->bus) {
896                 if (dev->bus->pm) {
897                         info = "bus ";
898                         callback = pm_op(dev->bus->pm, state);
899                 } else if (dev->bus->resume) {
900                         info = "legacy bus ";
901                         callback = dev->bus->resume;
902                         goto End;
903                 }
904         }
905
906  Driver:
907         if (!callback && dev->driver && dev->driver->pm) {
908                 info = "driver ";
909                 callback = pm_op(dev->driver->pm, state);
910         }
911
912  End:
913         error = dpm_run_callback(callback, dev, state, info);
914         dev->power.is_suspended = false;
915
916  Unlock:
917         device_unlock(dev);
918         dpm_watchdog_clear(&wd);
919
920  Complete:
921         complete_all(&dev->power.completion);
922
923         TRACE_RESUME(error);
924
925         return error;
926 }
927
928 static void async_resume(void *data, async_cookie_t cookie)
929 {
930         struct device *dev = (struct device *)data;
931         int error;
932
933         error = device_resume(dev, pm_transition, true);
934         if (error)
935                 pm_dev_err(dev, pm_transition, " async", error);
936         put_device(dev);
937 }
938
939 /**
940  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
941  * @state: PM transition of the system being carried out.
942  *
943  * Execute the appropriate "resume" callback for all devices whose status
944  * indicates that they are suspended.
945  */
946 void dpm_resume(pm_message_t state)
947 {
948         struct device *dev;
949         ktime_t starttime = ktime_get();
950
951         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
952         might_sleep();
953
954         mutex_lock(&dpm_list_mtx);
955         pm_transition = state;
956         async_error = 0;
957
958         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
959                 reinit_completion(&dev->power.completion);
960                 if (is_async(dev)) {
961                         get_device(dev);
962                         async_schedule(async_resume, dev);
963                 }
964         }
965
966         while (!list_empty(&dpm_suspended_list)) {
967                 dev = to_device(dpm_suspended_list.next);
968                 get_device(dev);
969                 if (!is_async(dev)) {
970                         int error;
971
972                         mutex_unlock(&dpm_list_mtx);
973
974                         error = device_resume(dev, state, false);
975                         if (error) {
976                                 suspend_stats.failed_resume++;
977                                 dpm_save_failed_step(SUSPEND_RESUME);
978                                 dpm_save_failed_dev(dev_name(dev));
979                                 pm_dev_err(dev, state, "", error);
980                         }
981
982                         mutex_lock(&dpm_list_mtx);
983                 }
984                 if (!list_empty(&dev->power.entry))
985                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
986                 put_device(dev);
987         }
988         mutex_unlock(&dpm_list_mtx);
989         async_synchronize_full();
990         dpm_show_time(starttime, state, 0, NULL);
991
992         cpufreq_resume();
993         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
994 }
995
996 /**
997  * device_complete - Complete a PM transition for given device.
998  * @dev: Device to handle.
999  * @state: PM transition of the system being carried out.
1000  */
1001 static void device_complete(struct device *dev, pm_message_t state)
1002 {
1003         void (*callback)(struct device *) = NULL;
1004         const char *info = NULL;
1005
1006         if (dev->power.syscore)
1007                 return;
1008
1009         device_lock(dev);
1010
1011         if (dev->pm_domain) {
1012                 info = "completing power domain ";
1013                 callback = dev->pm_domain->ops.complete;
1014         } else if (dev->type && dev->type->pm) {
1015                 info = "completing type ";
1016                 callback = dev->type->pm->complete;
1017         } else if (dev->class && dev->class->pm) {
1018                 info = "completing class ";
1019                 callback = dev->class->pm->complete;
1020         } else if (dev->bus && dev->bus->pm) {
1021                 info = "completing bus ";
1022                 callback = dev->bus->pm->complete;
1023         }
1024
1025         if (!callback && dev->driver && dev->driver->pm) {
1026                 info = "completing driver ";
1027                 callback = dev->driver->pm->complete;
1028         }
1029
1030         if (callback) {
1031                 pm_dev_dbg(dev, state, info);
1032                 callback(dev);
1033         }
1034
1035         device_unlock(dev);
1036
1037         pm_runtime_put(dev);
1038 }
1039
1040 /**
1041  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1042  * @state: PM transition of the system being carried out.
1043  *
1044  * Execute the ->complete() callbacks for all devices whose PM status is not
1045  * DPM_ON (this allows new devices to be registered).
1046  */
1047 void dpm_complete(pm_message_t state)
1048 {
1049         struct list_head list;
1050
1051         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1052         might_sleep();
1053
1054         INIT_LIST_HEAD(&list);
1055         mutex_lock(&dpm_list_mtx);
1056         while (!list_empty(&dpm_prepared_list)) {
1057                 struct device *dev = to_device(dpm_prepared_list.prev);
1058
1059                 get_device(dev);
1060                 dev->power.is_prepared = false;
1061                 list_move(&dev->power.entry, &list);
1062                 mutex_unlock(&dpm_list_mtx);
1063
1064                 trace_device_pm_callback_start(dev, "", state.event);
1065                 device_complete(dev, state);
1066                 trace_device_pm_callback_end(dev, 0);
1067
1068                 mutex_lock(&dpm_list_mtx);
1069                 put_device(dev);
1070         }
1071         list_splice(&list, &dpm_list);
1072         mutex_unlock(&dpm_list_mtx);
1073
1074         /* Allow device probing and trigger re-probing of deferred devices */
1075         device_unblock_probing();
1076         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1077 }
1078
1079 /**
1080  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1081  * @state: PM transition of the system being carried out.
1082  *
1083  * Execute "resume" callbacks for all devices and complete the PM transition of
1084  * the system.
1085  */
1086 void dpm_resume_end(pm_message_t state)
1087 {
1088         dpm_resume(state);
1089         dpm_complete(state);
1090 }
1091 EXPORT_SYMBOL_GPL(dpm_resume_end);
1092
1093
1094 /*------------------------- Suspend routines -------------------------*/
1095
1096 /**
1097  * resume_event - Return a "resume" message for given "suspend" sleep state.
1098  * @sleep_state: PM message representing a sleep state.
1099  *
1100  * Return a PM message representing the resume event corresponding to given
1101  * sleep state.
1102  */
1103 static pm_message_t resume_event(pm_message_t sleep_state)
1104 {
1105         switch (sleep_state.event) {
1106         case PM_EVENT_SUSPEND:
1107                 return PMSG_RESUME;
1108         case PM_EVENT_FREEZE:
1109         case PM_EVENT_QUIESCE:
1110                 return PMSG_RECOVER;
1111         case PM_EVENT_HIBERNATE:
1112                 return PMSG_RESTORE;
1113         }
1114         return PMSG_ON;
1115 }
1116
1117 /**
1118  * device_suspend_noirq - Execute a "late suspend" callback for given device.
1119  * @dev: Device to handle.
1120  * @state: PM transition of the system being carried out.
1121  * @async: If true, the device is being suspended asynchronously.
1122  *
1123  * The driver of @dev will not receive interrupts while this function is being
1124  * executed.
1125  */
1126 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1127 {
1128         pm_callback_t callback = NULL;
1129         const char *info = NULL;
1130         int error = 0;
1131
1132         TRACE_DEVICE(dev);
1133         TRACE_SUSPEND(0);
1134
1135         dpm_wait_for_subordinate(dev, async);
1136
1137         if (async_error)
1138                 goto Complete;
1139
1140         if (pm_wakeup_pending()) {
1141                 async_error = -EBUSY;
1142                 goto Complete;
1143         }
1144
1145         if (dev->power.syscore || dev->power.direct_complete)
1146                 goto Complete;
1147
1148         if (dev->pm_domain) {
1149                 info = "noirq power domain ";
1150                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1151         } else if (dev->type && dev->type->pm) {
1152                 info = "noirq type ";
1153                 callback = pm_noirq_op(dev->type->pm, state);
1154         } else if (dev->class && dev->class->pm) {
1155                 info = "noirq class ";
1156                 callback = pm_noirq_op(dev->class->pm, state);
1157         } else if (dev->bus && dev->bus->pm) {
1158                 info = "noirq bus ";
1159                 callback = pm_noirq_op(dev->bus->pm, state);
1160         }
1161
1162         if (!callback && dev->driver && dev->driver->pm) {
1163                 info = "noirq driver ";
1164                 callback = pm_noirq_op(dev->driver->pm, state);
1165         }
1166
1167         error = dpm_run_callback(callback, dev, state, info);
1168         if (!error)
1169                 dev->power.is_noirq_suspended = true;
1170         else
1171                 async_error = error;
1172
1173 Complete:
1174         complete_all(&dev->power.completion);
1175         TRACE_SUSPEND(error);
1176         return error;
1177 }
1178
1179 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1180 {
1181         struct device *dev = (struct device *)data;
1182         int error;
1183
1184         error = __device_suspend_noirq(dev, pm_transition, true);
1185         if (error) {
1186                 dpm_save_failed_dev(dev_name(dev));
1187                 pm_dev_err(dev, pm_transition, " async", error);
1188         }
1189
1190         put_device(dev);
1191 }
1192
1193 static int device_suspend_noirq(struct device *dev)
1194 {
1195         reinit_completion(&dev->power.completion);
1196
1197         if (is_async(dev)) {
1198                 get_device(dev);
1199                 async_schedule(async_suspend_noirq, dev);
1200                 return 0;
1201         }
1202         return __device_suspend_noirq(dev, pm_transition, false);
1203 }
1204
1205 void dpm_noirq_begin(void)
1206 {
1207         cpuidle_pause();
1208         device_wakeup_arm_wake_irqs();
1209         suspend_device_irqs();
1210 }
1211
1212 int dpm_noirq_suspend_devices(pm_message_t state)
1213 {
1214         ktime_t starttime = ktime_get();
1215         int error = 0;
1216
1217         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1218         mutex_lock(&dpm_list_mtx);
1219         pm_transition = state;
1220         async_error = 0;
1221
1222         while (!list_empty(&dpm_late_early_list)) {
1223                 struct device *dev = to_device(dpm_late_early_list.prev);
1224
1225                 get_device(dev);
1226                 mutex_unlock(&dpm_list_mtx);
1227
1228                 error = device_suspend_noirq(dev);
1229
1230                 mutex_lock(&dpm_list_mtx);
1231                 if (error) {
1232                         pm_dev_err(dev, state, " noirq", error);
1233                         dpm_save_failed_dev(dev_name(dev));
1234                         put_device(dev);
1235                         break;
1236                 }
1237                 if (!list_empty(&dev->power.entry))
1238                         list_move(&dev->power.entry, &dpm_noirq_list);
1239                 put_device(dev);
1240
1241                 if (async_error)
1242                         break;
1243         }
1244         mutex_unlock(&dpm_list_mtx);
1245         async_synchronize_full();
1246         if (!error)
1247                 error = async_error;
1248
1249         if (error) {
1250                 suspend_stats.failed_suspend_noirq++;
1251                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1252         }
1253         dpm_show_time(starttime, state, error, "noirq");
1254         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1255         return error;
1256 }
1257
1258 /**
1259  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1260  * @state: PM transition of the system being carried out.
1261  *
1262  * Prevent device drivers' interrupt handlers from being called and invoke
1263  * "noirq" suspend callbacks for all non-sysdev devices.
1264  */
1265 int dpm_suspend_noirq(pm_message_t state)
1266 {
1267         int ret;
1268
1269         dpm_noirq_begin();
1270         ret = dpm_noirq_suspend_devices(state);
1271         if (ret)
1272                 dpm_resume_noirq(resume_event(state));
1273
1274         return ret;
1275 }
1276
1277 /**
1278  * device_suspend_late - Execute a "late suspend" callback for given device.
1279  * @dev: Device to handle.
1280  * @state: PM transition of the system being carried out.
1281  * @async: If true, the device is being suspended asynchronously.
1282  *
1283  * Runtime PM is disabled for @dev while this function is being executed.
1284  */
1285 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1286 {
1287         pm_callback_t callback = NULL;
1288         const char *info = NULL;
1289         int error = 0;
1290
1291         TRACE_DEVICE(dev);
1292         TRACE_SUSPEND(0);
1293
1294         __pm_runtime_disable(dev, false);
1295
1296         dpm_wait_for_subordinate(dev, async);
1297
1298         if (async_error)
1299                 goto Complete;
1300
1301         if (pm_wakeup_pending()) {
1302                 async_error = -EBUSY;
1303                 goto Complete;
1304         }
1305
1306         if (dev->power.syscore || dev->power.direct_complete)
1307                 goto Complete;
1308
1309         if (dev->pm_domain) {
1310                 info = "late power domain ";
1311                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1312         } else if (dev->type && dev->type->pm) {
1313                 info = "late type ";
1314                 callback = pm_late_early_op(dev->type->pm, state);
1315         } else if (dev->class && dev->class->pm) {
1316                 info = "late class ";
1317                 callback = pm_late_early_op(dev->class->pm, state);
1318         } else if (dev->bus && dev->bus->pm) {
1319                 info = "late bus ";
1320                 callback = pm_late_early_op(dev->bus->pm, state);
1321         }
1322
1323         if (!callback && dev->driver && dev->driver->pm) {
1324                 info = "late driver ";
1325                 callback = pm_late_early_op(dev->driver->pm, state);
1326         }
1327
1328         error = dpm_run_callback(callback, dev, state, info);
1329         if (!error)
1330                 dev->power.is_late_suspended = true;
1331         else
1332                 async_error = error;
1333
1334 Complete:
1335         TRACE_SUSPEND(error);
1336         complete_all(&dev->power.completion);
1337         return error;
1338 }
1339
1340 static void async_suspend_late(void *data, async_cookie_t cookie)
1341 {
1342         struct device *dev = (struct device *)data;
1343         int error;
1344
1345         error = __device_suspend_late(dev, pm_transition, true);
1346         if (error) {
1347                 dpm_save_failed_dev(dev_name(dev));
1348                 pm_dev_err(dev, pm_transition, " async", error);
1349         }
1350         put_device(dev);
1351 }
1352
1353 static int device_suspend_late(struct device *dev)
1354 {
1355         reinit_completion(&dev->power.completion);
1356
1357         if (is_async(dev)) {
1358                 get_device(dev);
1359                 async_schedule(async_suspend_late, dev);
1360                 return 0;
1361         }
1362
1363         return __device_suspend_late(dev, pm_transition, false);
1364 }
1365
1366 /**
1367  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1368  * @state: PM transition of the system being carried out.
1369  */
1370 int dpm_suspend_late(pm_message_t state)
1371 {
1372         ktime_t starttime = ktime_get();
1373         int error = 0;
1374
1375         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1376         mutex_lock(&dpm_list_mtx);
1377         pm_transition = state;
1378         async_error = 0;
1379
1380         while (!list_empty(&dpm_suspended_list)) {
1381                 struct device *dev = to_device(dpm_suspended_list.prev);
1382
1383                 get_device(dev);
1384                 mutex_unlock(&dpm_list_mtx);
1385
1386                 error = device_suspend_late(dev);
1387
1388                 mutex_lock(&dpm_list_mtx);
1389                 if (!list_empty(&dev->power.entry))
1390                         list_move(&dev->power.entry, &dpm_late_early_list);
1391
1392                 if (error) {
1393                         pm_dev_err(dev, state, " late", error);
1394                         dpm_save_failed_dev(dev_name(dev));
1395                         put_device(dev);
1396                         break;
1397                 }
1398                 put_device(dev);
1399
1400                 if (async_error)
1401                         break;
1402         }
1403         mutex_unlock(&dpm_list_mtx);
1404         async_synchronize_full();
1405         if (!error)
1406                 error = async_error;
1407         if (error) {
1408                 suspend_stats.failed_suspend_late++;
1409                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1410                 dpm_resume_early(resume_event(state));
1411         }
1412         dpm_show_time(starttime, state, error, "late");
1413         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1414         return error;
1415 }
1416
1417 /**
1418  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1419  * @state: PM transition of the system being carried out.
1420  */
1421 int dpm_suspend_end(pm_message_t state)
1422 {
1423         int error = dpm_suspend_late(state);
1424         if (error)
1425                 return error;
1426
1427         error = dpm_suspend_noirq(state);
1428         if (error) {
1429                 dpm_resume_early(resume_event(state));
1430                 return error;
1431         }
1432
1433         return 0;
1434 }
1435 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1436
1437 /**
1438  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1439  * @dev: Device to suspend.
1440  * @state: PM transition of the system being carried out.
1441  * @cb: Suspend callback to execute.
1442  * @info: string description of caller.
1443  */
1444 static int legacy_suspend(struct device *dev, pm_message_t state,
1445                           int (*cb)(struct device *dev, pm_message_t state),
1446                           const char *info)
1447 {
1448         int error;
1449         ktime_t calltime;
1450
1451         calltime = initcall_debug_start(dev);
1452
1453         trace_device_pm_callback_start(dev, info, state.event);
1454         error = cb(dev, state);
1455         trace_device_pm_callback_end(dev, error);
1456         suspend_report_result(cb, error);
1457
1458         initcall_debug_report(dev, calltime, error, state, info);
1459
1460         return error;
1461 }
1462
1463 static void dpm_clear_suppliers_direct_complete(struct device *dev)
1464 {
1465         struct device_link *link;
1466         int idx;
1467
1468         idx = device_links_read_lock();
1469
1470         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1471                 spin_lock_irq(&link->supplier->power.lock);
1472                 link->supplier->power.direct_complete = false;
1473                 spin_unlock_irq(&link->supplier->power.lock);
1474         }
1475
1476         device_links_read_unlock(idx);
1477 }
1478
1479 /**
1480  * device_suspend - Execute "suspend" callbacks for given device.
1481  * @dev: Device to handle.
1482  * @state: PM transition of the system being carried out.
1483  * @async: If true, the device is being suspended asynchronously.
1484  */
1485 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1486 {
1487         pm_callback_t callback = NULL;
1488         const char *info = NULL;
1489         int error = 0;
1490         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1491
1492         TRACE_DEVICE(dev);
1493         TRACE_SUSPEND(0);
1494
1495         dpm_wait_for_subordinate(dev, async);
1496
1497         if (async_error) {
1498                 dev->power.direct_complete = false;
1499                 goto Complete;
1500         }
1501
1502         /*
1503          * Wait for possible runtime PM transitions of the device in progress
1504          * to complete and if there's a runtime resume request pending for it,
1505          * resume it before proceeding with invoking the system-wide suspend
1506          * callbacks for it.
1507          *
1508          * If the system-wide suspend callbacks below change the configuration
1509          * of the device, they must disable runtime PM for it or otherwise
1510          * ensure that its runtime-resume callbacks will not be confused by that
1511          * change in case they are invoked going forward.
1512          */
1513         pm_runtime_barrier(dev);
1514
1515         if (pm_wakeup_pending()) {
1516                 dev->power.direct_complete = false;
1517                 async_error = -EBUSY;
1518                 goto Complete;
1519         }
1520
1521         if (dev->power.syscore)
1522                 goto Complete;
1523
1524         /* Avoid direct_complete to let wakeup_path propagate. */
1525         if (device_may_wakeup(dev) || dev->power.wakeup_path)
1526                 dev->power.direct_complete = false;
1527
1528         if (dev->power.direct_complete) {
1529                 if (pm_runtime_status_suspended(dev)) {
1530                         pm_runtime_disable(dev);
1531                         if (pm_runtime_status_suspended(dev))
1532                                 goto Complete;
1533
1534                         pm_runtime_enable(dev);
1535                 }
1536                 dev->power.direct_complete = false;
1537         }
1538
1539         dpm_watchdog_set(&wd, dev);
1540         device_lock(dev);
1541
1542         if (dev->pm_domain) {
1543                 info = "power domain ";
1544                 callback = pm_op(&dev->pm_domain->ops, state);
1545                 goto Run;
1546         }
1547
1548         if (dev->type && dev->type->pm) {
1549                 info = "type ";
1550                 callback = pm_op(dev->type->pm, state);
1551                 goto Run;
1552         }
1553
1554         if (dev->class) {
1555                 if (dev->class->pm) {
1556                         info = "class ";
1557                         callback = pm_op(dev->class->pm, state);
1558                         goto Run;
1559                 } else if (dev->class->suspend) {
1560                         pm_dev_dbg(dev, state, "legacy class ");
1561                         error = legacy_suspend(dev, state, dev->class->suspend,
1562                                                 "legacy class ");
1563                         goto End;
1564                 }
1565         }
1566
1567         if (dev->bus) {
1568                 if (dev->bus->pm) {
1569                         info = "bus ";
1570                         callback = pm_op(dev->bus->pm, state);
1571                 } else if (dev->bus->suspend) {
1572                         pm_dev_dbg(dev, state, "legacy bus ");
1573                         error = legacy_suspend(dev, state, dev->bus->suspend,
1574                                                 "legacy bus ");
1575                         goto End;
1576                 }
1577         }
1578
1579  Run:
1580         if (!callback && dev->driver && dev->driver->pm) {
1581                 info = "driver ";
1582                 callback = pm_op(dev->driver->pm, state);
1583         }
1584
1585         error = dpm_run_callback(callback, dev, state, info);
1586
1587  End:
1588         if (!error) {
1589                 struct device *parent = dev->parent;
1590
1591                 dev->power.is_suspended = true;
1592                 if (parent) {
1593                         spin_lock_irq(&parent->power.lock);
1594
1595                         dev->parent->power.direct_complete = false;
1596                         if (dev->power.wakeup_path
1597                             && !dev->parent->power.ignore_children)
1598                                 dev->parent->power.wakeup_path = true;
1599
1600                         spin_unlock_irq(&parent->power.lock);
1601                 }
1602                 dpm_clear_suppliers_direct_complete(dev);
1603         }
1604
1605         device_unlock(dev);
1606         dpm_watchdog_clear(&wd);
1607
1608  Complete:
1609         if (error)
1610                 async_error = error;
1611
1612         complete_all(&dev->power.completion);
1613         TRACE_SUSPEND(error);
1614         return error;
1615 }
1616
1617 static void async_suspend(void *data, async_cookie_t cookie)
1618 {
1619         struct device *dev = (struct device *)data;
1620         int error;
1621
1622         error = __device_suspend(dev, pm_transition, true);
1623         if (error) {
1624                 dpm_save_failed_dev(dev_name(dev));
1625                 pm_dev_err(dev, pm_transition, " async", error);
1626         }
1627
1628         put_device(dev);
1629 }
1630
1631 static int device_suspend(struct device *dev)
1632 {
1633         reinit_completion(&dev->power.completion);
1634
1635         if (is_async(dev)) {
1636                 get_device(dev);
1637                 async_schedule(async_suspend, dev);
1638                 return 0;
1639         }
1640
1641         return __device_suspend(dev, pm_transition, false);
1642 }
1643
1644 /**
1645  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1646  * @state: PM transition of the system being carried out.
1647  */
1648 int dpm_suspend(pm_message_t state)
1649 {
1650         ktime_t starttime = ktime_get();
1651         int error = 0;
1652
1653         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1654         might_sleep();
1655
1656         cpufreq_suspend();
1657
1658         mutex_lock(&dpm_list_mtx);
1659         pm_transition = state;
1660         async_error = 0;
1661         while (!list_empty(&dpm_prepared_list)) {
1662                 struct device *dev = to_device(dpm_prepared_list.prev);
1663
1664                 get_device(dev);
1665                 mutex_unlock(&dpm_list_mtx);
1666
1667                 error = device_suspend(dev);
1668
1669                 mutex_lock(&dpm_list_mtx);
1670                 if (error) {
1671                         pm_dev_err(dev, state, "", error);
1672                         dpm_save_failed_dev(dev_name(dev));
1673                         put_device(dev);
1674                         break;
1675                 }
1676                 if (!list_empty(&dev->power.entry))
1677                         list_move(&dev->power.entry, &dpm_suspended_list);
1678                 put_device(dev);
1679                 if (async_error)
1680                         break;
1681         }
1682         mutex_unlock(&dpm_list_mtx);
1683         async_synchronize_full();
1684         if (!error)
1685                 error = async_error;
1686         if (error) {
1687                 suspend_stats.failed_suspend++;
1688                 dpm_save_failed_step(SUSPEND_SUSPEND);
1689         }
1690         dpm_show_time(starttime, state, error, NULL);
1691         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1692         return error;
1693 }
1694
1695 /**
1696  * device_prepare - Prepare a device for system power transition.
1697  * @dev: Device to handle.
1698  * @state: PM transition of the system being carried out.
1699  *
1700  * Execute the ->prepare() callback(s) for given device.  No new children of the
1701  * device may be registered after this function has returned.
1702  */
1703 static int device_prepare(struct device *dev, pm_message_t state)
1704 {
1705         int (*callback)(struct device *) = NULL;
1706         int ret = 0;
1707
1708         if (dev->power.syscore)
1709                 return 0;
1710
1711         /*
1712          * If a device's parent goes into runtime suspend at the wrong time,
1713          * it won't be possible to resume the device.  To prevent this we
1714          * block runtime suspend here, during the prepare phase, and allow
1715          * it again during the complete phase.
1716          */
1717         pm_runtime_get_noresume(dev);
1718
1719         device_lock(dev);
1720
1721         dev->power.wakeup_path = device_may_wakeup(dev);
1722
1723         if (dev->power.no_pm_callbacks) {
1724                 ret = 1;        /* Let device go direct_complete */
1725                 goto unlock;
1726         }
1727
1728         if (dev->pm_domain)
1729                 callback = dev->pm_domain->ops.prepare;
1730         else if (dev->type && dev->type->pm)
1731                 callback = dev->type->pm->prepare;
1732         else if (dev->class && dev->class->pm)
1733                 callback = dev->class->pm->prepare;
1734         else if (dev->bus && dev->bus->pm)
1735                 callback = dev->bus->pm->prepare;
1736
1737         if (!callback && dev->driver && dev->driver->pm)
1738                 callback = dev->driver->pm->prepare;
1739
1740         if (callback)
1741                 ret = callback(dev);
1742
1743 unlock:
1744         device_unlock(dev);
1745
1746         if (ret < 0) {
1747                 suspend_report_result(callback, ret);
1748                 pm_runtime_put(dev);
1749                 return ret;
1750         }
1751         /*
1752          * A positive return value from ->prepare() means "this device appears
1753          * to be runtime-suspended and its state is fine, so if it really is
1754          * runtime-suspended, you can leave it in that state provided that you
1755          * will do the same thing with all of its descendants".  This only
1756          * applies to suspend transitions, however.
1757          */
1758         spin_lock_irq(&dev->power.lock);
1759         dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1760         spin_unlock_irq(&dev->power.lock);
1761         return 0;
1762 }
1763
1764 /**
1765  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1766  * @state: PM transition of the system being carried out.
1767  *
1768  * Execute the ->prepare() callback(s) for all devices.
1769  */
1770 int dpm_prepare(pm_message_t state)
1771 {
1772         int error = 0;
1773
1774         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1775         might_sleep();
1776
1777         /*
1778          * Give a chance for the known devices to complete their probes, before
1779          * disable probing of devices. This sync point is important at least
1780          * at boot time + hibernation restore.
1781          */
1782         wait_for_device_probe();
1783         /*
1784          * It is unsafe if probing of devices will happen during suspend or
1785          * hibernation and system behavior will be unpredictable in this case.
1786          * So, let's prohibit device's probing here and defer their probes
1787          * instead. The normal behavior will be restored in dpm_complete().
1788          */
1789         device_block_probing();
1790
1791         mutex_lock(&dpm_list_mtx);
1792         while (!list_empty(&dpm_list)) {
1793                 struct device *dev = to_device(dpm_list.next);
1794
1795                 get_device(dev);
1796                 mutex_unlock(&dpm_list_mtx);
1797
1798                 trace_device_pm_callback_start(dev, "", state.event);
1799                 error = device_prepare(dev, state);
1800                 trace_device_pm_callback_end(dev, error);
1801
1802                 mutex_lock(&dpm_list_mtx);
1803                 if (error) {
1804                         if (error == -EAGAIN) {
1805                                 put_device(dev);
1806                                 error = 0;
1807                                 continue;
1808                         }
1809                         printk(KERN_INFO "PM: Device %s not prepared "
1810                                 "for power transition: code %d\n",
1811                                 dev_name(dev), error);
1812                         put_device(dev);
1813                         break;
1814                 }
1815                 dev->power.is_prepared = true;
1816                 if (!list_empty(&dev->power.entry))
1817                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1818                 put_device(dev);
1819         }
1820         mutex_unlock(&dpm_list_mtx);
1821         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1822         return error;
1823 }
1824
1825 /**
1826  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1827  * @state: PM transition of the system being carried out.
1828  *
1829  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1830  * callbacks for them.
1831  */
1832 int dpm_suspend_start(pm_message_t state)
1833 {
1834         int error;
1835
1836         error = dpm_prepare(state);
1837         if (error) {
1838                 suspend_stats.failed_prepare++;
1839                 dpm_save_failed_step(SUSPEND_PREPARE);
1840         } else
1841                 error = dpm_suspend(state);
1842         return error;
1843 }
1844 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1845
1846 void __suspend_report_result(const char *function, void *fn, int ret)
1847 {
1848         if (ret)
1849                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1850 }
1851 EXPORT_SYMBOL_GPL(__suspend_report_result);
1852
1853 /**
1854  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1855  * @dev: Device to wait for.
1856  * @subordinate: Device that needs to wait for @dev.
1857  */
1858 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1859 {
1860         dpm_wait(dev, subordinate->power.async_suspend);
1861         return async_error;
1862 }
1863 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1864
1865 /**
1866  * dpm_for_each_dev - device iterator.
1867  * @data: data for the callback.
1868  * @fn: function to be called for each device.
1869  *
1870  * Iterate over devices in dpm_list, and call @fn for each device,
1871  * passing it @data.
1872  */
1873 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1874 {
1875         struct device *dev;
1876
1877         if (!fn)
1878                 return;
1879
1880         device_pm_lock();
1881         list_for_each_entry(dev, &dpm_list, power.entry)
1882                 fn(dev, data);
1883         device_pm_unlock();
1884 }
1885 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1886
1887 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1888 {
1889         if (!ops)
1890                 return true;
1891
1892         return !ops->prepare &&
1893                !ops->suspend &&
1894                !ops->suspend_late &&
1895                !ops->suspend_noirq &&
1896                !ops->resume_noirq &&
1897                !ops->resume_early &&
1898                !ops->resume &&
1899                !ops->complete;
1900 }
1901
1902 void device_pm_check_callbacks(struct device *dev)
1903 {
1904         unsigned long flags;
1905
1906         spin_lock_irqsave(&dev->power.lock, flags);
1907         dev->power.no_pm_callbacks =
1908                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1909                  !dev->bus->suspend && !dev->bus->resume)) &&
1910                 (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
1911                  !dev->class->suspend && !dev->class->resume)) &&
1912                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1913                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1914                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1915                  !dev->driver->suspend && !dev->driver->resume));
1916         spin_unlock_irqrestore(&dev->power.lock, flags);
1917 }