/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
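
/*
 * For example, RPM_GET_CALLBACK(dev, runtime_suspend) resolves to the
 * ->runtime_suspend() member of the highest-priority dev_pm_ops found by
 * __rpm_get_callback(): pm_domain first, then type, class, bus, and
 * finally the driver's own dev_pm_ops.
 */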

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = READ_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;	/* Adjust to be nonzero. */
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
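
/*
 * Illustrative sketch (not part of the original file): a driver that uses
 * autosuspend typically refreshes power.last_busy and drops its usage count
 * when an I/O burst ends, so the expiration time computed above keeps moving
 * forward while the device is busy.  The "example_" name is hypothetical.
 */
static int __maybe_unused example_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* update power.last_busy */
	return pm_runtime_put_autosuspend(dev);	/* may schedule a suspend */
}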

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers, to solve the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of its ancestors (or the
 *     block device itself), the deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  This
 *     situation was first pointed out by Alan Stern.  Network devices
 *     are involved in iSCSI-style situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all of its
		 * children don't set the flag, because the ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
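
/*
 * Illustrative sketch (not part of the original file): a block or network
 * driver would set the flag after registering the device and clear it
 * before unregistering, per the kerneldoc above.  The "example_" names are
 * hypothetical and the registration is simplified.
 */
static int __maybe_unused example_blk_register(struct device *dev)
{
	int ret = device_add(dev);

	if (ret)
		return ret;
	pm_runtime_set_memalloc_noio(dev, true);	/* after device_add() */
	return 0;
}

static void __maybe_unused example_blk_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);	/* before device_del() */
	device_del(dev);
}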

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);
	}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend() with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = callback(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be regarded as part of an iSCSI block
		 * device, so a network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  Once the
 * callback has succeeded, if a deferred resume was requested while it
 * was running then carry it out; otherwise send an idle notification
 * for the device's parent (if the suspend succeeded and both the
 * parent's power.ignore_children flag and the device's power.irq_safe
 * flag are unset).  If ->runtime_suspend() failed with -EAGAIN or
 * -EBUSY, and if the RPM_AUTO flag is set and the next autosuspend-delay
 * expiration time is in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @t: Timer that expired, embedded in the device's power structure.
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(struct timer_list *t)
{
	struct device *dev = from_timer(dev, t, power.suspend_timer);
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
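
/*
 * Illustrative sketch (not part of the original file): request a suspend
 * roughly 100 ms from now; a delay of 0 queues the request immediately.
 * The "example_" name is hypothetical.
 */
static void __maybe_unused example_delayed_suspend(struct device *dev)
{
	int ret = pm_schedule_suspend(dev, 100);	/* delay in ms */

	if (ret < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
}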

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
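
/*
 * Note: drivers normally reach this function through the static inline
 * helpers in <linux/pm_runtime.h>; a minimal sketch of that mapping,
 * assuming the usual wrapper definitions:
 *
 *	pm_runtime_idle(dev)     -> __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)     -> __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)      -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev) -> __pm_runtime_idle(dev, RPM_GET_PUT)
 */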

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
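
/*
 * Illustrative sketch (not part of the original file): the canonical
 * get/put pattern around device accesses.  pm_runtime_get_sync() maps to
 * __pm_runtime_resume(dev, RPM_GET_PUT) and resumes the device before
 * returning; pm_runtime_put() drops the count and queues an idle check.
 * The "example_" name is hypothetical.
 */
static int __maybe_unused example_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage counter */
		return ret;
	}

	/* ... access the (now active) hardware here ... */

	pm_runtime_put(dev);
	return 0;
}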

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
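
/*
 * Illustrative sketch (not part of the original file): opportunistic access
 * that must not wake a suspended device, e.g. from a watchdog or statistics
 * path.  The "example_" name is hypothetical.
 */
static void __maybe_unused example_poll_counters(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;	/* suspended, unused, or runtime PM disabled: skip */

	/* ... read registers while the device is guaranteed active ... */

	pm_runtime_put(dev);
}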

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
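
/*
 * Illustrative sketch (not part of the original file): drivers call this
 * through pm_runtime_set_active()/pm_runtime_set_suspended().  A device that
 * is already powered at probe time is typically declared active before
 * runtime PM is enabled.  The "example_" name is hypothetical.
 */
static int __maybe_unused example_probe_powered(struct device *dev)
{
	int ret = pm_runtime_set_active(dev);	/* status := RPM_ACTIVE */

	if (ret)
		return ret;	/* e.g. -EBUSY when the parent is inactive */

	pm_runtime_enable(dev);
	return 0;
}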

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
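
/*
 * Illustrative sketch (not part of the original file): pm_runtime_enable()
 * must be balanced against the initial disable_depth of 1 set by
 * pm_runtime_init(), or against an explicit pm_runtime_disable(), so a
 * remove path usually undoes what probe did.  The "example_" name is
 * hypothetical.
 */
static void __maybe_unused example_remove(struct device *dev)
{
	pm_runtime_disable(dev);	/* balances pm_runtime_enable() in probe */
	pm_runtime_set_suspended(dev);
}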

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
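
/*
 * Illustrative sketch (not part of the original file): the usual probe-time
 * autosuspend setup via the pm_runtime_use_autosuspend() wrapper.  The
 * 2000 ms delay is an arbitrary example value; the "example_" name is
 * hypothetical.
 */
static void __maybe_unused example_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms of inactivity */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}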

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (as many times as needed).
 *
 * Links with the DL_FLAG_MANAGED flag unset are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put_noidle(dev);
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
			refcount_inc(&link->rpm_active);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	unsigned long flags;
	bool put;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			spin_lock_irqsave(&dev->power.lock, flags);
			put = pm_runtime_status_suspended(dev) &&
			      refcount_dec_not_one(&link->rpm_active);
			spin_unlock_irqrestore(&dev->power.lock, flags);
			if (put)
				pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state.  It should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device into full power, if it is
 * expected to be used on system resume.  Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
 out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
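
/*
 * Illustrative sketch (not part of the original file): a common way to reuse
 * the runtime PM callbacks for system sleep is to plug the two helpers above
 * into a driver's dev_pm_ops.  The "example_" name is hypothetical.
 */
static const struct dev_pm_ops example_pm_ops __maybe_unused = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, NULL, NULL)	/* driver callbacks go here */
};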