/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *		serialize accesses to xtime/lost_ticks).
 *				Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *				Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *		Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
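
/*
 * Illustrative arithmetic, assuming CONFIG_BASE_SMALL=0 (TVR_BITS=8,
 * TVN_BITS=6): tv1 has 256 single-jiffy buckets and tv2..tv5 have 64
 * buckets each, every level covering 64 times the span of the level
 * below it:
 *
 *	tv1: up to 2^8  jiffies out, 1-jiffy buckets
 *	tv2: up to 2^14 jiffies out, 2^8-jiffy buckets
 *	tv3: up to 2^20 jiffies out, 2^14-jiffy buckets
 *	tv4: up to 2^26 jiffies out, 2^20-jiffy buckets
 *	tv5: up to 2^32 jiffies out, 2^26-jiffy buckets
 *
 * MAX_TVAL is thus 2^32 - 1, the furthest expiry the wheel can hold
 * before __internal_add_timer() clamps the timeout.
 */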
struct tvec {
	struct hlist_head vec[TVN_SIZE];
};

struct tvec_root {
	struct hlist_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	int cpu;
	bool migration_enabled;
	bool nohz_active;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(tvec_bases.migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(tvec_bases.migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(tvec_bases.nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}
int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned)
{
	if (pinned || !base->migration_enabled)
		return this_cpu_ptr(&tvec_bases);
	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
}
#else
static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned)
{
	return this_cpu_ptr(&tvec_bases);
}
#endif
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up)	/* round down */
		j = j - rem;
	else				/* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * original value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
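
/*
 * Example (a sketch; "my_timer" is a hypothetical timer driving a
 * periodic, precision-insensitive housekeeping job):
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 10 * HZ));
 *
 * With HZ=1000 and an unskewed target of 1010500, rem = 500 >= HZ/4,
 * so the target rounds up to 1011000: the timer fires on a whole
 * second and can share a wakeup with other rounded timers.
 */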
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);
/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
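
/*
 * Example (a sketch; "my_timer" is hypothetical): allow the expiry to
 * land anywhere in [jiffies + HZ, jiffies + HZ + HZ/10], giving the
 * subsystem room to batch this timer with its neighbours:
 *
 *	set_timer_slack(&my_timer, HZ / 10);
 *	mod_timer(&my_timer, jiffies + HZ);
 */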
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct hlist_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}

	hlist_add_head(&timer->entry, vec);
}
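
/*
 * Worked example of the bucket math above (illustrative numbers,
 * TVR_BITS=8, TVN_BITS=6): with base->timer_jiffies = 1000 and
 * timer->expires = 1300, idx = 300 >= TVR_SIZE, so the timer goes to
 * tv2 at slot (1300 >> 8) & 63 = 5. When base->timer_jiffies reaches
 * 1280 the slot is cascaded back into tv1, where the timer waits in
 * bucket 1300 & 255 = 20 for its final 20 jiffies.
 */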
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	/* Advance base->timer_jiffies, if the base is empty */
	if (!base->all_timers++)
		base->timer_jiffies = jiffies;

	__internal_add_timer(base, timer);

	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(), lets deal
	 * with that later.
	 */
	if (base->nohz_active) {
		if (!(timer->flags & TIMER_DEFERRABLE) ||
		    tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
	}
}
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.pprev == NULL &&
		    timer->entry.next == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.next == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
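
/*
 * Example of the on-stack pattern (a sketch; "my_func" is
 * hypothetical). A timer living on the stack must be registered with
 * and removed from the object tracker, as schedule_timeout() below
 * does:
 *
 *	struct timer_list t;
 *
 *	setup_timer_on_stack(&t, my_func, 0);
 *	mod_timer(&t, jiffies + HZ);
 *	...
 *	del_timer_sync(&t);
 *	destroy_timer_on_stack(&t);
 */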
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
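
/*
 * Example (a sketch; "my_timer" and "my_func" are hypothetical): most
 * callers reach this function through the init_timer()/setup_timer()
 * wrappers rather than directly:
 *
 *	static void my_func(unsigned long data);
 *	static struct timer_list my_timer;
 *
 *	setup_timer(&my_timer, my_func, 0);
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(500));
 */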
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!(timer->flags & TIMER_DEFERRABLE))
		base->active_timers--;
	base->all_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	/* If this was the last timer, advance base->timer_jiffies */
	if (!--base->all_timers)
		base->timer_jiffies = jiffies;
	return 1;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked and removed from the list, the
 * TIMER_MIGRATING flag is set, FIXME
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct tvec_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	new_base = get_target_base(base, pinned);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
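
/*
 * Worked example (illustrative numbers): expires = 0x4321 with
 * implicit slack (timer->slack < 0) and delta = 0x1000 gives
 * expires_limit = 0x4321 + 0x1000/256 = 0x4331. The highest bit
 * differing between 0x4321 and 0x4331 is bit 4, so mask = 0xf and
 * the result is 0x4331 & ~0xf = 0x4330: any jiffy in
 * [0x4321, 0x4330] was acceptable, and zeroing the low bits
 * maximizes the chance of sharing a bucket with other timers.
 */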
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
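
/*
 * Example (a sketch; "my_watchdog" is hypothetical): the common
 * pattern is to push a pending watchdog further into the future on
 * every event, which takes the fast path above whenever the rounded
 * expiry is unchanged:
 *
 *	mod_timer(&my_watchdog, jiffies + 2 * HZ);
 */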
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
	struct tvec_base *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
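
/*
 * Example (a sketch; "my_timer" is hypothetical): queue the timer so
 * that its handler runs from CPU 2's timer softirq, e.g. to touch
 * that CPU's per-cpu data without extra locking:
 *
 *	my_timer.expires = jiffies + HZ;
 *	add_timer_on(&my_timer, 2);
 */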
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
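
/*
 * Example of the canonical teardown order (a sketch; "my_dev" is a
 * hypothetical driver object whose handler re-arms the timer unless
 * my_dev->shutting_down is set):
 *
 *	my_dev->shutting_down = true;
 *	del_timer_sync(&my_dev->timer);
 *	kfree(my_dev);
 */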
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;
	struct hlist_node *tmp;
	struct hlist_head tv_list;

	hlist_move_list(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct hlist_head work_list;
		struct hlist_head *head = &work_list;
		int index;

		if (!base->all_timers) {
			base->timer_jiffies = jiffies;
			break;
		}

		index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		hlist_move_list(base->tv1.vec + index, head);
		while (!hlist_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = hlist_entry(head->first, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = timer->flags & TIMER_IRQSAFE;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (nte->flags & TIMER_DEFERRABLE)
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			hlist_for_each_entry(nte, varp->vec + slot, entry) {
				if (nte->flags & TIMER_DEFERRABLE)
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej:	base time jiffies
 * @basem:	base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		nextevt = base->next_timer;
		if (time_before_eq(nextevt, basej))
			expires = basem;
		else
			expires = basem + (nextevt - basej) * TICK_NSEC;
	}
	spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
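
/*
 * Example (a sketch): the task state must be set first, otherwise
 * schedule_timeout() returns immediately. To sleep interruptibly for
 * up to one second:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 */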
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

static void migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu_ptr(&tvec_bases, cpu);
	new_base = get_cpu_ptr(&tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	old_base->active_timers = 0;
	old_base->all_timers = 0;

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_ptr(&tvec_bases);
}
static int timer_cpu_notify(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers((long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static inline void timer_register_cpu_notifier(void)
{
	cpu_notifier(timer_cpu_notify, 0);
}
#else
static inline void timer_register_cpu_notifier(void) { }
#endif /* CONFIG_HOTPLUG_CPU */
static void __init init_timer_cpu(int cpu)
{
	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);

	base->cpu = cpu;
	spin_lock_init(&base->lock);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	init_timer_stats();
	timer_register_cpu_notifier();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
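
/*
 * Example (a sketch): a driver waiting roughly 100us for hardware
 * should give the scheduler room to coalesce wakeups instead of
 * demanding an exact expiry:
 *
 *	usleep_range(100, 200);
 */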