1 /*
2  *  linux/drivers/clocksource/arm_arch_timer.c
3  *
4  *  Copyright (C) 2011 ARM Ltd.
5  *  All Rights Reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #define pr_fmt(fmt)     "arm_arch_timer: " fmt
13
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/device.h>
17 #include <linux/smp.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/clockchips.h>
21 #include <linux/clocksource.h>
22 #include <linux/interrupt.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_address.h>
25 #include <linux/io.h>
26 #include <linux/slab.h>
27 #include <linux/sched/clock.h>
28 #include <linux/sched_clock.h>
29 #include <linux/acpi.h>
30
31 #include <asm/arch_timer.h>
32 #include <asm/virt.h>
33
34 #include <clocksource/arm_arch_timer.h>
35
36 #undef pr_fmt
37 #define pr_fmt(fmt) "arch_timer: " fmt
38
39 #define CNTTIDR         0x08
40 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
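/*
 * Per the Arm generic timer spec, CNTTIDR allocates a 4-bit field to each
 * timer frame, and bit 1 of frame n's field advertises virtual timer
 * capability; e.g. for frame n = 2 the macro yields BIT(1) << 8 == BIT(9).
 */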
41
42 #define CNTACR(n)       (0x40 + ((n) * 4))
43 #define CNTACR_RPCT     BIT(0)
44 #define CNTACR_RVCT     BIT(1)
45 #define CNTACR_RFRQ     BIT(2)
46 #define CNTACR_RVOFF    BIT(3)
47 #define CNTACR_RWVT     BIT(4)
48 #define CNTACR_RWPT     BIT(5)
49
50 #define CNTVCT_LO       0x08
51 #define CNTVCT_HI       0x0c
52 #define CNTFRQ          0x10
53 #define CNTP_TVAL       0x28
54 #define CNTP_CTL        0x2c
55 #define CNTV_TVAL       0x38
56 #define CNTV_CTL        0x3c
57
58 static unsigned arch_timers_present __initdata;
59
60 static void __iomem *arch_counter_base;
61
62 struct arch_timer {
63         void __iomem *base;
64         struct clock_event_device evt;
65 };
66
67 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
68
69 static u32 arch_timer_rate;
70 static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
71
72 static struct clock_event_device __percpu *arch_timer_evt;
73
74 static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
75 static bool arch_timer_c3stop;
76 static bool arch_timer_mem_use_virtual;
77 static bool arch_counter_suspend_stop;
78 static bool vdso_default = true;
79
80 static cpumask_t evtstrm_available = CPU_MASK_NONE;
81 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
82
83 static int __init early_evtstrm_cfg(char *buf)
84 {
85         return strtobool(buf, &evtstrm_enable);
86 }
87 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
88
89 /*
90  * Architected system timer support.
91  */
92
93 static __always_inline
94 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
95                           struct clock_event_device *clk)
96 {
97         if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
98                 struct arch_timer *timer = to_arch_timer(clk);
99                 switch (reg) {
100                 case ARCH_TIMER_REG_CTRL:
101                         writel_relaxed(val, timer->base + CNTP_CTL);
102                         break;
103                 case ARCH_TIMER_REG_TVAL:
104                         writel_relaxed(val, timer->base + CNTP_TVAL);
105                         break;
106                 }
107         } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
108                 struct arch_timer *timer = to_arch_timer(clk);
109                 switch (reg) {
110                 case ARCH_TIMER_REG_CTRL:
111                         writel_relaxed(val, timer->base + CNTV_CTL);
112                         break;
113                 case ARCH_TIMER_REG_TVAL:
114                         writel_relaxed(val, timer->base + CNTV_TVAL);
115                         break;
116                 }
117         } else {
118                 arch_timer_reg_write_cp15(access, reg, val);
119         }
120 }
121
122 static __always_inline
123 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
124                         struct clock_event_device *clk)
125 {
126         u32 val;
127
128         if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
129                 struct arch_timer *timer = to_arch_timer(clk);
130                 switch (reg) {
131                 case ARCH_TIMER_REG_CTRL:
132                         val = readl_relaxed(timer->base + CNTP_CTL);
133                         break;
134                 case ARCH_TIMER_REG_TVAL:
135                         val = readl_relaxed(timer->base + CNTP_TVAL);
136                         break;
137                 }
138         } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
139                 struct arch_timer *timer = to_arch_timer(clk);
140                 switch (reg) {
141                 case ARCH_TIMER_REG_CTRL:
142                         val = readl_relaxed(timer->base + CNTV_CTL);
143                         break;
144                 case ARCH_TIMER_REG_TVAL:
145                         val = readl_relaxed(timer->base + CNTV_TVAL);
146                         break;
147                 }
148         } else {
149                 val = arch_timer_reg_read_cp15(access, reg);
150         }
151
152         return val;
153 }
154
155 /*
156  * Default to cp15 based access because arm64 uses this function for
157  * sched_clock() before DT is probed and the cp15 method is guaranteed
158  * to exist on arm64. arm doesn't use this before DT is probed so even
159  * if we don't have the cp15 accessors we won't have a problem.
160  */
161 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
162 EXPORT_SYMBOL_GPL(arch_timer_read_counter);
163
164 static u64 arch_counter_read(struct clocksource *cs)
165 {
166         return arch_timer_read_counter();
167 }
168
169 static u64 arch_counter_read_cc(const struct cyclecounter *cc)
170 {
171         return arch_timer_read_counter();
172 }
173
174 static struct clocksource clocksource_counter = {
175         .name   = "arch_sys_counter",
176         .rating = 400,
177         .read   = arch_counter_read,
178         .mask   = CLOCKSOURCE_MASK(56),
179         .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
180 };
181
182 static struct cyclecounter cyclecounter __ro_after_init = {
183         .read   = arch_counter_read_cc,
184         .mask   = CLOCKSOURCE_MASK(56),
185 };
186
187 struct ate_acpi_oem_info {
188         char oem_id[ACPI_OEM_ID_SIZE + 1];
189         char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
190         u32 oem_revision;
191 };
192
193 #ifdef CONFIG_FSL_ERRATUM_A008585
194 /*
195  * The number of retries is an arbitrary value well beyond the highest number
196  * of iterations the loop has been observed to take.
197  */
198 #define __fsl_a008585_read_reg(reg) ({                  \
199         u64 _old, _new;                                 \
200         int _retries = 200;                             \
201                                                         \
202         do {                                            \
203                 _old = read_sysreg(reg);                \
204                 _new = read_sysreg(reg);                \
205                 _retries--;                             \
206         } while (unlikely(_old != _new) && _retries);   \
207                                                         \
208         WARN_ON_ONCE(!_retries);                        \
209         _new;                                           \
210 })
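/*
 * A minimal sketch of the pattern the macro implements (illustrative only;
 * unstable_read() is a hypothetical stand-in for a read_sysreg() affected
 * by the erratum): two back-to-back reads, repeated until they agree.
 *
 *	u64 old, new;
 *	int retries = 200;
 *
 *	do {
 *		old = unstable_read();
 *		new = unstable_read();
 *		retries--;
 *	} while (old != new && retries);
 *
 * Once the loop exits, "new" was observed twice in a row, so a value torn
 * by the erratum is never used (and WARN_ON_ONCE fires if it never settles).
 */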
211
212 static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
213 {
214         return __fsl_a008585_read_reg(cntp_tval_el0);
215 }
216
217 static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
218 {
219         return __fsl_a008585_read_reg(cntv_tval_el0);
220 }
221
222 static u64 notrace fsl_a008585_read_cntpct_el0(void)
223 {
224         return __fsl_a008585_read_reg(cntpct_el0);
225 }
226
227 static u64 notrace fsl_a008585_read_cntvct_el0(void)
228 {
229         return __fsl_a008585_read_reg(cntvct_el0);
230 }
231 #endif
232
233 #ifdef CONFIG_HISILICON_ERRATUM_161010101
234 /*
235  * Verifying that the value of the second read is larger than the first by
236  * less than 32 is the only way to confirm the value is correct, so clear the
237  * lower 5 bits to check whether the difference is 32 or more.
238  * Theoretically the erratum should not occur more than twice in succession
239  * when reading the system counter, but it is possible that some interrupts
240  * may lead to more than two read errors, triggering the warning, so set
241  * the number of retries far beyond the number of iterations the loop has
242  * been observed to take.
243  */
244 #define __hisi_161010101_read_reg(reg) ({                               \
245         u64 _old, _new;                                         \
246         int _retries = 50;                                      \
247                                                                 \
248         do {                                                    \
249                 _old = read_sysreg(reg);                        \
250                 _new = read_sysreg(reg);                        \
251                 _retries--;                                     \
252         } while (unlikely((_new - _old) >> 5) && _retries);     \
253                                                                 \
254         WARN_ON_ONCE(!_retries);                                \
255         _new;                                                   \
256 })
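/*
 * Worked example of the retry condition above: with _old == 0x1000 and a
 * sane _new == 0x1007, (_new - _old) >> 5 == 7 >> 5 == 0 and the loop
 * exits. With an erratic _new == 0x0fe0 (the counter appearing to jump
 * backwards) the u64 subtraction underflows to a huge value, the shift is
 * non-zero, and the pair is read again.
 */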
257
258 static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
259 {
260         return __hisi_161010101_read_reg(cntp_tval_el0);
261 }
262
263 static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
264 {
265         return __hisi_161010101_read_reg(cntv_tval_el0);
266 }
267
268 static u64 notrace hisi_161010101_read_cntpct_el0(void)
269 {
270         return __hisi_161010101_read_reg(cntpct_el0);
271 }
272
273 static u64 notrace hisi_161010101_read_cntvct_el0(void)
274 {
275         return __hisi_161010101_read_reg(cntvct_el0);
276 }
277
278 static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
279         /*
280          * Note that trailing spaces are required to properly match
281          * the OEM table information.
282          */
283         {
284                 .oem_id         = "HISI  ",
285                 .oem_table_id   = "HIP05   ",
286                 .oem_revision   = 0,
287         },
288         {
289                 .oem_id         = "HISI  ",
290                 .oem_table_id   = "HIP06   ",
291                 .oem_revision   = 0,
292         },
293         {
294                 .oem_id         = "HISI  ",
295                 .oem_table_id   = "HIP07   ",
296                 .oem_revision   = 0,
297         },
298         { /* Sentinel indicating the end of the OEM array */ },
299 };
300 #endif
301
302 #ifdef CONFIG_ARM64_ERRATUM_858921
303 static u64 notrace arm64_858921_read_cntpct_el0(void)
304 {
305         u64 old, new;
306
307         old = read_sysreg(cntpct_el0);
308         new = read_sysreg(cntpct_el0);
309         return (((old ^ new) >> 32) & 1) ? old : new;
310 }
311
312 static u64 notrace arm64_858921_read_cntvct_el0(void)
313 {
314         u64 old, new;
315
316         old = read_sysreg(cntvct_el0);
317         new = read_sysreg(cntvct_el0);
318         return (((old ^ new) >> 32) & 1) ? old : new;
319 }
320 #endif
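/*
 * Worked example of the selection above: the erratum can return a stale
 * value around a 32-bit rollover, e.g. old == 0x0_ffffffff and
 * new == 0x1_00000000. Then ((old ^ new) >> 32) & 1 is set, meaning the
 * upper words differ, and the older (pre-rollover) sample is returned;
 * when both reads fall in the same upper-word epoch, "new" is used.
 */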
321
322 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
323 /*
324  * The low bits of the counter registers are indeterminate while bit 10 or
325  * greater is rolling over. Since the counter value can jump both backward
326  * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
327  * with all ones or all zeros in the low bits. Bound the loop by the maximum
328  * number of CPU cycles in 3 consecutive 24 MHz counter periods.
329  */
330 #define __sun50i_a64_read_reg(reg) ({                                   \
331         u64 _val;                                                       \
332         int _retries = 150;                                             \
333                                                                         \
334         do {                                                            \
335                 _val = read_sysreg(reg);                                \
336                 _retries--;                                             \
337         } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);        \
338                                                                         \
339         WARN_ON_ONCE(!_retries);                                        \
340         _val;                                                           \
341 })
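/*
 * Worked example of the retry test above: GENMASK(8, 0) is 0x1ff, so the
 * loop re-reads whenever the low 9 bits are all ones or all zeros. A _val
 * ending in 0x1ff gives (_val + 1) & 0x1ff == 0, one ending in 0x000 gives
 * 1, and both are rejected; a value ending in e.g. 0x123 gives 0x124 and
 * is kept.
 */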
342
343 static u64 notrace sun50i_a64_read_cntpct_el0(void)
344 {
345         return __sun50i_a64_read_reg(cntpct_el0);
346 }
347
348 static u64 notrace sun50i_a64_read_cntvct_el0(void)
349 {
350         return __sun50i_a64_read_reg(cntvct_el0);
351 }
352
353 static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
354 {
355         return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
356 }
357
358 static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
359 {
360         return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
361 }
362 #endif
363
364 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
365 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
366 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
367
368 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
369 EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
370
371 static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
372                                                 struct clock_event_device *clk)
373 {
374         unsigned long ctrl;
375         u64 cval;
376
377         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
378         ctrl |= ARCH_TIMER_CTRL_ENABLE;
379         ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
380
381         if (access == ARCH_TIMER_PHYS_ACCESS) {
382                 cval = evt + arch_counter_get_cntpct();
383                 write_sysreg(cval, cntp_cval_el0);
384         } else {
385                 cval = evt + arch_counter_get_cntvct();
386                 write_sysreg(cval, cntv_cval_el0);
387         }
388
389         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
390 }
391
392 static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
393                                             struct clock_event_device *clk)
394 {
395         erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
396         return 0;
397 }
398
399 static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
400                                             struct clock_event_device *clk)
401 {
402         erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
403         return 0;
404 }
405
406 static const struct arch_timer_erratum_workaround ool_workarounds[] = {
407 #ifdef CONFIG_FSL_ERRATUM_A008585
408         {
409                 .match_type = ate_match_dt,
410                 .id = "fsl,erratum-a008585",
411                 .desc = "Freescale erratum a008585",
412                 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
413                 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
414                 .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
415                 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
416                 .set_next_event_phys = erratum_set_next_event_tval_phys,
417                 .set_next_event_virt = erratum_set_next_event_tval_virt,
418         },
419 #endif
420 #ifdef CONFIG_HISILICON_ERRATUM_161010101
421         {
422                 .match_type = ate_match_dt,
423                 .id = "hisilicon,erratum-161010101",
424                 .desc = "HiSilicon erratum 161010101",
425                 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
426                 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
427                 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
428                 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
429                 .set_next_event_phys = erratum_set_next_event_tval_phys,
430                 .set_next_event_virt = erratum_set_next_event_tval_virt,
431         },
432         {
433                 .match_type = ate_match_acpi_oem_info,
434                 .id = hisi_161010101_oem_info,
435                 .desc = "HiSilicon erratum 161010101",
436                 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
437                 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
438                 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
439                 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
440                 .set_next_event_phys = erratum_set_next_event_tval_phys,
441                 .set_next_event_virt = erratum_set_next_event_tval_virt,
442         },
443 #endif
444 #ifdef CONFIG_ARM64_ERRATUM_858921
445         {
446                 .match_type = ate_match_local_cap_id,
447                 .id = (void *)ARM64_WORKAROUND_858921,
448                 .desc = "ARM erratum 858921",
449                 .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
450                 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
451         },
452 #endif
453 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
454         {
455                 .match_type = ate_match_dt,
456                 .id = "allwinner,erratum-unknown1",
457                 .desc = "Allwinner erratum UNKNOWN1",
458                 .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
459                 .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
460                 .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
461                 .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
462                 .set_next_event_phys = erratum_set_next_event_tval_phys,
463                 .set_next_event_virt = erratum_set_next_event_tval_virt,
464         },
465 #endif
466 };
467
468 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
469                                const void *);
470
471 static
472 bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
473                                  const void *arg)
474 {
475         const struct device_node *np = arg;
476
477         return of_property_read_bool(np, wa->id);
478 }
479
480 static
481 bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
482                                         const void *arg)
483 {
484         return this_cpu_has_cap((uintptr_t)wa->id);
485 }
486
487
488 static
489 bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
490                                        const void *arg)
491 {
492         static const struct ate_acpi_oem_info empty_oem_info = {};
493         const struct ate_acpi_oem_info *info = wa->id;
494         const struct acpi_table_header *table = arg;
495
496         /* Iterate over the ACPI OEM info array, looking for a match */
497         while (memcmp(info, &empty_oem_info, sizeof(*info))) {
498                 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
499                     !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
500                     info->oem_revision == table->oem_revision)
501                         return true;
502
503                 info++;
504         }
505
506         return false;
507 }
508
509 static const struct arch_timer_erratum_workaround *
510 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
511                           ate_match_fn_t match_fn,
512                           void *arg)
513 {
514         int i;
515
516         for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
517                 if (ool_workarounds[i].match_type != type)
518                         continue;
519
520                 if (match_fn(&ool_workarounds[i], arg))
521                         return &ool_workarounds[i];
522         }
523
524         return NULL;
525 }
526
527 static
528 void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
529                                   bool local)
530 {
531         int i;
532
533         if (local) {
534                 __this_cpu_write(timer_unstable_counter_workaround, wa);
535         } else {
536                 for_each_possible_cpu(i)
537                         per_cpu(timer_unstable_counter_workaround, i) = wa;
538         }
539
540         /*
541          * Use the locked version, as we're called from the CPU
542          * hotplug framework. Otherwise, we end up in deadlock-land.
543          */
544         static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
545
546         /*
547          * Don't use the vdso fastpath if errata require using the
548          * out-of-line counter accessor. We may change our mind pretty
549          * late in the game (with a per-CPU erratum, for example), so
550          * change both the default value and the vdso itself.
551          */
552         if (wa->read_cntvct_el0) {
553                 clocksource_counter.archdata.vdso_direct = false;
554                 vdso_default = false;
555         }
556 }
557
558 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
559                                             void *arg)
560 {
561         const struct arch_timer_erratum_workaround *wa;
562         ate_match_fn_t match_fn = NULL;
563         bool local = false;
564
565         switch (type) {
566         case ate_match_dt:
567                 match_fn = arch_timer_check_dt_erratum;
568                 break;
569         case ate_match_local_cap_id:
570                 match_fn = arch_timer_check_local_cap_erratum;
571                 local = true;
572                 break;
573         case ate_match_acpi_oem_info:
574                 match_fn = arch_timer_check_acpi_oem_erratum;
575                 break;
576         default:
577                 WARN_ON(1);
578                 return;
579         }
580
581         wa = arch_timer_iterate_errata(type, match_fn, arg);
582         if (!wa)
583                 return;
584
585         if (needs_unstable_timer_counter_workaround()) {
586                 const struct arch_timer_erratum_workaround *__wa;
587                 __wa = __this_cpu_read(timer_unstable_counter_workaround);
588                 if (__wa && wa != __wa)
589                         pr_warn("Can't enable workaround for %s (clashes with %s)\n",
590                                 wa->desc, __wa->desc);
591
592                 if (__wa)
593                         return;
594         }
595
596         arch_timer_enable_workaround(wa, local);
597         pr_info("Enabling %s workaround for %s\n",
598                 local ? "local" : "global", wa->desc);
599 }
600
601 #define erratum_handler(fn, r, ...)                                     \
602 ({                                                                      \
603         bool __val;                                                     \
604         if (needs_unstable_timer_counter_workaround()) {                \
605                 const struct arch_timer_erratum_workaround *__wa;       \
606                 __wa = __this_cpu_read(timer_unstable_counter_workaround); \
607                 if (__wa && __wa->fn) {                                 \
608                         r = __wa->fn(__VA_ARGS__);                      \
609                         __val = true;                                   \
610                 } else {                                                \
611                         __val = false;                                  \
612                 }                                                       \
613         } else {                                                        \
614                 __val = false;                                          \
615         }                                                               \
616         __val;                                                          \
617 })
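/*
 * Usage sketch for erratum_handler(): the set_next_event paths below call
 *
 *	int ret;
 *	if (erratum_handler(set_next_event_virt, ret, evt, clk))
 *		return ret;
 *
 * so the per-CPU workaround's hook runs when one is installed and provides
 * that method; otherwise the macro evaluates to false and the default
 * implementation is used.
 */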
618
619 static bool arch_timer_this_cpu_has_cntvct_wa(void)
620 {
621         const struct arch_timer_erratum_workaround *wa;
622
623         wa = __this_cpu_read(timer_unstable_counter_workaround);
624         return wa && wa->read_cntvct_el0;
625 }
626 #else
627 #define arch_timer_check_ool_workaround(t,a)            do { } while(0)
628 #define erratum_set_next_event_tval_virt(...)           ({BUG(); 0;})
629 #define erratum_set_next_event_tval_phys(...)           ({BUG(); 0;})
630 #define erratum_handler(fn, r, ...)                     ({false;})
631 #define arch_timer_this_cpu_has_cntvct_wa()             ({false;})
632 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
633
634 static __always_inline irqreturn_t timer_handler(const int access,
635                                         struct clock_event_device *evt)
636 {
637         unsigned long ctrl;
638
639         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
640         if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
641                 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
642                 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
643                 evt->event_handler(evt);
644                 return IRQ_HANDLED;
645         }
646
647         return IRQ_NONE;
648 }
649
650 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
651 {
652         struct clock_event_device *evt = dev_id;
653
654         return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
655 }
656
657 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
658 {
659         struct clock_event_device *evt = dev_id;
660
661         return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
662 }
663
664 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
665 {
666         struct clock_event_device *evt = dev_id;
667
668         return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
669 }
670
671 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
672 {
673         struct clock_event_device *evt = dev_id;
674
675         return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
676 }
677
678 static __always_inline int timer_shutdown(const int access,
679                                           struct clock_event_device *clk)
680 {
681         unsigned long ctrl;
682
683         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
684         ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
685         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
686
687         return 0;
688 }
689
690 static int arch_timer_shutdown_virt(struct clock_event_device *clk)
691 {
692         return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
693 }
694
695 static int arch_timer_shutdown_phys(struct clock_event_device *clk)
696 {
697         return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
698 }
699
700 static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
701 {
702         return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
703 }
704
705 static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
706 {
707         return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
708 }
709
710 static __always_inline void set_next_event(const int access, unsigned long evt,
711                                            struct clock_event_device *clk)
712 {
713         unsigned long ctrl;
714         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
715         ctrl |= ARCH_TIMER_CTRL_ENABLE;
716         ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
717         arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
718         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
719 }
720
721 static int arch_timer_set_next_event_virt(unsigned long evt,
722                                           struct clock_event_device *clk)
723 {
724         int ret;
725
726         if (erratum_handler(set_next_event_virt, ret, evt, clk))
727                 return ret;
728
729         set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
730         return 0;
731 }
732
733 static int arch_timer_set_next_event_phys(unsigned long evt,
734                                           struct clock_event_device *clk)
735 {
736         int ret;
737
738         if (erratum_handler(set_next_event_phys, ret, evt, clk))
739                 return ret;
740
741         set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
742         return 0;
743 }
744
745 static int arch_timer_set_next_event_virt_mem(unsigned long evt,
746                                               struct clock_event_device *clk)
747 {
748         set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
749         return 0;
750 }
751
752 static int arch_timer_set_next_event_phys_mem(unsigned long evt,
753                                               struct clock_event_device *clk)
754 {
755         set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
756         return 0;
757 }
758
759 static void __arch_timer_setup(unsigned type,
760                                struct clock_event_device *clk)
761 {
762         clk->features = CLOCK_EVT_FEAT_ONESHOT;
763
764         if (type == ARCH_TIMER_TYPE_CP15) {
765                 if (arch_timer_c3stop)
766                         clk->features |= CLOCK_EVT_FEAT_C3STOP;
767                 clk->name = "arch_sys_timer";
768                 clk->rating = 450;
769                 clk->cpumask = cpumask_of(smp_processor_id());
770                 clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
771                 switch (arch_timer_uses_ppi) {
772                 case ARCH_TIMER_VIRT_PPI:
773                         clk->set_state_shutdown = arch_timer_shutdown_virt;
774                         clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
775                         clk->set_next_event = arch_timer_set_next_event_virt;
776                         break;
777                 case ARCH_TIMER_PHYS_SECURE_PPI:
778                 case ARCH_TIMER_PHYS_NONSECURE_PPI:
779                 case ARCH_TIMER_HYP_PPI:
780                         clk->set_state_shutdown = arch_timer_shutdown_phys;
781                         clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
782                         clk->set_next_event = arch_timer_set_next_event_phys;
783                         break;
784                 default:
785                         BUG();
786                 }
787
788                 arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
789         } else {
790                 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
791                 clk->name = "arch_mem_timer";
792                 clk->rating = 400;
793                 clk->cpumask = cpu_possible_mask;
794                 if (arch_timer_mem_use_virtual) {
795                         clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
796                         clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
797                         clk->set_next_event =
798                                 arch_timer_set_next_event_virt_mem;
799                 } else {
800                         clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
801                         clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
802                         clk->set_next_event =
803                                 arch_timer_set_next_event_phys_mem;
804                 }
805         }
806
807         clk->set_state_shutdown(clk);
808
809         clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
810 }
811
812 static void arch_timer_evtstrm_enable(int divider)
813 {
814         u32 cntkctl = arch_timer_get_cntkctl();
815
816         cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
817         /* Set the divider and enable virtual event stream */
818         cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
819                         | ARCH_TIMER_VIRT_EVT_EN;
820         arch_timer_set_cntkctl(cntkctl);
821         elf_hwcap |= HWCAP_EVTSTRM;
822 #ifdef CONFIG_COMPAT
823         compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
824 #endif
825         cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
826 }
827
828 static void arch_timer_configure_evtstream(void)
829 {
830         int evt_stream_div, lsb;
831
832         /*
833          * As the event stream can at most be generated at half the frequency
834          * of the counter, use half the frequency when computing the divider.
835          */
836         evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
837
838         /*
839          * Find the closest power of two to the divisor: if the bit below the
840          * last set bit (lsb, counted from 0) is set, round up to (lsb + 1).
841          */
842         lsb = fls(evt_stream_div) - 1;
843         if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
844                 lsb++;
845
846         /* enable event stream */
847         arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
848 }
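/*
 * Worked example, assuming a 24 MHz counter and the default 10 kHz
 * ARCH_TIMER_EVT_STREAM_FREQ: evt_stream_div = 24000000 / 10000 / 2 = 1200,
 * fls(1200) = 11 so lsb = 10, and bit 9 of 1200 (0b10010110000) is clear,
 * so the divider stays 10 and the event stream is driven from counter
 * bit 10.
 */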
849
850 static void arch_counter_set_user_access(void)
851 {
852         u32 cntkctl = arch_timer_get_cntkctl();
853
854         /* Disable user access to the timers and both counters */
855         /* Also disable virtual event stream */
856         cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
857                         | ARCH_TIMER_USR_VT_ACCESS_EN
858                         | ARCH_TIMER_USR_VCT_ACCESS_EN
859                         | ARCH_TIMER_VIRT_EVT_EN
860                         | ARCH_TIMER_USR_PCT_ACCESS_EN);
861
862         /*
863          * Enable user access to the virtual counter if it doesn't
864          * need to be worked around. The vdso may already have been
865          * disabled, though.
866          */
867         if (arch_timer_this_cpu_has_cntvct_wa())
868                 pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
869         else
870                 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
871
872         arch_timer_set_cntkctl(cntkctl);
873 }
874
875 static bool arch_timer_has_nonsecure_ppi(void)
876 {
877         return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
878                 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
879 }
880
881 static u32 check_ppi_trigger(int irq)
882 {
883         u32 flags = irq_get_trigger_type(irq);
884
885         if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
886                 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
887                 pr_warn("WARNING: Please fix your firmware\n");
888                 flags = IRQF_TRIGGER_LOW;
889         }
890
891         return flags;
892 }
893
894 static int arch_timer_starting_cpu(unsigned int cpu)
895 {
896         struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
897         u32 flags;
898
899         __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
900
901         flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
902         enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
903
904         if (arch_timer_has_nonsecure_ppi()) {
905                 flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
906                 enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
907                                   flags);
908         }
909
910         arch_counter_set_user_access();
911         if (evtstrm_enable)
912                 arch_timer_configure_evtstream();
913
914         return 0;
915 }
916
917 /*
918  * For historical reasons, when probing with DT we use whichever (non-zero)
919  * rate was probed first, and don't verify that others match. If the first node
920  * probed has a clock-frequency property, this overrides the HW register.
921  */
922 static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
923 {
924         /* Who has more than one independent system counter? */
925         if (arch_timer_rate)
926                 return;
927
928         if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
929                 arch_timer_rate = rate;
930
931         /* Check the timer frequency. */
932         if (arch_timer_rate == 0)
933                 pr_warn("frequency not available\n");
934 }
935
936 static void arch_timer_banner(unsigned type)
937 {
938         pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
939                 type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
940                 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
941                         " and " : "",
942                 type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
943                 (unsigned long)arch_timer_rate / 1000000,
944                 (unsigned long)(arch_timer_rate / 10000) % 100,
945                 type & ARCH_TIMER_TYPE_CP15 ?
946                         (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
947                         "",
948                 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
949                 type & ARCH_TIMER_TYPE_MEM ?
950                         arch_timer_mem_use_virtual ? "virt" : "phys" :
951                         "");
952 }
953
954 u32 arch_timer_get_rate(void)
955 {
956         return arch_timer_rate;
957 }
958
959 bool arch_timer_evtstrm_available(void)
960 {
961         /*
962          * We might get called from a preemptible context. This is fine
963          * because the availability of the event stream should always be the same
964          * for a preemptible context and a context where we might resume a task.
965          */
966         return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
967 }
968
969 static u64 arch_counter_get_cntvct_mem(void)
970 {
971         u32 vct_lo, vct_hi, tmp_hi;
972
973         do {
974                 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
975                 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
976                 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
977         } while (vct_hi != tmp_hi);
978
979         return ((u64) vct_hi << 32) | vct_lo;
980 }
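/*
 * The hi/lo/hi sequence above guards against torn 64-bit reads: if the low
 * word rolls over between the two halves, e.g. the counter moves from
 * 0x1_ffffffff to 0x2_00000000, the two CNTVCT_HI samples disagree and the
 * read is retried, so a mixed value such as 0x1_00000000 is never returned.
 */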
981
982 static struct arch_timer_kvm_info arch_timer_kvm_info;
983
984 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
985 {
986         return &arch_timer_kvm_info;
987 }
988
989 static void __init arch_counter_register(unsigned type)
990 {
991         u64 start_count;
992
993         /* Register the CP15 based counter if we have one */
994         if (type & ARCH_TIMER_TYPE_CP15) {
995                 if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
996                     arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
997                         arch_timer_read_counter = arch_counter_get_cntvct;
998                 else
999                         arch_timer_read_counter = arch_counter_get_cntpct;
1000
1001                 clocksource_counter.archdata.vdso_direct = vdso_default;
1002         } else {
1003                 arch_timer_read_counter = arch_counter_get_cntvct_mem;
1004         }
1005
1006         if (!arch_counter_suspend_stop)
1007                 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1008         start_count = arch_timer_read_counter();
1009         clocksource_register_hz(&clocksource_counter, arch_timer_rate);
1010         cyclecounter.mult = clocksource_counter.mult;
1011         cyclecounter.shift = clocksource_counter.shift;
1012         timecounter_init(&arch_timer_kvm_info.timecounter,
1013                          &cyclecounter, start_count);
1014
1015         /* 56 bits minimum, so we assume worst case rollover */
1016         sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
1017 }
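/*
 * The 56-bit width assumed above makes sched_clock() wraparound a
 * non-issue in practice: 2^56 ticks at a typical 50 MHz rate is about
 * 1.4e9 seconds, i.e. more than 45 years of uptime.
 */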
1018
1019 static void arch_timer_stop(struct clock_event_device *clk)
1020 {
1021         pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
1022
1023         disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
1024         if (arch_timer_has_nonsecure_ppi())
1025                 disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
1026
1027         clk->set_state_shutdown(clk);
1028 }
1029
1030 static int arch_timer_dying_cpu(unsigned int cpu)
1031 {
1032         struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
1033
1034         cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
1035
1036         arch_timer_stop(clk);
1037         return 0;
1038 }
1039
1040 #ifdef CONFIG_CPU_PM
1041 static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
1042 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
1043                                     unsigned long action, void *hcpu)
1044 {
1045         if (action == CPU_PM_ENTER) {
1046                 __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
1047
1048                 cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
1049         } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
1050                 arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
1051
1052                 if (elf_hwcap & HWCAP_EVTSTRM)
1053                         cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
1054         }
1055         return NOTIFY_OK;
1056 }
1057
1058 static struct notifier_block arch_timer_cpu_pm_notifier = {
1059         .notifier_call = arch_timer_cpu_pm_notify,
1060 };
1061
1062 static int __init arch_timer_cpu_pm_init(void)
1063 {
1064         return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
1065 }
1066
1067 static void __init arch_timer_cpu_pm_deinit(void)
1068 {
1069         WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
1070 }
1071
1072 #else
1073 static int __init arch_timer_cpu_pm_init(void)
1074 {
1075         return 0;
1076 }
1077
1078 static void __init arch_timer_cpu_pm_deinit(void)
1079 {
1080 }
1081 #endif
1082
1083 static int __init arch_timer_register(void)
1084 {
1085         int err;
1086         int ppi;
1087
1088         arch_timer_evt = alloc_percpu(struct clock_event_device);
1089         if (!arch_timer_evt) {
1090                 err = -ENOMEM;
1091                 goto out;
1092         }
1093
1094         ppi = arch_timer_ppi[arch_timer_uses_ppi];
1095         switch (arch_timer_uses_ppi) {
1096         case ARCH_TIMER_VIRT_PPI:
1097                 err = request_percpu_irq(ppi, arch_timer_handler_virt,
1098                                          "arch_timer", arch_timer_evt);
1099                 break;
1100         case ARCH_TIMER_PHYS_SECURE_PPI:
1101         case ARCH_TIMER_PHYS_NONSECURE_PPI:
1102                 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1103                                          "arch_timer", arch_timer_evt);
1104                 if (!err && arch_timer_has_nonsecure_ppi()) {
1105                         ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
1106                         err = request_percpu_irq(ppi, arch_timer_handler_phys,
1107                                                  "arch_timer", arch_timer_evt);
1108                         if (err)
1109                                 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
1110                                                 arch_timer_evt);
1111                 }
1112                 break;
1113         case ARCH_TIMER_HYP_PPI:
1114                 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1115                                          "arch_timer", arch_timer_evt);
1116                 break;
1117         default:
1118                 BUG();
1119         }
1120
1121         if (err) {
1122                 pr_err("can't register interrupt %d (%d)\n", ppi, err);
1123                 goto out_free;
1124         }
1125
1126         err = arch_timer_cpu_pm_init();
1127         if (err)
1128                 goto out_unreg_notify;
1129
1130         /* Register and immediately configure the timer on the boot CPU */
1131         err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
1132                                 "clockevents/arm/arch_timer:starting",
1133                                 arch_timer_starting_cpu, arch_timer_dying_cpu);
1134         if (err)
1135                 goto out_unreg_cpupm;
1136         return 0;
1137
1138 out_unreg_cpupm:
1139         arch_timer_cpu_pm_deinit();
1140
1141 out_unreg_notify:
1142         free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
1143         if (arch_timer_has_nonsecure_ppi())
1144                 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
1145                                 arch_timer_evt);
1146
1147 out_free:
1148         free_percpu(arch_timer_evt);
1149 out:
1150         return err;
1151 }
1152
1153 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
1154 {
1155         int ret;
1156         irq_handler_t func;
1157         struct arch_timer *t;
1158
1159         t = kzalloc(sizeof(*t), GFP_KERNEL);
1160         if (!t)
1161                 return -ENOMEM;
1162
1163         t->base = base;
1164         t->evt.irq = irq;
1165         __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
1166
1167         if (arch_timer_mem_use_virtual)
1168                 func = arch_timer_handler_virt_mem;
1169         else
1170                 func = arch_timer_handler_phys_mem;
1171
1172         ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
1173         if (ret) {
1174                 pr_err("Failed to request mem timer irq\n");
1175                 kfree(t);
1176         }
1177
1178         return ret;
1179 }
1180
1181 static const struct of_device_id arch_timer_of_match[] __initconst = {
1182         { .compatible   = "arm,armv7-timer",    },
1183         { .compatible   = "arm,armv8-timer",    },
1184         {},
1185 };
1186
1187 static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
1188         { .compatible   = "arm,armv7-timer-mem", },
1189         {},
1190 };
1191
1192 static bool __init arch_timer_needs_of_probing(void)
1193 {
1194         struct device_node *dn;
1195         bool needs_probing = false;
1196         unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
1197
1198         /* We have two timers, and both device-tree nodes are probed. */
1199         if ((arch_timers_present & mask) == mask)
1200                 return false;
1201
1202         /*
1203          * Only one type of timer is probed;
1204          * check whether we have another type of timer node in the device tree.
1205          */
1206         if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
1207                 dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
1208         else
1209                 dn = of_find_matching_node(NULL, arch_timer_of_match);
1210
1211         if (dn && of_device_is_available(dn))
1212                 needs_probing = true;
1213
1214         of_node_put(dn);
1215
1216         return needs_probing;
1217 }
1218
1219 static int __init arch_timer_common_init(void)
1220 {
1221         arch_timer_banner(arch_timers_present);
1222         arch_counter_register(arch_timers_present);
1223         return arch_timer_arch_init();
1224 }
1225
1226 /**
1227  * arch_timer_select_ppi() - Select suitable PPI for the current system.
1228  *
1229  * If HYP mode is available, we know that the physical timer
1230  * has been configured to be accessible from PL1. Use it, so
1231  * that a guest can use the virtual timer instead.
1232  *
1233  * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
1234  * accesses to CNTP_*_EL1 registers are silently redirected to
1235  * their CNTHP_*_EL2 counterparts, and use a different PPI
1236  * number.
1237  *
1238  * If no interrupt is provided for the virtual timer, we'll have to
1239  * stick to the physical timer. It'd better be accessible...
1240  * For arm64 we never use the secure interrupt.
1241  *
1242  * Return: a suitable PPI type for the current system.
1243  */
1244 static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
1245 {
1246         if (is_kernel_in_hyp_mode())
1247                 return ARCH_TIMER_HYP_PPI;
1248
1249         if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
1250                 return ARCH_TIMER_VIRT_PPI;
1251
1252         if (IS_ENABLED(CONFIG_ARM64))
1253                 return ARCH_TIMER_PHYS_NONSECURE_PPI;
1254
1255         return ARCH_TIMER_PHYS_SECURE_PPI;
1256 }
1257
1258 static int __init arch_timer_of_init(struct device_node *np)
1259 {
1260         int i, ret;
1261         u32 rate;
1262
1263         if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
1264                 pr_warn("multiple nodes in dt, skipping\n");
1265                 return 0;
1266         }
1267
1268         arch_timers_present |= ARCH_TIMER_TYPE_CP15;
1269         for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
1270                 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1271
1272         arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1273
1274         rate = arch_timer_get_cntfrq();
1275         arch_timer_of_configure_rate(rate, np);
1276
1277         arch_timer_c3stop = !of_property_read_bool(np, "always-on");
1278
1279         /* Check for globally applicable workarounds */
1280         arch_timer_check_ool_workaround(ate_match_dt, np);
1281
1282         /*
1283          * If we cannot rely on firmware initializing the timer registers then
1284          * we should use the physical timers instead.
1285          */
1286         if (IS_ENABLED(CONFIG_ARM) &&
1287             of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
1288                 arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
1289         else
1290                 arch_timer_uses_ppi = arch_timer_select_ppi();
1291
1292         if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1293                 pr_err("No interrupt available, giving up\n");
1294                 return -EINVAL;
1295         }
1296
1297         /* On some systems, the counter stops ticking when in suspend. */
1298         arch_counter_suspend_stop = of_property_read_bool(np,
1299                                                          "arm,no-tick-in-suspend");
1300
1301         ret = arch_timer_register();
1302         if (ret)
1303                 return ret;
1304
1305         if (arch_timer_needs_of_probing())
1306                 return 0;
1307
1308         return arch_timer_common_init();
1309 }
1310 TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1311 TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
1312
1313 static u32 __init
1314 arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
1315 {
1316         void __iomem *base;
1317         u32 rate;
1318
1319         base = ioremap(frame->cntbase, frame->size);
1320         if (!base) {
1321                 pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
1322                 return 0;
1323         }
1324
1325         rate = readl_relaxed(base + CNTFRQ);
1326
1327         iounmap(base);
1328
1329         return rate;
1330 }
1331
1332 static struct arch_timer_mem_frame * __init
1333 arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
1334 {
1335         struct arch_timer_mem_frame *frame, *best_frame = NULL;
1336         void __iomem *cntctlbase;
1337         u32 cnttidr;
1338         int i;
1339
1340         cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
1341         if (!cntctlbase) {
1342                 pr_err("Can't map CNTCTLBase @ %pa\n",
1343                         &timer_mem->cntctlbase);
1344                 return NULL;
1345         }
1346
1347         cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
1348
1349         /*
1350          * Try to find a virtual capable frame. Otherwise fall back to a
1351          * physical capable frame.
1352          */
1353         for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1354                 u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1355                              CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1356
1357                 frame = &timer_mem->frame[i];
1358                 if (!frame->valid)
1359                         continue;
1360
1361                 /* Try enabling everything, and see what sticks */
1362                 writel_relaxed(cntacr, cntctlbase + CNTACR(i));
1363                 cntacr = readl_relaxed(cntctlbase + CNTACR(i));
1364
1365                 if ((cnttidr & CNTTIDR_VIRT(i)) &&
1366                     !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
1367                         best_frame = frame;
1368                         arch_timer_mem_use_virtual = true;
1369                         break;
1370                 }
1371
1372                 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1373                         continue;
1374
1375                 best_frame = frame;
1376         }
1377
1378         iounmap(cntctlbase);
1379
1380         return best_frame;
1381 }
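/*
 * A sketch of the write-then-read-back probe above, with a hypothetical
 * readback: if after writing all six access bits only 0x1b
 * (RPCT|RVCT|RVOFF|RWVT) sticks, then ~cntacr & (CNTACR_RWVT | CNTACR_RVCT)
 * is 0 and the frame qualifies as virtual-capable, while
 * ~cntacr & (CNTACR_RWPT | CNTACR_RPCT) is non-zero, so it would be skipped
 * for physical use.
 */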
1382
1383 static int __init
1384 arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
1385 {
1386         void __iomem *base;
1387         int ret, irq = 0;
1388
1389         if (arch_timer_mem_use_virtual)
1390                 irq = frame->virt_irq;
1391         else
1392                 irq = frame->phys_irq;
1393
1394         if (!irq) {
1395                 pr_err("Frame missing %s irq.\n",
1396                        arch_timer_mem_use_virtual ? "virt" : "phys");
1397                 return -EINVAL;
1398         }
1399
1400         if (!request_mem_region(frame->cntbase, frame->size,
1401                                 "arch_mem_timer"))
1402                 return -EBUSY;
1403
1404         base = ioremap(frame->cntbase, frame->size);
1405         if (!base) {
1406                 pr_err("Can't map frame's registers\n");
1407                 return -ENXIO;
1408         }
1409
1410         ret = arch_timer_mem_register(base, irq);
1411         if (ret) {
1412                 iounmap(base);
1413                 return ret;
1414         }
1415
1416         arch_counter_base = base;
1417         arch_timers_present |= ARCH_TIMER_TYPE_MEM;
1418
1419         return 0;
1420 }
1421
1422 static int __init arch_timer_mem_of_init(struct device_node *np)
1423 {
1424         struct arch_timer_mem *timer_mem;
1425         struct arch_timer_mem_frame *frame;
1426         struct device_node *frame_node;
1427         struct resource res;
1428         int ret = -EINVAL;
1429         u32 rate;
1430
1431         timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
1432         if (!timer_mem)
1433                 return -ENOMEM;
1434
1435         if (of_address_to_resource(np, 0, &res))
1436                 goto out;
1437         timer_mem->cntctlbase = res.start;
1438         timer_mem->size = resource_size(&res);
1439
1440         for_each_available_child_of_node(np, frame_node) {
1441                 u32 n;
1442                 struct arch_timer_mem_frame *frame;
1443
1444                 if (of_property_read_u32(frame_node, "frame-number", &n)) {
1445                         pr_err(FW_BUG "Missing frame-number.\n");
1446                         of_node_put(frame_node);
1447                         goto out;
1448                 }
1449                 if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
1450                         pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
1451                                ARCH_TIMER_MEM_MAX_FRAMES - 1);
1452                         of_node_put(frame_node);
1453                         goto out;
1454                 }
1455                 frame = &timer_mem->frame[n];
1456
1457                 if (frame->valid) {
1458                         pr_err(FW_BUG "Duplicated frame-number.\n");
1459                         of_node_put(frame_node);
1460                         goto out;
1461                 }
1462
1463                 if (of_address_to_resource(frame_node, 0, &res)) {
1464                         of_node_put(frame_node);
1465                         goto out;
1466                 }
1467                 frame->cntbase = res.start;
1468                 frame->size = resource_size(&res);
1469
1470                 frame->virt_irq = irq_of_parse_and_map(frame_node,
1471                                                        ARCH_TIMER_VIRT_SPI);
1472                 frame->phys_irq = irq_of_parse_and_map(frame_node,
1473                                                        ARCH_TIMER_PHYS_SPI);
1474
1475                 frame->valid = true;
1476         }
1477
1478         frame = arch_timer_mem_find_best_frame(timer_mem);
1479         if (!frame) {
1480                 pr_err("Unable to find a suitable frame in timer @ %pa\n",
1481                         &timer_mem->cntctlbase);
1482                 ret = -EINVAL;
1483                 goto out;
1484         }
1485
1486         rate = arch_timer_mem_frame_get_cntfrq(frame);
1487         arch_timer_of_configure_rate(rate, np);
1488
1489         ret = arch_timer_mem_frame_register(frame);
1490         if (!ret && !arch_timer_needs_of_probing())
1491                 ret = arch_timer_common_init();
1492 out:
1493         kfree(timer_mem);
1494         return ret;
1495 }
1496 TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
1497                        arch_timer_mem_of_init);
1498
1499 #ifdef CONFIG_ACPI_GTDT
1500 static int __init
1501 arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
1502 {
1503         struct arch_timer_mem_frame *frame;
1504         u32 rate;
1505         int i;
1506
1507         for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1508                 frame = &timer_mem->frame[i];
1509
1510                 if (!frame->valid)
1511                         continue;
1512
1513                 rate = arch_timer_mem_frame_get_cntfrq(frame);
1514                 if (rate == arch_timer_rate)
1515                         continue;
1516
1517                 pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
1518                         &frame->cntbase,
1519                         (unsigned long)rate, (unsigned long)arch_timer_rate);
1520
1521                 return -EINVAL;
1522         }
1523
1524         return 0;
1525 }
1526
1527 static int __init arch_timer_mem_acpi_init(int platform_timer_count)
1528 {
1529         struct arch_timer_mem *timers, *timer;
1530         struct arch_timer_mem_frame *frame, *best_frame = NULL;
1531         int timer_count, i, ret = 0;
1532
1533         timers = kcalloc(platform_timer_count, sizeof(*timers),
1534                             GFP_KERNEL);
1535         if (!timers)
1536                 return -ENOMEM;
1537
1538         ret = acpi_arch_timer_mem_init(timers, &timer_count);
1539         if (ret || !timer_count)
1540                 goto out;
1541
1542         /*
1543          * While unlikely, it's theoretically possible that none of the frames
1544          * in a timer expose the combination of features we want.
1545          */
1546         for (i = 0; i < timer_count; i++) {
1547                 timer = &timers[i];
1548
1549                 frame = arch_timer_mem_find_best_frame(timer);
1550                 if (!best_frame)
1551                         best_frame = frame;
1552
1553                 ret = arch_timer_mem_verify_cntfrq(timer);
1554                 if (ret) {
1555                         pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
1556                         goto out;
1557                 }
1558
1559                 if (!best_frame) /* implies !frame */
1560                         /*
1561                          * Only complain about missing suitable frames if we
1562                          * haven't already found one in a previous iteration.
1563                          */
1564                         pr_err("Unable to find a suitable frame in timer @ %pa\n",
1565                                 &timer->cntctlbase);
1566         }
1567
1568         if (best_frame)
1569                 ret = arch_timer_mem_frame_register(best_frame);
1570 out:
1571         kfree(timers);
1572         return ret;
1573 }
1574
1575 /* Initialize the per-processor generic timer and memory-mapped timer (if present) */
1576 static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1577 {
1578         int ret, platform_timer_count;
1579
1580         if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
1581                 pr_warn("already initialized, skipping\n");
1582                 return -EINVAL;
1583         }
1584
1585         arch_timers_present |= ARCH_TIMER_TYPE_CP15;
1586
1587         ret = acpi_gtdt_init(table, &platform_timer_count);
1588         if (ret) {
1589                 pr_err("Failed to init GTDT table.\n");
1590                 return ret;
1591         }
1592
1593         arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
1594                 acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
1595
1596         arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
1597                 acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
1598
1599         arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
1600                 acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
1601
1602         arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1603
1604         /*
1605          * When probing via ACPI, we have no mechanism to override the sysreg
1606          * CNTFRQ value. This *must* be correct.
1607          */
1608         arch_timer_rate = arch_timer_get_cntfrq();
1609         if (!arch_timer_rate) {
1610                 pr_err(FW_BUG "frequency not available.\n");
1611                 return -EINVAL;
1612         }
1613
1614         arch_timer_uses_ppi = arch_timer_select_ppi();
1615         if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1616                 pr_err("No interrupt available, giving up\n");
1617                 return -EINVAL;
1618         }
1619
1620         /* Always-on capability */
1621         arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
1622
1623         /* Check for globally applicable workarounds */
1624         arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
1625
1626         ret = arch_timer_register();
1627         if (ret)
1628                 return ret;
1629
1630         if (platform_timer_count &&
1631             arch_timer_mem_acpi_init(platform_timer_count))
1632                 pr_err("Failed to initialize memory-mapped timer.\n");
1633
1634         return arch_timer_common_init();
1635 }
1636 TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
1637 #endif