/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;

static const struct kvm_irq_level default_ptimer_irq = {
        .irq    = 30,
        .level  = 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
        .irq    = 27,
        .level  = 1,
};

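/*
 * The vcpu is being scheduled out: invalidate the cached vtimer active
 * state, as the hardware can no longer be trusted to still hold it on
 * the next vcpu load (see the caching rules in
 * kvm_timer_flush_hwstate_vgic()).
 */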
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu_vtimer(vcpu)->active_cleared_last = false;
}

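/* Read the raw cycle count of the host physical counter. */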
u64 kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_wake_up(vcpu);
}

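/*
 * Returns the number of nanoseconds from now until timer_ctx is due to
 * fire, or 0 if it has already expired.
 */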
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}

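/* A timer can only fire if it is enabled and its interrupt is not masked. */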
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
        return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
                (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
        u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (kvm_timer_irq_can_fire(vtimer))
                min_virt = kvm_timer_compute_delta(vtimer);

        if (kvm_timer_irq_can_fire(ptimer))
                min_phys = kvm_timer_compute_delta(ptimer);

        /* If none of the timers can fire, then return 0 */
        if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
                return 0;

        return min(min_virt, min_phys);
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_earliest_exp(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        schedule_work(&timer->expired);
        return HRTIMER_NORESTART;
}

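/*
 * Returns true if the timer output should be asserted: the timer can
 * fire and its compare value is in the past.
 */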
bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        if (!kvm_timer_irq_can_fire(timer_ctx))
                return false;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        return cval <= now;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the device bitmap with the timer states */
        regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                                    KVM_ARM_DEV_EL1_PTIMER);
        if (vtimer->irq.level)
                regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
        if (ptimer->irq.level)
                regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

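/*
 * Set the timer output level and, if an in-kernel irqchip is present,
 * forward the new level to the vgic. Any level change also invalidates
 * the cached physical active state.
 */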
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx)
{
        int ret;

        timer_ctx->active_cleared_last = false;
        timer_ctx->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
                                          timer_ctx);
                WARN_ON(ret);
        }
}

/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /*
         * If userspace modified the timer registers via SET_ONE_REG before
         * the vgic was initialized, we mustn't set the vtimer->irq.level
         * value because the guest would never see the interrupt. Instead
         * wait until we call this function from kvm_timer_flush_hwstate.
         */
        if (unlikely(!timer->enabled))
                return;

        if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
                kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);

        if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
                kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
}

/* Schedule the background timer for the emulated timer. */
static void kvm_timer_emulate(struct kvm_vcpu *vcpu,
                              struct arch_timer_context *timer_ctx)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        if (kvm_timer_should_fire(timer_ctx))
                return;

        if (!kvm_timer_irq_can_fire(timer_ctx))
                return;

        /* The timer has not yet expired, schedule a background timer */
        timer_arm(timer, kvm_timer_compute_delta(timer_ctx));
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        BUG_ON(timer_is_armed(timer));

        /*
         * No need to schedule a background timer if any guest timer has
         * already expired, because kvm_vcpu_block will return before putting
         * the thread to sleep.
         */
        if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
                return;

        /*
         * If neither timer is capable of raising interrupts (disabled or
         * masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
                return;

        /*
         * The guest timers have not yet expired; schedule a background timer
         * for the earliest expiration time among the guest timers.
         */
        timer_arm(timer, kvm_timer_earliest_exp(vcpu));
}

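/*
 * The vcpu has come out of the blocked state: cancel the background
 * timer scheduled by kvm_timer_schedule().
 */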
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
}

static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;
        int ret;

        /*
         * If we enter the guest with the virtual input level to the VGIC
         * asserted, then we have already told the VGIC what we need to, and
         * we don't need to exit from the guest until the guest deactivates
         * the already injected interrupt, so therefore we should set the
         * hardware active state to prevent unnecessary exits from the guest.
         *
         * Also, if we enter the guest with the virtual timer interrupt active,
         * then it must be active on the physical distributor, because we set
         * the HW bit and the guest must be able to deactivate the virtual and
         * physical interrupt at the same time.
         *
         * Conversely, if the virtual input level is deasserted and the virtual
         * interrupt is not active, then always clear the hardware active state
         * to ensure that hardware interrupts from the timer trigger a guest
         * exit.
         */
        phys_active = vtimer->irq.level ||
                      kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

        /*
         * We want to avoid hitting the (re)distributor as much as
         * possible, as this is a potentially expensive MMIO access
         * (not to mention locks in the irq layer), and a solution for
         * this is to cache the "active" state in memory.
         *
         * Things to consider: we cannot cache an "active set" state,
         * because the HW can change this behind our back (it becomes
         * "clear" in the HW). We must then restrict the caching to
         * the "clear" state.
         *
         * The cache is invalidated on:
         * - vcpu put, indicating that the HW cannot be trusted to be
         *   in a sane state on the next vcpu load,
         * - any change in the interrupt state
         *
         * Usage conditions:
         * - cached value is "active clear"
         * - value to be programmed is "active clear"
         */
        if (vtimer->active_cleared_last && !phys_active)
                return;

        ret = irq_set_irqchip_state(host_vtimer_irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);

        vtimer->active_cleared_last = !phys_active;
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool vlevel, plevel;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
        plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

        return vtimer->irq.level != vlevel ||
               ptimer->irq.level != plevel;
}

static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * To prevent continuously exiting from the guest, we mask the
         * physical interrupt such that the guest can make forward progress.
         * Once we detect the output level being deasserted, we unmask the
         * interrupt again so that we exit from the guest when the timer
         * fires.
         */
        if (vtimer->irq.level)
                disable_percpu_irq(host_vtimer_irq);
        else
                enable_percpu_irq(host_vtimer_irq, 0);
}

/**
 * kvm_timer_flush_hwstate - prepare timers before running the vcpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case, making sure the timer is
 * masked or disabled on the host so that we keep executing.  Also schedule a
 * software timer for the physical timer if it is enabled.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        if (unlikely(!timer->enabled))
                return;

        kvm_timer_update_state(vcpu);

        /* Set the background timer for the physical timer emulation. */
        kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu));

        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                kvm_timer_flush_hwstate_user(vcpu);
        else
                kvm_timer_flush_hwstate_vgic(vcpu);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if any of the timers have expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * Cancel the background timer for the physical timer emulation,
         * if it is set.
         */
        timer_disarm(timer);

        /*
         * The guest could have modified the timer registers or the timer
         * could have expired, update the timer state.
         */
        kvm_timer_update_state(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7.  We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        vtimer->cnt_ctl = 0;
        ptimer->cnt_ctl = 0;
        kvm_timer_update_state(vcpu);

        return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
        int i;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                vcpu_vtimer(tmp)->cntvoff = cntvoff;

        /*
         * When called from the vcpu create path, the CPU being created is not
         * included in the loop above, so we just set it here as well.
         */
        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
        mutex_unlock(&kvm->lock);
}

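/*
 * One-time per-vcpu setup: synchronize cntvoff across the VM, initialize
 * the background hrtimer and its expiry work, and assign the default
 * interrupt numbers to both timers.
 */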
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /* Synchronize cntvoff across all vtimers of a VM. */
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
        vcpu_ptimer(vcpu)->cntvoff = 0;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;

        vtimer->irq.irq = default_vtimer_irq.irq;
        ptimer->irq.irq = default_ptimer_irq.irq;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

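/*
 * KVM_SET_ONE_REG/KVM_GET_ONE_REG accessors for the timer registers;
 * these operate on the virtual timer state only.
 */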
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                vtimer->cnt_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                vtimer->cnt_cval = value;
                break;
        default:
                return -1;
        }

        kvm_timer_update_state(vcpu);
        return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return vtimer->cnt_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vtimer->cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return vtimer->cnt_cval;
        }
        return (u64)-1;
}

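/*
 * CPU hotplug callbacks: enable the host vtimer PPI when a CPU comes
 * online, and disable it again when the CPU goes down.
 */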
static int kvm_timer_starting_cpu(unsigned int cpu)
{
        kvm_timer_init_interrupt(NULL);
        return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(host_vtimer_irq);
        return 0;
}

int kvm_timer_hyp_init(void)
{
        struct arch_timer_kvm_info *info;
        int err;

        info = arch_timer_get_kvm_info();
        timecounter = &info->timecounter;

        if (!timecounter->cc) {
                kvm_err("kvm_arch_timer: uninitialized timecounter\n");
                return -ENODEV;
        }

        if (info->virtual_irq <= 0) {
                kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
                        info->virtual_irq);
                return -ENODEV;
        }
        host_vtimer_irq = info->virtual_irq;

        host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
        if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
            host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
                kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
                        host_vtimer_irq);
                host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
        }

        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        host_vtimer_irq, err);
                return err;
        }

        kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                          "kvm/arm/timer:starting", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        timer_disarm(timer);
        kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}

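/*
 * Claim ownership of the timer PPIs on behalf of the timer contexts,
 * and check that all vcpus agree on the vtimer and ptimer interrupt
 * numbers.
 */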
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
        int vtimer_irq, ptimer_irq;
        int i, ret;

        vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
        if (ret)
                return false;

        ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
        if (ret)
                return false;

        kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
                if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
                    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
                        return false;
        }

        return true;
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct irq_desc *desc;
        struct irq_data *data;
        int phys_irq;
        int ret;

        if (timer->enabled)
                return 0;

        /* Without a VGIC we do not map virtual IRQs to physical IRQs */
        if (!irqchip_in_kernel(vcpu->kvm))
                goto no_vgic;

        if (!vgic_initialized(vcpu->kvm))
                return -ENODEV;

        if (!timer_irqs_are_valid(vcpu)) {
                kvm_debug("incorrectly configured timer irqs\n");
                return -EINVAL;
        }

        /*
         * Find the physical IRQ number corresponding to the host_vtimer_irq
         */
        desc = irq_to_desc(host_vtimer_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }

        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        phys_irq = data->hwirq;

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
        if (ret)
                return ret;

no_vgic:
        timer->enabled = 1;
        return 0;
}

/*
 * On a VHE system, we only need to configure the traps on physical timer
 * and counter accesses from EL0 and EL1 once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
 * which makes those bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
        /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
        u32 cnthctl_shift = 10;
        u64 val;

        /*
         * Disallow physical timer access for the guest.
         * Physical counter access is allowed.
         */
        val = read_sysreg(cnthctl_el2);
        val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
        val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
        write_sysreg(val, cnthctl_el2);
}

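/* Propagate a new vtimer/ptimer interrupt assignment to all vcpus. */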
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
                vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
        }
}

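/*
 * Userspace can change the timer interrupt numbers through the
 * KVM_ARM_VCPU_TIMER_CTRL vcpu device attribute group, but only
 * before the timer has been enabled.
 */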
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        int irq;

        if (!irqchip_in_kernel(vcpu->kvm))
                return -EINVAL;

        if (get_user(irq, uaddr))
                return -EFAULT;

        if (!(irq_is_ppi(irq)))
                return -EINVAL;

        if (vcpu->arch.timer_cpu.enabled)
                return -EBUSY;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
                break;
        default:
                return -ENXIO;
        }

        return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *timer;
        int irq;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                timer = vcpu_vtimer(vcpu);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                timer = vcpu_ptimer(vcpu);
                break;
        default:
                return -ENXIO;
        }

        irq = timer->irq.irq;
        return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                return 0;
        }

        return -ENXIO;
}