GNU Linux-libre 4.9.337-gnu1
[releases.git] / arch / powerpc / kvm / book3s.c
1 /*
2  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
3  *
4  * Authors:
5  *    Alexander Graf <agraf@suse.de>
6  *    Kevin Wolf <mail@kevin-wolf.de>
7  *
8  * Description:
9  * This file is derived from arch/powerpc/kvm/44x.c,
10  * by Hollis Blanchard <hollisb@us.ibm.com>.
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License, version 2, as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kvm_host.h>
18 #include <linux/err.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/miscdevice.h>
23
24 #include <asm/reg.h>
25 #include <asm/cputable.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm/uaccess.h>
29 #include <asm/io.h>
30 #include <asm/kvm_ppc.h>
31 #include <asm/kvm_book3s.h>
32 #include <asm/mmu_context.h>
33 #include <asm/page.h>
34 #include <linux/gfp.h>
35 #include <linux/sched.h>
36 #include <linux/vmalloc.h>
37 #include <linux/highmem.h>
38
39 #include "book3s.h"
40 #include "trace.h"
41
/* Expand to the (offset, type) pair kvm_stats_debugfs_item expects for a
 * per-vcpu statistic named @x inside struct kvm_vcpu. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

/* Per-vcpu counters exported through debugfs; common KVM code walks this
 * NULL-terminated table. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",       VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",          VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",               VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_successful_wait",       VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};
76
77 void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
78 {
79         if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
80                 ulong pc = kvmppc_get_pc(vcpu);
81                 ulong lr = kvmppc_get_lr(vcpu);
82                 if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
83                         kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
84                 if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
85                         kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
86                 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
87         }
88 }
89 EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
90
91 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
92 {
93         if (!is_kvmppc_hv_enabled(vcpu->kvm))
94                 return to_book3s(vcpu)->hior;
95         return 0;
96 }
97
98 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
99                         unsigned long pending_now, unsigned long old_pending)
100 {
101         if (is_kvmppc_hv_enabled(vcpu->kvm))
102                 return;
103         if (pending_now)
104                 kvmppc_set_int_pending(vcpu, 1);
105         else if (old_pending)
106                 kvmppc_set_int_pending(vcpu, 0);
107 }
108
109 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
110 {
111         ulong crit_raw;
112         ulong crit_r1;
113         bool crit;
114
115         if (is_kvmppc_hv_enabled(vcpu->kvm))
116                 return false;
117
118         crit_raw = kvmppc_get_critical(vcpu);
119         crit_r1 = kvmppc_get_gpr(vcpu, 1);
120
121         /* Truncate crit indicators in 32 bit mode */
122         if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
123                 crit_raw &= 0xffffffff;
124                 crit_r1 &= 0xffffffff;
125         }
126
127         /* Critical section when crit == r1 */
128         crit = (crit_raw == crit_r1);
129         /* ... and we're in supervisor mode */
130         crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
131
132         return crit;
133 }
134
/*
 * Deliver interrupt vector @vec to the guest immediately: save PC to SRR0
 * and MSR (OR'd with @flags, e.g. ISI/program-check reason bits) to SRR1,
 * redirect PC to the vector relative to the interrupt offset, and let the
 * MMU backend recompute the interrupt-time MSR.
 */
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	/* Strip the split-real fixup first so SRR0 gets the real PC. */
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
143
/*
 * Map an interrupt vector offset to its internal delivery priority
 * (BOOK3S_IRQPRIO_*). Unknown vectors map to BOOK3S_IRQPRIO_MAX,
 * which no delivery loop ever reaches.
 */
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;         break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;        break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;         break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;         break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;         break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;         break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;             break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;       break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;            break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;              break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;           break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;          break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;              break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;                break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;              break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;                  break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;          break;
	default:    prio = BOOK3S_IRQPRIO_MAX;                  break;
	}

	return prio;
}
171
/*
 * Drop the pending bit for interrupt vector @vec and mirror the new
 * pending state into the shared page (PR guests only).
 */
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

/* Mark interrupt vector @vec pending; it is delivered on guest entry. */
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

/* Raise a program interrupt with @flags as the SRR1 reason bits. */
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

/* Queue a decrementer interrupt. */
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

/* Non-zero iff a decrementer interrupt is pending. */
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

/* Cancel a pending decrementer interrupt. */
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

/*
 * Queue an external interrupt. KVM_INTERRUPT_SET_LEVEL selects the
 * level-triggered variant, which stays pending until userspace clears it.
 */
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

/* Cancel both the edge and the level external interrupt variants. */
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

/* Queue a data storage interrupt: set DAR/DSISR, then mark it pending. */
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

/*
 * Queue an instruction storage interrupt. The ISI reason bits travel in
 * SRR1, so fold @flags into the MSR image that delivery copies to SRR1.
 */
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
254
255 static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
256                                          unsigned int priority)
257 {
258         int deliver = 1;
259         int vec = 0;
260         bool crit = kvmppc_critical_section(vcpu);
261
262         switch (priority) {
263         case BOOK3S_IRQPRIO_DECREMENTER:
264                 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
265                 vec = BOOK3S_INTERRUPT_DECREMENTER;
266                 break;
267         case BOOK3S_IRQPRIO_EXTERNAL:
268         case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
269                 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
270                 vec = BOOK3S_INTERRUPT_EXTERNAL;
271                 break;
272         case BOOK3S_IRQPRIO_SYSTEM_RESET:
273                 vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
274                 break;
275         case BOOK3S_IRQPRIO_MACHINE_CHECK:
276                 vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
277                 break;
278         case BOOK3S_IRQPRIO_DATA_STORAGE:
279                 vec = BOOK3S_INTERRUPT_DATA_STORAGE;
280                 break;
281         case BOOK3S_IRQPRIO_INST_STORAGE:
282                 vec = BOOK3S_INTERRUPT_INST_STORAGE;
283                 break;
284         case BOOK3S_IRQPRIO_DATA_SEGMENT:
285                 vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
286                 break;
287         case BOOK3S_IRQPRIO_INST_SEGMENT:
288                 vec = BOOK3S_INTERRUPT_INST_SEGMENT;
289                 break;
290         case BOOK3S_IRQPRIO_ALIGNMENT:
291                 vec = BOOK3S_INTERRUPT_ALIGNMENT;
292                 break;
293         case BOOK3S_IRQPRIO_PROGRAM:
294                 vec = BOOK3S_INTERRUPT_PROGRAM;
295                 break;
296         case BOOK3S_IRQPRIO_VSX:
297                 vec = BOOK3S_INTERRUPT_VSX;
298                 break;
299         case BOOK3S_IRQPRIO_ALTIVEC:
300                 vec = BOOK3S_INTERRUPT_ALTIVEC;
301                 break;
302         case BOOK3S_IRQPRIO_FP_UNAVAIL:
303                 vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
304                 break;
305         case BOOK3S_IRQPRIO_SYSCALL:
306                 vec = BOOK3S_INTERRUPT_SYSCALL;
307                 break;
308         case BOOK3S_IRQPRIO_DEBUG:
309                 vec = BOOK3S_INTERRUPT_TRACE;
310                 break;
311         case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
312                 vec = BOOK3S_INTERRUPT_PERFMON;
313                 break;
314         case BOOK3S_IRQPRIO_FAC_UNAVAIL:
315                 vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
316                 break;
317         default:
318                 deliver = 0;
319                 printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
320                 break;
321         }
322
323 #if 0
324         printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
325 #endif
326
327         if (deliver)
328                 kvmppc_inject_interrupt(vcpu, vec, 0);
329
330         return deliver;
331 }
332
333 /*
334  * This function determines if an irqprio should be cleared once issued.
335  */
336 static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
337 {
338         switch (priority) {
339                 case BOOK3S_IRQPRIO_DECREMENTER:
340                         /* DEC interrupts get cleared by mtdec */
341                         return false;
342                 case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
343                         /* External interrupts get cleared by userspace */
344                         return false;
345         }
346
347         return true;
348 }
349
/*
 * Deliver at most one pending exception before entering the guest: scan
 * from the lowest-numbered (highest-priority) pending bit upwards until
 * one is deliverable, clear its bit if the source is consumed by
 * delivery, then publish the remaining pending state to the guest.
 */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			/* Delivered and consumed: stop after one interrupt. */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
379
/*
 * Translate a guest physical address to a host pfn. Accesses that hit
 * the magic (shared) page are redirected to the host page backing
 * vcpu->arch.shared instead of going through the memslots.
 */
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	/* A 32-bit guest only sees the low word of the magic page address. */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		/* Take a reference, matching what gfn_to_pfn_prot() does. */
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
405
406 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
407                  enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
408 {
409         bool data = (xlid == XLATE_DATA);
410         bool iswrite = (xlrw == XLATE_WRITE);
411         int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
412         int r;
413
414         if (relocated) {
415                 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
416         } else {
417                 pte->eaddr = eaddr;
418                 pte->raddr = eaddr & KVM_PAM;
419                 pte->vpage = VSID_REAL | eaddr >> 12;
420                 pte->may_read = true;
421                 pte->may_write = true;
422                 pte->may_execute = true;
423                 r = 0;
424
425                 if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
426                     !data) {
427                         if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
428                             ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
429                         pte->raddr &= ~SPLIT_HACK_MASK;
430                 }
431         }
432
433         return r;
434 }
435
436 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
437                                          u32 *inst)
438 {
439         ulong pc = kvmppc_get_pc(vcpu);
440         int r;
441
442         if (type == INST_SC)
443                 pc -= 4;
444
445         r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
446         if (r == EMULATE_DONE)
447                 return r;
448         else
449                 return EMULATE_AGAIN;
450 }
451 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
452
/* No extra per-vcpu setup needed on book3s. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

/* Sub-architecture vcpu hooks: nothing to do for book3s. */
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

/* KVM_GET_SREGS: delegate to the active (HV or PR) backend. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

/* KVM_SET_SREGS: delegate to the active (HV or PR) backend. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
478
/* KVM_GET_REGS: snapshot the guest-visible GPRs and key SPRs. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

/* KVM_SET_REGS: load guest-visible GPRs and key SPRs from userspace. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	/* May trigger backend-specific MSR transition handling. */
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
533
/* FP state is not exposed through the generic fpu ioctls on book3s. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
543
/*
 * KVM_GET_ONE_REG: ask the active backend first; for register ids it
 * does not handle (-EINVAL) fall back to the state shared by HV and PR.
 */
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				/* A VSR is the pair of doublewords. */
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			/* Canonical software breakpoint: trap instruction. */
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
616
/*
 * KVM_SET_ONE_REG: ask the active backend first; for register ids it
 * does not handle (-EINVAL) fall back to the state shared by HV and PR.
 */
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				/* A VSR is the pair of doublewords. */
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
687
/*
 * The functions below are thin dispatchers into the active backend
 * (HV or PR) via kvm->arch.kvm_ops.
 */

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

/* KVM_TRANSLATE is a no-op on book3s; report success. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* KVM_SET_GUEST_DEBUG: just record the requested control bits. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

/* Decrementer timer fired: queue the interrupt and wake the vcpu. */
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

/* MMU-notifier hooks: forwarded to the backend's shadow-MMU handling. */
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
810
/* Initialise VM state common to HV and PR, then call the backend. */
int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

/* Backend teardown first, then release the common RTAS token state. */
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	/* The backend should have torn down all TCE tables by now. */
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
832
/*
 * H_LOGICAL_CI_LOAD hypercall: cache-inhibited load on behalf of the
 * guest. Guest r4 holds the access size (1/2/4/8), r5 the address; the
 * loaded value is returned in r4. Returns H_TOO_HARD when the access
 * cannot be satisfied here so the caller can punt to userspace.
 */
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	/* The io bus lookup requires the kvm->srcu read lock. */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	/* Bus data is big-endian; convert to the guest's native r4 value. */
	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		/* Unreachable: size was validated above. */
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
874
/*
 * H_LOGICAL_CI_STORE hypercall: cache-inhibited store on behalf of the
 * guest. Guest r4 holds the access size (1/2/4/8), r5 the address and
 * r6 the value. Returns H_TOO_HARD when the access cannot be satisfied
 * here so the caller can punt to userspace.
 */
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	/* Marshal the value big-endian, as the io bus expects it. */
	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	/* The io bus lookup requires the kvm->srcu read lock. */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
914
int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

/* Ask the active backend whether it implements hypercall @hcall. */
int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
929
/*
 * Module init: register with common KVM. On 32-bit builds the PR
 * backend is part of this module, so it is initialised here as well.
 */
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;

}

/* Module exit: mirror of kvmppc_book3s_init(), in reverse order. */
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif