GNU Linux-libre 4.19.264-gnu1
arch/powerpc/kvm/book3s_pr.c
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        ulong pc = kvmppc_get_pc(vcpu);

        /* We are in DR only split real mode */
        if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
                return;

        /* We have not fixed up the guest already */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
                return;

        /* The code is in fixupable address space */
        if (pc & SPLIT_HACK_MASK)
                return;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
                ulong pc = kvmppc_get_pc(vcpu);
                ulong lr = kvmppc_get_lr(vcpu);

                /* Undo kvmppc_fixup_split_real(): strip the alias from PC/LR */
                if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
                        kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
                if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
                        kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
                vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
        }
}

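/*
 * Illustrative sketch of the split-real hack above (the exact
 * SPLIT_HACK_OFFS/SPLIT_HACK_MASK constants live in asm/kvm_book3s.h):
 * a guest with MSR_DR set but MSR_IR clear fetches instructions
 * untranslated while its data accesses are translated. We cannot run
 * untranslated fetches in problem state, so kvmppc_fixup_split_real()
 * moves the PC into a high alias (pc | SPLIT_HACK_OFFS) that the shadow
 * MMU maps back onto the guest's real addresses, and
 * kvmppc_unfixup_split_real() strips that alias from PC and LR again
 * before the guest can observe them.
 */
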
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif

        /* Disable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);

        kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_unfixup_split_real(vcpu);

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_save_tm_pr(vcpu);

        /* Enable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

        vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
        svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
        svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
        svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
        svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
        svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
        svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
        svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
        svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
        svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
        svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
        svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
        svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
        svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
        svcpu->cr  = vcpu->arch.regs.ccr;
        svcpu->xer = vcpu->arch.regs.xer;
        svcpu->ctr = vcpu->arch.regs.ctr;
        svcpu->lr  = vcpu->arch.regs.link;
        svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
        /*
         * Also save the current timebase values so that we can derive
         * the guest PURR and SPURR values on exit (see the worked
         * example after this function).
         */
        vcpu->arch.entry_tb = get_tb();
        vcpu->arch.entry_vtb = get_vtb();
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.entry_ic = mfspr(SPRN_IC);
        svcpu->in_use = true;

        svcpu_put(svcpu);
}

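/*
 * Worked example of the PURR/SPURR bookkeeping (illustrative numbers):
 * if the timebase reads 0x1000 at guest entry (entry_tb) and 0x1500 at
 * exit, kvmppc_copy_from_svcpu() below credits 0x500 ticks to both purr
 * and spurr, i.e. PR KVM approximates the processor-utilization
 * registers with the wall-clock timebase ticks spent inside the guest.
 */
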
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong guest_msr = kvmppc_get_msr(vcpu);
        ulong smsr = guest_msr;

        /* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
                MSR_TM | MSR_TS_MASK;
#else
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External provider (FPU/VMX/VSX) bits the guest currently owns */
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * In guest privileged state we want all TM transactions to fail,
         * so clear the MSR TM bit; every tbegin. will then trap into
         * the host.
         */
        if (!(guest_msr & MSR_PR))
                smsr &= ~MSR_TM;
#endif
        vcpu->arch.shadow_msr = smsr;
}

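/*
 * Worked example for kvmppc_recalc_shadow_msr() (illustrative): a 64-bit
 * guest kernel running with MSR_SF|MSR_ME|MSR_FP and MSR_PR clear keeps
 * only MSR_SF from its own MSR (plus MSR_FP once the FPU is actually
 * guest-owned), and always gets MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_PR|MSR_EE
 * ORed in: the guest physically runs in problem state with translation
 * on, whatever its virtual MSR claims. With TM configured, MSR_TM is
 * additionally masked out here because the guest is privileged.
 */
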
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        ulong old_msr;
#endif

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
        vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
        vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
        vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
        vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
        vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
        vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
        vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
        vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
        vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
        vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
        vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
        vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
        vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
        vcpu->arch.regs.ccr  = svcpu->cr;
        vcpu->arch.regs.xer = svcpu->xer;
        vcpu->arch.regs.ctr = svcpu->ctr;
        vcpu->arch.regs.link  = svcpu->lr;
        vcpu->arch.regs.nip  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
        vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
        /*
         * Update purr and spurr using time base on exit.
         */
        vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
        vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
        to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * Unlike other MSR bits, the MSR[TS] bits can be changed by the
         * guest without notifying the host: they are modified by
         * unprivileged instructions such as "tbegin"/"tend"/"tresume"/
         * "tsuspend" in a PR KVM guest.
         *
         * We therefore have to sync them here to compute a correct
         * shadow_msr.
         *
         * A privileged guest's tbegin always fails at present, so we
         * only need to take care of problem-state guests.
         */
        old_msr = kvmppc_get_msr(vcpu);
        if (unlikely((old_msr & MSR_PR) &&
                (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
                                (old_msr & (MSR_TS_MASK)))) {
                old_msr &= ~(MSR_TS_MASK);
                old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
                kvmppc_set_msr_fast(vcpu, old_msr);
                kvmppc_recalc_shadow_msr(vcpu);
        }
#endif

        svcpu->in_use = false;

out:
        svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
        tm_enable();
        vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
        vcpu->arch.texasr = mfspr(SPRN_TEXASR);
        vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
        tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
        tm_enable();
        mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
        mtspr(SPRN_TEXASR, vcpu->arch.texasr);
        mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
        tm_disable();
}

/*
 * Load up the math (FP/VEC/VSX) facilities that are enabled in the guest
 * MSR (kvmppc_get_msr()) but not currently enabled in hardware; see the
 * example after this function.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
        ulong exit_nr;
        ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
                (MSR_FP | MSR_VEC | MSR_VSX);

        if (!ext_diff)
                return;

        if (ext_diff == MSR_FP)
                exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
        else if (ext_diff == MSR_VEC)
                exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
        else
                exit_nr = BOOK3S_INTERRUPT_VSX;

        kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

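/*
 * Example for kvmppc_handle_lost_math_exts() (illustrative): if the
 * guest MSR has MSR_FP|MSR_VEC set but guest_owned_ext only contains
 * MSR_FP, ext_diff is MSR_VEC and we replay a BOOK3S_INTERRUPT_ALTIVEC
 * load via kvmppc_handle_ext(), so vector state lost across the TM
 * save/restore path is faulted back in before the guest touches it.
 */
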
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
        if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
                kvmppc_save_tm_sprs(vcpu);
                return;
        }

        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_giveup_ext(vcpu, MSR_VSX);

        preempt_disable();
        _kvmppc_save_tm_pr(vcpu, mfmsr());
        preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
        if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
                kvmppc_restore_tm_sprs(vcpu);
                if (kvmppc_get_msr(vcpu) & MSR_TM) {
                        kvmppc_handle_lost_math_exts(vcpu);
                        if (vcpu->arch.fscr & FSCR_TAR)
                                kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
                }
                return;
        }

        preempt_disable();
        _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
        preempt_enable();

        if (kvmppc_get_msr(vcpu) & MSR_TM) {
                kvmppc_handle_lost_math_exts(vcpu);
                if (vcpu->arch.fscr & FSCR_TAR)
                        kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
        }
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

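/*
 * Worked example for do_kvm_unmap_hva() (illustrative numbers): for a
 * memslot with userspace_addr 0x10000000, base_gfn 0x100 and npages
 * 0x10, an unmap of [0x10002000, 0x10004000) clips to the same range,
 * maps to gfn 0x102 and gfn_end 0x104, and each vcpu's shadow PTEs for
 * guest physical [0x102000, 0x104000) are flushed. hva_end is rounded
 * up with PAGE_SIZE - 1 so a partial last page still flushes its gfn.
 */
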
static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
                          unsigned long end)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr;

        /* For PAPR guest, make sure MSR reflects guest mode */
        if (vcpu->arch.papr_enabled)
                msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * We must never let the guest MSR reach TS=10 (transactional)
         * with PR=0, since we always fail transactions in guest
         * privileged state.
         */
        if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
                kvmppc_emulate_tabort(vcpu,
                        TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

        old_msr = kvmppc_get_msr(vcpu);
        msr &= to_book3s(vcpu)->msr_mask;
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        kvmppc_set_msr_fast(vcpu, msr);
                }
        }

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
        else
                kvmppc_unfixup_split_real(vcpu);

        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around that we need to flush. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note:
         * We assume that such a transition only happens while in kernel
         * mode, ie, we never transition from user 32-bit to kernel 64-bit
         * with a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (kvmppc_get_msr(vcpu) & MSR_TM)
                kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are at hypervisor level on a 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force-disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
        case PVR_POWER8E:
        case PVR_POWER8NVL:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate the 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages
 * (see the worked example after this function).
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                        page[i] &= cpu_to_be32(0xfffffff7);

        kunmap_atomic(page);
        put_page(hpage);
}

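/*
 * Worked example for kvmppc_patch_dcbz() (illustrative; INS_DCBZ is
 * 0x7c0007ec: primary opcode 31, extended opcode 1014): masking an
 * instruction with 0xff0007ff ignores the RA/RB register fields, so any
 * "dcbz rA,rB" matches. Clearing bit 0x8 (the "&= 0xfffffff7" above)
 * rewrites extended opcode 1014 (dcbz) to the reserved opcode 1010,
 * which raises a program interrupt that kvmppc_exit_pr_progint()
 * recognizes via the same mask and emulates as a 32-byte dcbz.
 */
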
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
                return true;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte = { 0 };
        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
                pte.wimg = HPTE_R_M;
        }

        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
                if (!data &&
                    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                        pte.raddr &= ~SPLIT_HACK_MASK;
                /* fall through */
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT || page_found == -EPERM) {
                /* Page not found in guest PTE entries, or protection fault */
                u64 flags;

                if (page_found == -EPERM)
                        flags = DSISR_PROTFAULT;
                else
                        flags = DSISR_NOHPTE;
                if (data) {
                        flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
                        kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
                } else {
                        kvmppc_core_queue_inst_storage(vcpu, flags);
                }
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
                        /* Exit KVM if mapping failed */
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
                t->fp_save_area = NULL;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                t->vr_save_area = NULL;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

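/*
 * Example for kvmppc_giveup_ext() (illustrative): a caller passing only
 * MSR_VSX has the mask widened to MSR_VSX|MSR_FP|MSR_VEC, because VSX
 * instructions can reference FP and VMX registers; conversely, giving
 * up MSR_FP alone still clears MSR_VSX from guest_owned_ext at the end,
 * since VSX must never remain guest-owned without the FP half of its
 * register file.
 */
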
/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
                /* Facility not available to the guest, ignore giveup request */
                return;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                vcpu->arch.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, current->thread.tar);
                vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                break;
        }
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
#endif
        }

        t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                preempt_enable();
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                preempt_enable();
        }
#endif
        current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
        /* Inject the Interrupt Cause field and trigger a guest interrupt */
        vcpu->arch.fscr &= ~(0xffULL << 56);
        vcpu->arch.fscr |= (fac << 56);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

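/*
 * Example for kvmppc_trigger_fac_interrupt() (illustrative; the *_LG
 * values are the facility bit numbers from asm/reg.h, e.g. FSCR_TAR_LG
 * is 8): the top byte of FSCR is the Interrupt Cause (IC) field, so
 * writing fac == FSCR_TAR_LG as (fac << 56) makes the guest's facility
 * unavailable handler see IC == 8, i.e. "TAR unavailable", when the
 * BOOK3S_INTERRUPT_FAC_UNAVAIL interrupt is delivered.
 */
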
static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        enum emulation_result er = EMULATE_FAIL;

        if (!(kvmppc_get_msr(vcpu) & MSR_PR))
                er = kvmppc_emulate_instruction(vcpu->run, vcpu);

        if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
                /* Couldn't emulate, trigger interrupt in guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
        }
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        bool guest_fac_enabled;
        BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

        /*
         * Not every facility is enabled by FSCR bits, check whether the
         * guest has this facility enabled at all.
         */
        switch (fac) {
        case FSCR_TAR_LG:
        case FSCR_EBB_LG:
                guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
                break;
        case FSCR_TM_LG:
                guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
                break;
        default:
                guest_fac_enabled = false;
                break;
        }

        if (!guest_fac_enabled) {
                /* Facility not enabled by the guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
                return RESUME_GUEST;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                /* TAR switching isn't lazy in Linux yet */
                current->thread.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, vcpu->arch.tar);
                vcpu->arch.shadow_fscr |= FSCR_TAR;
                break;
        default:
                kvmppc_emulate_fac(vcpu, fac);
                break;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * Since we disabled MSR_TM in privileged state, an mfspr of a TM
         * SPR can trigger a TM facility unavailable interrupt. In that
         * case the emulation is handled by kvmppc_emulate_fac(), which
         * eventually calls kvmppc_emulate_mfspr(). The mfspr can target
         * any RT, including the non-volatile registers, so we have to
         * return RESUME_GUEST_NV to make those register updates visible.
         */
        if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
                return RESUME_GUEST_NV;
#endif

        return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* TAR got dropped, drop it in shadow too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
                vcpu->arch.fscr = fscr;
                kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
                return;
        }

        vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr | MSR_SE);
        }
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr & ~MSR_SE);
        }
}

static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int exit_nr)
{
        enum emulation_result er;
        ulong flags;
        u32 last_inst;
        int emul, r;

        /*
         * shadow_srr1 only contains valid flags if we came here via a program
         * exception. The other exceptions (emulation assist, FP unavailable,
         * etc.) do not provide flags in SRR1, so use an illegal-instruction
         * exception when injecting a program interrupt into the guest.
         */
        if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
        else
                flags = SRR1_PROGILL;

        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
        if (emul != EMULATE_DONE)
                return RESUME_GUEST;

        if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
                pr_info("Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
                        kvmppc_get_pc(vcpu), last_inst);
#endif
                if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
                        kvmppc_core_queue_program(vcpu, flags);
                        return RESUME_GUEST;
                }
        }

        vcpu->stat.emulated_inst_exits++;
        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_FAIL:
                pr_crit("%s: emulation at %lx failed (%08x)\n",
                        __func__, kvmppc_get_pc(vcpu), last_inst);
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                r = RESUME_HOST_NV;
                break;
        case EMULATE_EXIT_USER:
                r = RESUME_HOST_NV;
                break;
        default:
                BUG();
        }

        return r;
}

int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

                if (kvmppc_is_split_real(vcpu))
                        kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to
                         *     flush&patch the page, so we can't use the NX bit
                         *     inside the guest. Let's cross our fingers that no
                         *     guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        kvmppc_core_queue_inst_storage(vcpu,
                                                shadow_srr1 & 0x58000000);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
        case BOOK3S_INTERRUPT_DOORBELL:
        case BOOK3S_INTERRUPT_H_DOORBELL:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
        case BOOK3S_INTERRUPT_H_VIRT:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_HMI:
        case BOOK3S_INTERRUPT_PERFMON:
        case BOOK3S_INTERRUPT_SYSTEM_RESET:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
                break;
        case BOOK3S_INTERRUPT_SYSCALL:
        {
                u32 last_sc;
                int emul;

                /* Get last sc for papr */
                if (vcpu->arch.papr_enabled) {
                        /* The sc instruction points SRR0 to the next inst */
                        emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
                        if (emul != EMULATE_DONE) {
                                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
                                r = RESUME_GUEST;
                                break;
                        }
                }

1280                 if (vcpu->arch.papr_enabled &&
1281                     (last_sc == 0x44000022) &&
1282                     !(kvmppc_get_msr(vcpu) & MSR_PR)) {
1283                         /* SC 1 papr hypercalls */
1284                         ulong cmd = kvmppc_get_gpr(vcpu, 3);
1285                         int i;
1286
1287 #ifdef CONFIG_PPC_BOOK3S_64
1288                         if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
1289                                 r = RESUME_GUEST;
1290                                 break;
1291                         }
1292 #endif
1293
1294                         run->papr_hcall.nr = cmd;
1295                         for (i = 0; i < 9; ++i) {
1296                                 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
1297                                 run->papr_hcall.args[i] = gpr;
1298                         }
1299                         run->exit_reason = KVM_EXIT_PAPR_HCALL;
1300                         vcpu->arch.hcall_needed = 1;
1301                         r = RESUME_HOST;
1302                 } else if (vcpu->arch.osi_enabled &&
1303                     (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
1304                     (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1305                         /* MOL hypercalls */
1306                         u64 *gprs = run->osi.gprs;
1307                         int i;
1308
1309                         run->exit_reason = KVM_EXIT_OSI;
1310                         for (i = 0; i < 32; i++)
1311                                 gprs[i] = kvmppc_get_gpr(vcpu, i);
1312                         vcpu->arch.osi_needed = 1;
1313                         r = RESUME_HOST_NV;
1314                 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
1315                     (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1316                         /* KVM PV hypercalls */
1317                         kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1318                         r = RESUME_GUEST;
1319                 } else {
1320                         /* Guest syscalls */
1321                         vcpu->stat.syscall_exits++;
1322                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1323                         r = RESUME_GUEST;
1324                 }
1325                 break;
1326         }
1327         case BOOK3S_INTERRUPT_FP_UNAVAIL:
1328         case BOOK3S_INTERRUPT_ALTIVEC:
1329         case BOOK3S_INTERRUPT_VSX:
1330         {
1331                 int ext_msr = 0;
1332                 int emul;
1333                 u32 last_inst;
1334
1335                 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1336                         /* Do paired single instruction emulation */
1337                         emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
1338                                                     &last_inst);
1339                         if (emul == EMULATE_DONE)
1340                                 r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
1341                         else
1342                                 r = RESUME_GUEST;
1343
1344                         break;
1345                 }
1346
1347                 /* Enable external provider */
1348                 switch (exit_nr) {
1349                 case BOOK3S_INTERRUPT_FP_UNAVAIL:
1350                         ext_msr = MSR_FP;
1351                         break;
1352
1353                 case BOOK3S_INTERRUPT_ALTIVEC:
1354                         ext_msr = MSR_VEC;
1355                         break;
1356
1357                 case BOOK3S_INTERRUPT_VSX:
1358                         ext_msr = MSR_VSX;
1359                         break;
1360                 }
1361
1362                 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1363                 break;
1364         }
1365         case BOOK3S_INTERRUPT_ALIGNMENT:
1366         {
1367                 u32 last_inst;
1368                 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1369
1370                 if (emul == EMULATE_DONE) {
1371                         u32 dsisr;
1372                         u64 dar;
1373
1374                         dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
1375                         dar = kvmppc_alignment_dar(vcpu, last_inst);
1376
1377                         kvmppc_set_dsisr(vcpu, dsisr);
1378                         kvmppc_set_dar(vcpu, dar);
1379
1380                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1381                 }
1382                 r = RESUME_GUEST;
1383                 break;
1384         }
1385 #ifdef CONFIG_PPC_BOOK3S_64
1386         case BOOK3S_INTERRUPT_FAC_UNAVAIL:
1387                 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
1388                 break;
1389 #endif
1390         case BOOK3S_INTERRUPT_MACHINE_CHECK:
1391                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1392                 r = RESUME_GUEST;
1393                 break;
1394         case BOOK3S_INTERRUPT_TRACE:
1395                 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1396                         run->exit_reason = KVM_EXIT_DEBUG;
1397                         r = RESUME_HOST;
1398                 } else {
1399                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1400                         r = RESUME_GUEST;
1401                 }
1402                 break;
1403         default:
1404         {
1405                 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1406                 /* Ugh - bork here! What did we get? */
1407                 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
1408                         exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
1409                 r = RESUME_HOST;
1410                 BUG();
1411                 break;
1412         }
1413         }
1414
1415         if (!(r & RESUME_HOST)) {
1416                 /* To avoid clobbering exit_reason, only check for signals if
1417                  * we aren't already exiting to userspace for some other
1418                  * reason. */
1419
1420                 /*
1421                  * Interrupts could be timers for the guest which we have to
1422                  * inject again, so let's postpone them until we're in the guest
1423                  * and if we really did time things so badly, then we just exit
1424                  * again due to a host external interrupt.
1425                  */
1426                 s = kvmppc_prepare_to_enter(vcpu);
1427                 if (s <= 0)
1428                         r = s;
1429                 else {
1430                         /* interrupts now hard-disabled */
1431                         kvmppc_fix_ee_before_entry();
1432                 }
1433
1434                 kvmppc_handle_lost_ext(vcpu);
1435         }
1436
1437         trace_kvm_book3s_reenter(r, vcpu);
1438
1439         return r;
1440 }
1441
1442 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1443                                             struct kvm_sregs *sregs)
1444 {
1445         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1446         int i;
1447
1448         sregs->pvr = vcpu->arch.pvr;
1449
1450         sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1451         if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1452                 for (i = 0; i < 64; i++) {
1453                         sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1454                         sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1455                 }
1456         } else {
1457                 for (i = 0; i < 16; i++)
1458                         sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
1459
1460                 for (i = 0; i < 8; i++) {
1461                         sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1462                         sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
1463                 }
1464         }
1465
1466         return 0;
1467 }
1468
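     /*
      * Counterpart of the above: install segment state supplied by
      * userspace. On 64-bit the SLB is cleared and valid entries are
      * re-entered via slbmte; on 32-bit MMUs the segment registers and
      * BATs are rewritten instead. The shadow MMU is flushed afterwards
      * so no stale translations survive the change.
      */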
1469 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
1470                                             struct kvm_sregs *sregs)
1471 {
1472         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1473         int i;
1474
1475         kvmppc_set_pvr_pr(vcpu, sregs->pvr);
1476
1477         vcpu3s->sdr1 = sregs->u.s.sdr1;
1478 #ifdef CONFIG_PPC_BOOK3S_64
1479         if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1480                 /* Flush all SLB entries */
1481                 vcpu->arch.mmu.slbmte(vcpu, 0, 0);
1482                 vcpu->arch.mmu.slbia(vcpu);
1483
1484                 for (i = 0; i < 64; i++) {
1485                         u64 rb = sregs->u.s.ppc64.slb[i].slbe;
1486                         u64 rs = sregs->u.s.ppc64.slb[i].slbv;
1487
1488                         if (rb & SLB_ESID_V)
1489                                 vcpu->arch.mmu.slbmte(vcpu, rs, rb);
1490                 }
1491         } else
1492 #endif
1493         {
1494                 for (i = 0; i < 16; i++) {
1495                         vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1496                 }
1497                 for (i = 0; i < 8; i++) {
1498                         kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
1499                                        (u32)sregs->u.s.ppc32.ibat[i]);
1500                         kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
1501                                        (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
1502                         kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
1503                                        (u32)sregs->u.s.ppc32.dbat[i]);
1504                         kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
1505                                        (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
1506                 }
1507         }
1508
1509         /* Flush the MMU after messing with the segments */
1510         kvmppc_mmu_pte_flush(vcpu, 0, 0);
1511
1512         return 0;
1513 }
1514
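     /*
      * Handle PR-specific register reads for the KVM_GET_ONE_REG
      * ioctl; IDs not handled here return -EINVAL so the caller can
      * fall back to the common one_reg handlers. An illustrative
      * userspace call (vcpu_fd is a hypothetical vcpu file descriptor)
      * might look like:
      *
      *         u64 hior;
      *         struct kvm_one_reg reg = {
      *                 .id   = KVM_REG_PPC_HIOR,
      *                 .addr = (__u64)&hior,
      *         };
      *         ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
      */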
1515 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1516                                  union kvmppc_one_reg *val)
1517 {
1518         int r = 0;
1519
1520         switch (id) {
1521         case KVM_REG_PPC_DEBUG_INST:
1522                 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1523                 break;
1524         case KVM_REG_PPC_HIOR:
1525                 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1526                 break;
1527         case KVM_REG_PPC_VTB:
1528                 *val = get_reg_val(id, to_book3s(vcpu)->vtb);
1529                 break;
1530         case KVM_REG_PPC_LPCR:
1531         case KVM_REG_PPC_LPCR_64:
1532                 /*
1533                  * We are only interested in the LPCR_ILE bit
1534                  */
1535                 if (vcpu->arch.intr_msr & MSR_LE)
1536                         *val = get_reg_val(id, LPCR_ILE);
1537                 else
1538                         *val = get_reg_val(id, 0);
1539                 break;
1540 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1541         case KVM_REG_PPC_TFHAR:
1542                 *val = get_reg_val(id, vcpu->arch.tfhar);
1543                 break;
1544         case KVM_REG_PPC_TFIAR:
1545                 *val = get_reg_val(id, vcpu->arch.tfiar);
1546                 break;
1547         case KVM_REG_PPC_TEXASR:
1548                 *val = get_reg_val(id, vcpu->arch.texasr);
1549                 break;
1550         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1551                 *val = get_reg_val(id,
1552                                 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
1553                 break;
1554         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1555         {
1556                 int i, j;
1557
1558                 i = id - KVM_REG_PPC_TM_VSR0;
1559                 if (i < 32)
1560                         for (j = 0; j < TS_FPRWIDTH; j++)
1561                                 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1562                 else {
1563                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1564                                 val->vval = vcpu->arch.vr_tm.vr[i-32];
1565                         else
1566                                 r = -ENXIO;
1567                 }
1568                 break;
1569         }
1570         case KVM_REG_PPC_TM_CR:
1571                 *val = get_reg_val(id, vcpu->arch.cr_tm);
1572                 break;
1573         case KVM_REG_PPC_TM_XER:
1574                 *val = get_reg_val(id, vcpu->arch.xer_tm);
1575                 break;
1576         case KVM_REG_PPC_TM_LR:
1577                 *val = get_reg_val(id, vcpu->arch.lr_tm);
1578                 break;
1579         case KVM_REG_PPC_TM_CTR:
1580                 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1581                 break;
1582         case KVM_REG_PPC_TM_FPSCR:
1583                 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1584                 break;
1585         case KVM_REG_PPC_TM_AMR:
1586                 *val = get_reg_val(id, vcpu->arch.amr_tm);
1587                 break;
1588         case KVM_REG_PPC_TM_PPR:
1589                 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1590                 break;
1591         case KVM_REG_PPC_TM_VRSAVE:
1592                 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1593                 break;
1594         case KVM_REG_PPC_TM_VSCR:
1595                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1596                         *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1597                 else
1598                         r = -ENXIO;
1599                 break;
1600         case KVM_REG_PPC_TM_DSCR:
1601                 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1602                 break;
1603         case KVM_REG_PPC_TM_TAR:
1604                 *val = get_reg_val(id, vcpu->arch.tar_tm);
1605                 break;
1606 #endif
1607         default:
1608                 r = -EINVAL;
1609                 break;
1610         }
1611
1612         return r;
1613 }
1614
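     /*
      * PR KVM emulates only the ILE bit of the LPCR: it selects the
      * endianness the guest takes interrupts in, which we track by
      * toggling MSR_LE in the MSR used for interrupt injection.
      */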
1615 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
1616 {
1617         if (new_lpcr & LPCR_ILE)
1618                 vcpu->arch.intr_msr |= MSR_LE;
1619         else
1620                 vcpu->arch.intr_msr &= ~MSR_LE;
1621 }
1622
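     /*
      * Mirror image of kvmppc_get_one_reg_pr() above: handle
      * PR-specific register writes for the KVM_SET_ONE_REG ioctl.
      */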
1623 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1624                                  union kvmppc_one_reg *val)
1625 {
1626         int r = 0;
1627
1628         switch (id) {
1629         case KVM_REG_PPC_HIOR:
1630                 to_book3s(vcpu)->hior = set_reg_val(id, *val);
1631                 to_book3s(vcpu)->hior_explicit = true;
1632                 break;
1633         case KVM_REG_PPC_VTB:
1634                 to_book3s(vcpu)->vtb = set_reg_val(id, *val);
1635                 break;
1636         case KVM_REG_PPC_LPCR:
1637         case KVM_REG_PPC_LPCR_64:
1638                 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1639                 break;
1640 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1641         case KVM_REG_PPC_TFHAR:
1642                 vcpu->arch.tfhar = set_reg_val(id, *val);
1643                 break;
1644         case KVM_REG_PPC_TFIAR:
1645                 vcpu->arch.tfiar = set_reg_val(id, *val);
1646                 break;
1647         case KVM_REG_PPC_TEXASR:
1648                 vcpu->arch.texasr = set_reg_val(id, *val);
1649                 break;
1650         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1651                 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
1652                         set_reg_val(id, *val);
1653                 break;
1654         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1655         {
1656                 int i, j;
1657
1658                 i = id - KVM_REG_PPC_TM_VSR0;
1659                 if (i < 32)
1660                         for (j = 0; j < TS_FPRWIDTH; j++)
1661                                 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1662                 else
1663                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1664                                 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1665                         else
1666                                 r = -ENXIO;
1667                 break;
1668         }
1669         case KVM_REG_PPC_TM_CR:
1670                 vcpu->arch.cr_tm = set_reg_val(id, *val);
1671                 break;
1672         case KVM_REG_PPC_TM_XER:
1673                 vcpu->arch.xer_tm = set_reg_val(id, *val);
1674                 break;
1675         case KVM_REG_PPC_TM_LR:
1676                 vcpu->arch.lr_tm = set_reg_val(id, *val);
1677                 break;
1678         case KVM_REG_PPC_TM_CTR:
1679                 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1680                 break;
1681         case KVM_REG_PPC_TM_FPSCR:
1682                 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1683                 break;
1684         case KVM_REG_PPC_TM_AMR:
1685                 vcpu->arch.amr_tm = set_reg_val(id, *val);
1686                 break;
1687         case KVM_REG_PPC_TM_PPR:
1688                 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1689                 break;
1690         case KVM_REG_PPC_TM_VRSAVE:
1691                 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1692                 break;
1693         case KVM_REG_PPC_TM_VSCR:
1694                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1695                         vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
1696                 else
1697                         r = -ENXIO;
1698                 break;
1699         case KVM_REG_PPC_TM_DSCR:
1700                 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1701                 break;
1702         case KVM_REG_PPC_TM_TAR:
1703                 vcpu->arch.tar_tm = set_reg_val(id, *val);
1704                 break;
1705 #endif
1706         default:
1707                 r = -EINVAL;
1708                 break;
1709         }
1710
1711         return r;
1712 }
1713
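     /*
      * Allocate and initialise a PR vcpu: the vcpu itself, the book3s
      * shadow state, a zeroed page shared with the guest, a default
      * PVR, and finally the shadow MMU. The error paths below unwind
      * in exact reverse order of allocation.
      */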
1714 static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1715                                                    unsigned int id)
1716 {
1717         struct kvmppc_vcpu_book3s *vcpu_book3s;
1718         struct kvm_vcpu *vcpu;
1719         int err = -ENOMEM;
1720         unsigned long p;
1721
1722         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1723         if (!vcpu)
1724                 goto out;
1725
1726         vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1727         if (!vcpu_book3s)
1728                 goto free_vcpu;
1729         vcpu->arch.book3s = vcpu_book3s;
1730
1731 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1732         vcpu->arch.shadow_vcpu =
1733                 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1734         if (!vcpu->arch.shadow_vcpu)
1735                 goto free_vcpu3s;
1736 #endif
1737
1738         err = kvm_vcpu_init(vcpu, kvm, id);
1739         if (err)
1740                 goto free_shadow_vcpu;
1741
1742         err = -ENOMEM;
1743         p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1744         if (!p)
1745                 goto uninit_vcpu;
1746         vcpu->arch.shared = (void *)p;
1747 #ifdef CONFIG_PPC_BOOK3S_64
1748         /* Always start the shared struct in native endian mode */
1749 #ifdef __BIG_ENDIAN__
1750         vcpu->arch.shared_big_endian = true;
1751 #else
1752         vcpu->arch.shared_big_endian = false;
1753 #endif
1754
1755         /*
1756          * Default to the same as the host if we're on a machine
1757          * recent enough to have 1TB segments;
1758          * otherwise default to PPC970FX.
1759          */
1760         vcpu->arch.pvr = 0x3C0301;
1761         if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1762                 vcpu->arch.pvr = mfspr(SPRN_PVR);
1763         vcpu->arch.intr_msr = MSR_SF;
1764 #else
1765         /* default to book3s_32 (750) */
1766         vcpu->arch.pvr = 0x84202;
1767 #endif
1768         kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1769         vcpu->arch.slb_nr = 64;
1770
1771         vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
1772
1773         err = kvmppc_mmu_init(vcpu);
1774         if (err < 0)
1775                 goto free_shared_page;
1776
1777         return vcpu;
1778
1779 free_shared_page:
1780         free_page((unsigned long)vcpu->arch.shared);
1781 uninit_vcpu:
1782         kvm_vcpu_uninit(vcpu);
1783 free_shadow_vcpu:
1784 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1785         kfree(vcpu->arch.shadow_vcpu);
1786 free_vcpu3s:
1787 #endif
1788         vfree(vcpu_book3s);
1789 free_vcpu:
1790         kmem_cache_free(kvm_vcpu_cache, vcpu);
1791 out:
1792         return ERR_PTR(err);
1793 }
1794
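     /* Tear down a vcpu in the reverse order of creation above. */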
1795 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1796 {
1797         struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1798
1799         free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1800         kvm_vcpu_uninit(vcpu);
1801 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1802         kfree(vcpu->arch.shadow_vcpu);
1803 #endif
1804         vfree(vcpu_book3s);
1805         kmem_cache_free(kvm_vcpu_cache, vcpu);
1806 }
1807
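     /*
      * Run the guest until it exits to us: save the host's FPU,
      * Altivec and VSX state, preload the guest FPU if its MSR_FP is
      * set, enter via __kvmppc_vcpu_run() with interrupts hard
      * disabled, and on the way out save the guest's FP/vector and
      * TAR state back into the vcpu.
      */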
1808 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1809 {
1810         int ret;
1811 #ifdef CONFIG_ALTIVEC
1812         unsigned long uninitialized_var(vrsave);
1813 #endif
1814
1815         /* Check if we can run the vcpu at all */
1816         if (!vcpu->arch.sane) {
1817                 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1818                 ret = -EINVAL;
1819                 goto out;
1820         }
1821
1822         kvmppc_setup_debug(vcpu);
1823
1824         /*
1825          * Interrupts could be timers for the guest that we have to inject
1826          * again, so let's postpone them until we're in the guest; if we
1827          * really did time things that badly, we'll just exit again due to
1828          * a host external interrupt.
1829          */
1830         ret = kvmppc_prepare_to_enter(vcpu);
1831         if (ret <= 0)
1832                 goto out;
1833         /* interrupts now hard-disabled */
1834
1835         /* Save FPU, Altivec and VSX state */
1836         giveup_all(current);
1837
1838         /* Preload FPU if it's enabled */
1839         if (kvmppc_get_msr(vcpu) & MSR_FP)
1840                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1841
1842         kvmppc_fix_ee_before_entry();
1843
1844         ret = __kvmppc_vcpu_run(kvm_run, vcpu);
1845
1846         kvmppc_clear_debug(vcpu);
1847
1848         /* No need for guest_exit. It's done in handle_exit.
1849          * We also get here with interrupts enabled. */
1850
1851         /* Make sure we save the guest FPU/Altivec/VSX state */
1852         kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
1853
1854         /* Make sure we save the guest TAR/EBB/DSCR state */
1855         kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1856
1857 out:
1858         vcpu->mode = OUTSIDE_GUEST_MODE;
1859         return ret;
1860 }
1861
1862 /*
1863  * Get (and clear) the dirty memory log for a memory slot.
1864  */
1865 static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
1866                                          struct kvm_dirty_log *log)
1867 {
1868         struct kvm_memslots *slots;
1869         struct kvm_memory_slot *memslot;
1870         struct kvm_vcpu *vcpu;
1871         ulong ga, ga_end;
1872         int is_dirty = 0;
1873         int r;
1874         unsigned long n;
1875
1876         mutex_lock(&kvm->slots_lock);
1877
1878         r = kvm_get_dirty_log(kvm, log, &is_dirty);
1879         if (r)
1880                 goto out;
1881
1882         /* If nothing is dirty, don't bother messing with page tables. */
1883         if (is_dirty) {
1884                 slots = kvm_memslots(kvm);
1885                 memslot = id_to_memslot(slots, log->slot);
1886
1887                 ga = memslot->base_gfn << PAGE_SHIFT;
1888                 ga_end = ga + (memslot->npages << PAGE_SHIFT);
1889
1890                 kvm_for_each_vcpu(n, vcpu, kvm)
1891                         kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1892
1893                 n = kvm_dirty_bitmap_bytes(memslot);
1894                 memset(memslot->dirty_bitmap, 0, n);
1895         }
1896
1897         r = 0;
1898 out:
1899         mutex_unlock(&kvm->slots_lock);
1900         return r;
1901 }
1902
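     /*
      * PR KVM backs guest memory with ordinary host pages and builds
      * shadow translations on demand, so the memslot callbacks below
      * have nothing to prepare, commit or free.
      */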
1903 static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
1904                                          struct kvm_memory_slot *memslot)
1905 {
1906         return;
1907 }
1908
1909 static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
1910                                         struct kvm_memory_slot *memslot,
1911                                         const struct kvm_userspace_memory_region *mem)
1912 {
1913         return 0;
1914 }
1915
1916 static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
1917                                 const struct kvm_userspace_memory_region *mem,
1918                                 const struct kvm_memory_slot *old,
1919                                 const struct kvm_memory_slot *new)
1920 {
1921         return;
1922 }
1923
1924 static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
1925                                         struct kvm_memory_slot *dont)
1926 {
1927         return;
1928 }
1929
1930 static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
1931                                          unsigned long npages)
1932 {
1933         return 0;
1934 }
1935
1936
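     /*
      * Report the emulated MMU geometry to userspace: a 64-entry SLB,
      * 4k and 16M segment page sizes always, and 64k pages plus 1T
      * segments only once a vcpu exists that advertises multiple page
      * sizes.
      */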
1937 #ifdef CONFIG_PPC64
1938 static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1939                                          struct kvm_ppc_smmu_info *info)
1940 {
1941         long int i;
1942         struct kvm_vcpu *vcpu;
1943
1944         info->flags = 0;
1945
1946         /* SLB is always 64 entries */
1947         info->slb_size = 64;
1948
1949         /* Standard 4k base page size segment */
1950         info->sps[0].page_shift = 12;
1951         info->sps[0].slb_enc = 0;
1952         info->sps[0].enc[0].page_shift = 12;
1953         info->sps[0].enc[0].pte_enc = 0;
1954
1955         /*
1956          * 64k large page size.
1957          * We only want to put this in if the CPUs we're emulating
1958          * support it, but unfortunately we don't have a vcpu to
1959          * hand here to test with.  Just pick the first vcpu, and if
1960          * that doesn't exist yet, report the minimum capability,
1961          * i.e., no 64k pages.
1962          * 1T segment support goes along with 64k pages.
1963          */
1964         i = 1;
1965         vcpu = kvm_get_vcpu(kvm, 0);
1966         if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
1967                 info->flags = KVM_PPC_1T_SEGMENTS;
1968                 info->sps[i].page_shift = 16;
1969                 info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
1970                 info->sps[i].enc[0].page_shift = 16;
1971                 info->sps[i].enc[0].pte_enc = 1;
1972                 ++i;
1973         }
1974
1975         /* Standard 16M large page size segment */
1976         info->sps[i].page_shift = 24;
1977         info->sps[i].slb_enc = SLB_VSID_L;
1978         info->sps[i].enc[0].page_shift = 24;
1979         info->sps[i].enc[0].pte_enc = 0;
1980
1981         return 0;
1982 }
1983
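     /*
      * PR guests on POWER9 must run in HPT mode, so the only
      * acceptable KVM_PPC_CONFIGURE_V3_MMU request is one asking for
      * no flags and no process table.
      */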
1984 static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
1985 {
1986         if (!cpu_has_feature(CPU_FTR_ARCH_300))
1987                 return -ENODEV;
1988         /* Require flags and process table base and size to all be zero. */
1989         if (cfg->flags || cfg->process_table)
1990                 return -EINVAL;
1991         return 0;
1992 }
1993
1994 #else
1995 static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1996                                          struct kvm_ppc_smmu_info *info)
1997 {
1998         /* We should not get called */
1999         BUG();
2000 }
2001 #endif /* CONFIG_PPC64 */
2002
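     /*
      * Number of live PR VMs in the system. While any exist on pseries
      * firmware that supports SET_MODE, relocation-on exceptions are
      * disabled, since PR KVM relies on intercepting exceptions at the
      * classic untranslated vectors.
      */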
2003 static unsigned int kvm_global_user_count;
2004 static DEFINE_SPINLOCK(kvm_global_user_count_lock);
2005
2006 static int kvmppc_core_init_vm_pr(struct kvm *kvm)
2007 {
2008         mutex_init(&kvm->arch.hpt_mutex);
2009
2010 #ifdef CONFIG_PPC_BOOK3S_64
2011         /* Start out with the default set of hcalls enabled */
2012         kvmppc_pr_init_default_hcalls(kvm);
2013 #endif
2014
2015         if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
2016                 spin_lock(&kvm_global_user_count_lock);
2017                 if (++kvm_global_user_count == 1)
2018                         pseries_disable_reloc_on_exc();
2019                 spin_unlock(&kvm_global_user_count_lock);
2020         }
2021         return 0;
2022 }
2023
2024 static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
2025 {
2026 #ifdef CONFIG_PPC64
2027         WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
2028 #endif
2029
2030         if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
2031                 spin_lock(&kvm_global_user_count_lock);
2032                 BUG_ON(kvm_global_user_count == 0);
2033                 if (--kvm_global_user_count == 0)
2034                         pseries_enable_reloc_on_exc();
2035                 spin_unlock(&kvm_global_user_count_lock);
2036         }
2037 }
2038
2039 static int kvmppc_core_check_processor_compat_pr(void)
2040 {
2041         /*
2042          * PR KVM can work on POWER9 inside a guest partition
2043          * running in HPT mode.  It can't work if we are using
2044          * radix translation (because radix provides no way for
2045          * a process to have unique translations in quadrant 3).
2046          */
2047         if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
2048                 return -EIO;
2049         return 0;
2050 }
2051
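     /* PR KVM implements no VM-level ioctls of its own. */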
2052 static long kvm_arch_vm_ioctl_pr(struct file *filp,
2053                                  unsigned int ioctl, unsigned long arg)
2054 {
2055         return -ENOTTY;
2056 }
2057
2058 static struct kvmppc_ops kvm_ops_pr = {
2059         .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
2060         .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
2061         .get_one_reg = kvmppc_get_one_reg_pr,
2062         .set_one_reg = kvmppc_set_one_reg_pr,
2063         .vcpu_load   = kvmppc_core_vcpu_load_pr,
2064         .vcpu_put    = kvmppc_core_vcpu_put_pr,
2065         .set_msr     = kvmppc_set_msr_pr,
2066         .vcpu_run    = kvmppc_vcpu_run_pr,
2067         .vcpu_create = kvmppc_core_vcpu_create_pr,
2068         .vcpu_free   = kvmppc_core_vcpu_free_pr,
2069         .check_requests = kvmppc_core_check_requests_pr,
2070         .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
2071         .flush_memslot = kvmppc_core_flush_memslot_pr,
2072         .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
2073         .commit_memory_region = kvmppc_core_commit_memory_region_pr,
2074         .unmap_hva_range = kvm_unmap_hva_range_pr,
2075         .age_hva  = kvm_age_hva_pr,
2076         .test_age_hva = kvm_test_age_hva_pr,
2077         .set_spte_hva = kvm_set_spte_hva_pr,
2078         .mmu_destroy  = kvmppc_mmu_destroy_pr,
2079         .free_memslot = kvmppc_core_free_memslot_pr,
2080         .create_memslot = kvmppc_core_create_memslot_pr,
2081         .init_vm = kvmppc_core_init_vm_pr,
2082         .destroy_vm = kvmppc_core_destroy_vm_pr,
2083         .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
2084         .emulate_op = kvmppc_core_emulate_op_pr,
2085         .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
2086         .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
2087         .fast_vcpu_kick = kvm_vcpu_kick,
2088         .arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
2089 #ifdef CONFIG_PPC_BOOK3S_64
2090         .hcall_implemented = kvmppc_hcall_impl_pr,
2091         .configure_mmu = kvm_configure_mmu_pr,
2092 #endif
2093         .giveup_ext = kvmppc_giveup_ext,
2094 };
2095
2096
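     /*
      * Module init/exit: check the host can run PR guests, publish
      * kvm_ops_pr via kvmppc_pr_ops, and set up / tear down the HPTE
      * cache used to track shadow mappings.
      */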
2097 int kvmppc_book3s_init_pr(void)
2098 {
2099         int r;
2100
2101         r = kvmppc_core_check_processor_compat_pr();
2102         if (r < 0)
2103                 return r;
2104
2105         kvm_ops_pr.owner = THIS_MODULE;
2106         kvmppc_pr_ops = &kvm_ops_pr;
2107
2108         r = kvmppc_mmu_hpte_sysinit();
2109         return r;
2110 }
2111
2112 void kvmppc_book3s_exit_pr(void)
2113 {
2114         kvmppc_pr_ops = NULL;
2115         kvmppc_mmu_hpte_sysexit();
2116 }
2117
2118 /*
2119  * We only support separate modules for book3s 64
2120  */
2121 #ifdef CONFIG_PPC_BOOK3S_64
2122
2123 module_init(kvmppc_book3s_init_pr);
2124 module_exit(kvmppc_book3s_exit_pr);
2125
2126 MODULE_LICENSE("GPL");
2127 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2128 MODULE_ALIAS("devname:kvm");
2129 #endif