/*
 * arch/powerpc/kvm/book3s_64_vio.c (GNU Linux-libre 4.19.264-gnu1)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

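/*
 * Return the number of host pages needed to shadow a guest TCE table
 * with @iommu_pages entries, each entry a 64-bit TCE.
 *
 * Worked example (illustrative, not from the source): a 2GB DMA window
 * of 4K IOMMU pages holds 2GB / 4K = 512K TCEs; at sizeof(u64) == 8
 * bytes each, that is 4MB of shadow table, i.e. 1024 host pages with
 * PAGE_SIZE == 4K.
 */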
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

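/*
 * Total page charge for one table: the shadow TCE pages themselves
 * plus the kvmppc_spapr_tce_table descriptor with its trailing array
 * of page pointers, rounded up to whole pages.
 */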
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

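/*
 * Charge (inc == true) or uncharge @stt_pages against the owner's
 * RLIMIT_MEMLOCK. mmap_sem is taken for write to serialize updates of
 * mm->locked_vm; CAP_IPC_LOCK bypasses the limit check, matching the
 * usual mlock() semantics.
 */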
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
        long ret = 0;

        if (!current || !current->mm)
                return ret; /* process exited */

        down_write(&current->mm->mmap_sem);

        if (inc) {
                unsigned long locked, lock_limit;

                locked = current->mm->locked_vm + stt_pages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        current->mm->locked_vm += stt_pages;
        } else {
                if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
                        stt_pages = current->mm->locked_vm;

                current->mm->locked_vm -= stt_pages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
                        inc ? '+' : '-',
                        stt_pages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}

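/*
 * Teardown pair for the per-LIOBN hardware table references: the kref
 * release callback unlinks the entry under RCU and defers the final
 * iommu_tce_table_put()/kfree() to a grace period, so lockless readers
 * of stt->iommu_tables never touch freed memory.
 */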
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

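/*
 * Called when a VFIO group is detached from the KVM-VFIO device: drop
 * the KVM references to every hardware table belonging to @grp, for
 * every guest TCE table of this VM.
 */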
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
        }
}

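/*
 * Associate a hardware IOMMU table of @grp with the guest TCE table
 * behind @tablefd (an fd returned by KVM_CREATE_SPAPR_TCE_64). A
 * hardware table is accepted only if its pages are no larger than the
 * guest window's, it starts at the same offset, and it covers at least
 * the same range.
 *
 * A sketch of the userspace side, assuming the KVM-VFIO device API of
 * this kernel generation (names from the uapi headers, not this file):
 *
 *      struct kvm_vfio_spapr_tce param = {
 *              .groupfd = vfio_group_fd,
 *              .tablefd = spapr_tce_fd,
 *      };
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_VFIO_GROUP,
 *              .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
 *              .addr = (__u64)(unsigned long)&param,
 *      };
 *      ioctl(kvm_vfio_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 */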
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                return 0;
        }

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

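/*
 * RCU callback that finally frees the shadow TCE pages and the table
 * descriptor once no lockless reader can still be walking them.
 */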
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                __free_page(stt->pages[i]);

        kfree(stt);
}

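/*
 * The table fd is mmap()able so userspace (e.g. QEMU) can read the
 * guest-visible TCE entries directly; the fault handler hands back the
 * pre-allocated backing page for the faulting offset.
 */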
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = stt->pages[vmf->pgoff];
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

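/*
 * fd release: unlink the table from the VM under kvm->lock, drop all
 * remaining hardware table references, return the RLIMIT_MEMLOCK
 * charge, and free the pages after an RCU grace period.
 */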
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        kvm_put_kvm(stt->kvm);

        kvmppc_account_memlimit(
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

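/*
 * KVM_CREATE_SPAPR_TCE_64 handler: validate the window geometry
 * (page_shift 12..34, i.e. 4K..16G pages), charge RLIMIT_MEMLOCK,
 * allocate the shadow pages and return an anonymous fd for the table.
 *
 * A hedged userspace sketch; the ioctl and struct come from the KVM
 * uapi headers, not this file, and the values are examples only:
 *
 *      struct kvm_create_spapr_tce_64 args = {
 *              .liobn = 0x80000000,              // example LIOBN
 *              .page_shift = 12,                 // 4K IOMMU pages
 *              .flags = 0,
 *              .offset = 0,                      // window start, in pages
 *              .size = (1ULL << 30) >> 12,       // 1GB window, in pages
 *      };
 *      int tablefd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE_64, &args);
 */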
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;
        int i;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        for (i = 0; i < npages; i++) {
                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!stt->pages[i])
                        goto fail;
        }

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0) {
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
                kvm_get_kvm(kvm);
        }

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

 fail:
        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
 fail_acct:
        kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
        return ret;
}

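/*
 * Force a hardware TCE back to unmapped by exchanging it with
 * hpa = 0 / DMA_NONE; used on error paths so a failed update does not
 * leave a stale translation visible to the device.
 */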
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg(tbl, entry, &hpa, &dir);
}

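/*
 * Drop the "mapped" reference on the preregistered memory chunk that
 * backs @entry and clear the stored userspace address, allowing the
 * chunk to be unregistered later.
 */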
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

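/*
 * Unmap a single hardware TCE: atomically exchange it with DMA_NONE,
 * then release the map-time reference. If that release fails, the old
 * value is exchanged back in.
 */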
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg(tbl, entry, &hpa, &dir);

        return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

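/*
 * Map one hardware TCE. The callers have already translated the guest
 * physical address to a userspace address @ua; here it is resolved to
 * a host physical address through the preregistered-memory database
 * (mm_iommu_*), and @ua is remembered in it_userspace so a later unmap
 * can find the chunk again. H_TOO_HARD asks the caller to fall back to
 * the slower path, ultimately userspace.
 */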
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

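/*
 * H_PUT_TCE hypercall: set a single TCE at bus offset @ioba. The index
 * is entry = ioba >> page_shift; for example, ioba 0x3000 with 4K
 * pages is entry 3. The update is applied to every attached hardware
 * table and then mirrored into the shadow table for guest reads.
 */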
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
                        tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret == H_SUCCESS)
                        continue;

                if (ret == H_TOO_HARD)
                        goto unlock_exit;

                WARN_ON_ONCE(1);
                kvmppc_clear_tce(stit->tbl, entry);
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

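/*
 * H_PUT_TCE_INDIRECT: set @npages consecutive TCEs from a guest list
 * at @tce_list. PAPR caps the list at 512 entries so it always fits in
 * one 4K page, and the list itself must be 4K aligned.
 */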
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * SPAPR spec says that the maximum size of the list is 512 TCEs
         * so the whole table fits in a 4K page
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;

                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
                                &ua, NULL)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto unlock_exit;

                        WARN_ON_ONCE(1);
                        /* clear the TCE just attempted, not the window start */
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

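/*
 * H_STUFF_TCE: fill @npages TCEs starting at @ioba with @tce_value,
 * typically 0 to invalidate a range. Values carrying the read/write
 * permission bits are rejected; other non-zero values are allowed so
 * userspace can poison entries for debugging.
 */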
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        /* clear the TCE just attempted, not the window start */
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);