GNU Linux-libre 4.9.309-gnu1
arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <asm/kaiser.h>

/*
 *      TLB flushing, formerly SMP-only
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB       0x1UL

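/*
 * Monotonically increasing source of mm context IDs (most likely handed
 * out when a new mm is set up, so an ID is never reused).
 */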
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

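/*
 * Argument block handed to the flush callbacks below.  A flush_end of
 * TLB_FLUSH_ALL requests a full flush; otherwise the range
 * [flush_start, flush_end) is flushed one page at a time.
 */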
struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};

static void load_new_mm_cr3(pgd_t *pgdir)
{
        unsigned long new_mm_cr3 = __pa(pgdir);

        if (kaiser_enabled) {
                /*
                 * We reuse the same PCID for different tasks, so we must
                 * flush all the entries for the PCID out when we change tasks.
                 * Flush KERN below, flush USER when returning to userspace in
                 * kaiser's SWITCH_USER_CR3 (_SWITCH_TO_USER_CR3) macro.
                 *
                 * invpcid_flush_single_context(X86_CR3_PCID_ASID_USER) could
                 * do it here, but can only be used if X86_FEATURE_INVPCID is
                 * available - and many machines support pcid without invpcid.
                 *
                 * If X86_CR3_PCID_KERN_FLUSH actually added something, then it
                 * would be needed in the write_cr3() below (if PCIDs are enabled).
                 */
                BUILD_BUG_ON(X86_CR3_PCID_KERN_FLUSH);
                kaiser_flush_tlb_on_return_to_user();
        }

        /*
         * Caution: many callers of this function expect
         * that load_cr3() is serializing and orders TLB
         * fills with respect to the mm_cpumask writes.
         */
        write_cr3(new_mm_cr3);
}

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_new_mm_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

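/*
 * Fold the task's TIF_SPEC_IB flag into bit 0 of its mm pointer.  The
 * mm_struct is slab-allocated and therefore at least word aligned, so
 * bit 0 is otherwise always zero.  For illustration only: an mm at
 * 0xffff8800aabbcc00 belonging to a task with TIF_SPEC_IB set would be
 * encoded as 0xffff8800aabbcc01.
 */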
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
        unsigned long next_tif = task_thread_info(next)->flags;
        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

        return (unsigned long)next->mm | ibpb;
}

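/*
 * Issue an Indirect Branch Prediction Barrier, when required, before
 * switching to a different user mm.  Which of the two static branches
 * below is enabled depends on the selected Spectre v2 user-to-user
 * mitigation mode (conditional vs. always-on IBPB).
 */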
static void cond_ibpb(struct task_struct *next)
{
        if (!next || !next->mm)
                return;

        /*
         * Both the conditional and the always IBPB modes use the mm
         * pointer to avoid the IBPB when switching between tasks of the
         * same process. Using the mm pointer instead of mm->context.ctx_id
         * opens a hypothetical hole vs. mm_struct reuse, which is more or
         * less impossible to control by an attacker. Aside from that, it
         * would only affect the first schedule, so the theoretically
         * exposed data is not really interesting.
         */
        if (static_branch_likely(&switch_mm_cond_ibpb)) {
                unsigned long prev_mm, next_mm;

                /*
                 * This is a bit more complex than the always mode because
                 * it has to handle two cases:
                 *
                 * 1) Switch from a user space task (potential attacker)
                 *    which has TIF_SPEC_IB set to a user space task
                 *    (potential victim) which has TIF_SPEC_IB not set.
                 *
                 * 2) Switch from a user space task (potential attacker)
                 *    which has TIF_SPEC_IB not set to a user space task
                 *    (potential victim) which has TIF_SPEC_IB set.
                 *
                 * This could be done by unconditionally issuing IBPB when
                 * a task which has TIF_SPEC_IB set is either scheduled in
                 * or out. However, that results in two flushes when:
                 *
                 * - the same user space task is scheduled out and later
                 *   scheduled in again and only a kernel thread ran in
                 *   between.
                 *
                 * - a user space task belonging to the same process is
                 *   scheduled in after a kernel thread ran in between.
                 *
                 * - a user space task belonging to the same process is
                 *   scheduled in immediately.
                 *
                 * Optimize this with reasonably small overhead for the
                 * above cases. Mangle the TIF_SPEC_IB bit into the mm
                 * pointer of the incoming task which is stored in
                 * cpu_tlbstate.last_user_mm_ibpb for comparison.
                 */
                next_mm = mm_mangle_tif_spec_ib(next);
                prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

                /*
                 * Issue IBPB only if the mm's are different and one or
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
                    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
                        indirect_branch_prediction_barrier();

                this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
        }

        if (static_branch_unlikely(&switch_mm_always_ibpb)) {
                /*
                 * Only flush when switching to a user space task with a
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
                if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
                        indirect_branch_prediction_barrier();
                        this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
                }
        }
}

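/*
 * The body of the context switch: runs with interrupts already disabled
 * (the _irqs_off suffix is a precondition, not something done here) and
 * moves this CPU over to @next's page tables and per-mm state.
 */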
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
                /*
                 * Avoid user/user BTB poisoning by flushing the branch
                 * predictor when switching between processes. This stops
                 * one process from doing Spectre-v2 attacks on another.
                 */
                cond_ibpb(tsk);

                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        unsigned int stack_pgd_index = pgd_index(current_stack_pointer);

                        pgd_t *pgd = next->pgd + stack_pgd_index;

                        if (unlikely(pgd_none(*pgd)))
                                set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
                }

                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);

                cpumask_set_cpu(cpu, mm_cpumask(next));

                /*
                 * Re-load page tables.
                 *
                 * This logic has an ordering constraint:
                 *
                 *  CPU 0: Write to a PTE for 'next'
                 *  CPU 0: load bit 1 in mm_cpumask.  If nonzero, send IPI.
                 *  CPU 1: set bit 1 in next's mm_cpumask
                 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
                 *
                 * We need to prevent an outcome in which CPU 1 observes
                 * the new PTE value and CPU 0 observes bit 1 clear in
                 * mm_cpumask.  (If that occurs, then the IPI will never
                 * be sent, and CPU 0's TLB will contain a stale entry.)
                 *
                 * The bad outcome can occur if either CPU's load is
                 * reordered before that CPU's store, so both CPUs must
                 * execute full barriers to prevent this from happening.
                 *
                 * Thus, switch_mm needs a full barrier between the
                 * store to mm_cpumask and any operation that could load
                 * from next->pgd.  TLB fills are special and can happen
                 * due to instruction fetches or for no reason at all,
                 * and neither LOCK nor MFENCE orders them.
                 * Fortunately, load_cr3() is serializing and gives the
                 * ordering guarantee we need.
                 */
                load_new_mm_cr3(next->pgd);

                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never set context.ldt to NULL while the mm still
                 * exists.  That means that next->context.ldt !=
                 * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_mm_ldt(next);
#endif
        } else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here. Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure we don't use freed page tables.
                         *
                         * As above, load_cr3() is serializing and orders TLB
                         * fills with respect to the mm_cpumask write.
                         */
                        load_new_mm_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_mm_ldt(next);
                }
        }
}


/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *      Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *      if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *      mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL) {
                        local_flush_tlb();
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
                                (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
                }
        } else
                leave_mm(smp_processor_id());
}

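/*
 * Ask every CPU in @cpumask to run flush_tlb_func() for the given range.
 * On SGI UV systems the Broadcast Assist Unit is tried first;
 * uv_flush_tlb_others() hands back the (possibly reduced) set of CPUs
 * that still need a conventional IPI, or NULL when none do.
 */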
void native_flush_tlb_others(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long end)
{
        struct flush_tlb_info info;

        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (end - start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                                                &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

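/*
 * Flush the user address range [start, end) of @mm on this CPU and on
 * every other CPU that has the mm in its mm_cpumask.  vmflag carries the
 * vm_flags of the VMA being flushed; huge page ranges and ranges above
 * the flush ceiling are promoted to a full TLB flush.
 */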
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;
        if (base_pages_to_flush > tlb_single_page_flush_ceiling)
                base_pages_to_flush = TLB_FLUSH_ALL;

        if (current->active_mm != mm) {
                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if (!current->mm) {
                leave_mm(smp_processor_id());

                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        /*
         * Both branches below are implicit full barriers (MOV to CR or
         * INVLPG) that synchronize with switch_mm.
         */
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}

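/*
 * IPI callback for the full-flush paths: flush everything on this CPU
 * and, if the CPU was only lazily borrowing an mm, leave it.
 */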
static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

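/*
 * Flush a range of kernel virtual addresses on all CPUs.  Kernel
 * mappings are shared, so there is no mm_cpumask to narrow the target
 * set; every online CPU gets the IPI.
 */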
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Treat this like a user space task's flush (same ceiling), a bit conservatively. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

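/*
 * debugfs plumbing that exposes tlb_single_page_flush_ceiling (see
 * above) so the single-page-flush cutoff can be read and tuned at run
 * time by root.
 */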
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);