/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;

	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows about */
	stac();
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b, 3b)
		_ASM_EXTABLE(1b, 2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);
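
/*
 * Illustrative sketch (not part of the original file): modulo the fault
 * fixup, the asm above behaves like the portable C below. On a fault in
 * the quadword loop, the fixup at label 3 reconstructs the residue as
 * remaining_quads * 8 + trailing_bytes, which __clear_user() returns.
 * The helper name below is hypothetical.
 */
static inline unsigned long __clear_user_c_sketch(void *dst, unsigned long size)
{
	unsigned long quads = size / 8, bytes = size & 7;
	u64 *q = dst;
	u8 *b;

	while (quads--)		/* mirrors the "0:" 8-byte store loop */
		*q++ = 0;
	b = (u8 *)q;
	while (bytes--)		/* mirrors the "1:" trailing-byte loop */
		*b++ = 0;
	return 0;		/* everything cleared, no residue */
}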
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
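
/*
 * Illustrative usage (a sketch, not part of the original file): a
 * hypothetical caller zeroing a user-supplied buffer. clear_user()
 * returns the number of bytes that could NOT be cleared, so any nonzero
 * residue maps to -EFAULT. The helper name is an assumption.
 */
static inline int example_zero_user_buf(void __user *buf, unsigned long count)
{
	if (clear_user(buf, count))	/* nonzero: partial fault */
		return -EFAULT;
	return 0;
}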
/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();
	return len;
}
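
/*
 * Illustrative sketch (not part of the original file): how a generic
 * caller consumes the residue. lib/usercopy.c's _copy_from_user() zeroes
 * whatever could not be copied, so stale kernel data never reaches the
 * destination buffer:
 *
 *	res = raw_copy_from_user(to, from, n);
 *	if (unlikely(res))
 *		memset(to + (n - res), 0, res);
 */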
/*
 * Similar to copy_user_handle_tail, probe for the write fault point,
 * but reuse __memcpy_mcsafe in case a new read error is encountered.
 * clac() is handled in _copy_to_iter_mcsafe().
 */
__visible unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++, from++) {
		/*
		 * Call the assembly routine back directly since
		 * memcpy_mcsafe() may silently fall back to memcpy.
		 */
		unsigned long rem = __memcpy_mcsafe(to, from, 1);

		if (rem)
			break;
	}
	return len;
}
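
/*
 * Illustrative sketch (not part of the original file): mcsafe callers
 * treat a nonzero residue as a media/poison error rather than a
 * protection fault. A hypothetical pmem read path would do roughly:
 *
 *	rem = memcpy_mcsafe(buf, pmem_addr, size);
 *	if (rem)
 *		return -EIO;
 */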
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
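
/*
 * Worked example (illustrative, not part of the original file): with a
 * 64-byte cache line, clean_cache_range(addr, 2) for an addr ending in
 * ...05 masks the start down to the line base ...00 and issues a single
 * clwb, since both bytes still fall within that one line.
 */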
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
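
/*
 * Worked example (illustrative, not part of the original file): for an
 * 8-byte aligned dst and size = 21, the bulk moves with non-temporal
 * stores but the trailing byte can go through the cache, so the check
 * above sees size - flushed = 21 (not a multiple of 8) and flushes the
 * cache line containing the last byte (dst + 20).
 */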
void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}
	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}
	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
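
/*
 * Illustrative usage (a sketch, not part of the original file): callers
 * persisting data to pmem typically pair memcpy_flushcache() with a
 * store fence before claiming durability, as the nvdimm write path does.
 * The helper name and parameters below are assumptions.
 */
static inline void example_pmem_write(void *pmem_dst, const void *src,
		size_t len)
{
	memcpy_flushcache(pmem_dst, src, len);
	/* order the NT stores and flushes before any durability claim */
	wmb();
}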
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */