/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, down to 4KB,
 * when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
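
	/*
	 * Worked example (illustrative numbers only): a request for
	 * phys_addr = 0xfd000a04 and size = 0x10 ends up here with
	 * offset = 0xa04, phys_addr = 0xfd000000 and size = 0x1000,
	 * and the caller is handed back vaddr + 0xa04 at the end.
	 */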

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
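
/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * maps its device's MMIO resource once, accesses registers through the
 * readl()/writel() helpers, and releases the mapping with iounmap().
 * "res" stands in for the device's struct resource and the 0x10 register
 * offset is made up for the example.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	...
 *	iounmap(regs);
 */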

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
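
/*
 * Usage sketch (illustrative): write combining is typically used for
 * framebuffer-like regions that are dominated by streaming writes, e.g.
 * mapping a PCI BAR with ioremap_wc() and filling it with memcpy_toio().
 * "pdev", "image" and "len" are placeholders for the example.
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 0),
 *				      pci_resource_len(pdev, 0));
 *	if (fb)
 *		memcpy_toio(fb, image, len);
 */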

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start = phys & PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
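
/*
 * Usage sketch (illustrative): /dev/mem style access pairs the two helpers
 * above, mapping the page around "phys", copying out of it and unmapping
 * again. "buf" and "count" are placeholders; count must not reach past the
 * end of the mapped page.
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *	if (!ptr)
 *		return -EFAULT;
 *	memcpy(buf, ptr, count);
 *	unxlate_dev_mem_ptr(phys, ptr);
 */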

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}