/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author: Nicolas Pitre
 * Created: September 8, 2008
 * Copyright: Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"
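
/*
 * kmap() creates a persistent kernel mapping for a highmem page and may
 * sleep while waiting for a free pkmap slot, hence the might_sleep()
 * check. Lowmem pages are permanently mapped, so their linear address
 * is returned directly.
 */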
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
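
/*
 * kmap_atomic() must not sleep: pagefault_disable() keeps the section
 * atomic, and the mapping lives in a per-CPU fixmap slot selected by
 * the km_type. An existing pkmap mapping is reused via kmap_high_get()
 * so that a second, potentially aliasing, virtual mapping is not
 * created on VIVT caches.
 */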
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non-VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
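
/*
 * kunmap_atomic() handles three kinds of addresses: a fixmap entry set
 * up by kmap_atomic() (flushed first on VIVT, and unmapped only when
 * debugging), a pkmap address obtained through kmap_high_get(), whose
 * reference is dropped with kunmap_high(), and a plain lowmem address,
 * which needs no teardown.
 */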
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
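
/*
 * kmap_atomic_pfn() is the PFN-based variant of kmap_atomic(), usable
 * for memory without a struct page (e.g. I/O memory); it always goes
 * through a fixmap slot and is undone with kunmap_atomic().
 */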
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
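
/*
 * kmap_atomic_to_page() recovers the struct page behind an atomic kmap
 * address by reading the fixmap PTE; lowmem pointers translate directly.
 */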
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However unmapped pages may still be cached with a VIPT cache, and
 * it is not possible to perform cache maintenance on them using physical
 * addresses unfortunately. So we have no choice but to set up a temporary
 * virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we don't
 * want to keep interrupts disabled all the time when such maintenance is
 * taking place, we therefore allow for some reentrancy by preserving and
 * restoring the previous fixmap entry before the interrupted context is
 * resumed. If the reentrancy depth is 0 then there is no need to restore
 * the previous fixmap, and leaving the current one in place allows it to
 * be reused the next time without a TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
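
/*
 * Set up (or reuse) the per-CPU KM_L1_CACHE fixmap entry for @page and
 * return its virtual address. The previous PTE is handed back through
 * @saved_pte so that a nested user in interrupt context can restore it
 * in kunmap_high_l1_vipt().
 */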
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu;
	int *depth;
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	if (!in_interrupt())
		preempt_disable();

	cpu = smp_processor_id();
	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		*saved_pte = pte;
	} else {
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}
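
/*
 * Undo kmap_high_l1_vipt(). If this call is nested (depth still
 * non-zero) and the saved PTE differs from the current one, the
 * interrupted context's mapping is restored; at depth 0 the entry is
 * left in place so it can be reused without a TLB flush.
 */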
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}

#endif /* CONFIG_CPU_CACHE_VIPT */