/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

#define K(x) ((x) << (PAGE_SHIFT-10))	/* pages to kilobytes */

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       get_nr_swap_pages());

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %lukB)\n",
		       zone_to_nid(zone), zone->name,
		       K(total), largest_order ? K(1UL) << largest_order : 0);
	}
}

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd,
			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
	spin_unlock(&pgd_lock);
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
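
/*
 * Illustrative only (these lines are not from a real arch/tile caller):
 * code that needs to rewrite a single small-page kernel mapping that may
 * currently be covered by a huge page might do something like
 *
 *	shatter_huge_page(kva);
 *	ptep = virt_to_kpte(kva);
 *	set_pte(ptep, pte_modify(*ptep, prot));
 *
 * followed by a TLB flush for "kva".
 */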

/*
 * List of all pgd's needed so it can invalidate entries in both cached
 * and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 *
 * The lock is always taken with interrupts disabled, unlike on x86
 * and other platforms, because we need to take the lock in
 * shatter_huge_page(), which may be called from an interrupt context.
 * We are not at risk from the tlbflush IPI deadlock that was seen on
 * x86, since we use the flush_remote() API to have the hypervisor do
 * the TLB flushes regardless of irq disabling.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)
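
/*
 * A new pgd starts with an empty user portion and a copy of the kernel
 * mappings from swapper_pg_dir, and is linked onto pgd_list (under
 * pgd_lock) so that later changes to the kernel page tables can be
 * propagated to every pgd in the system.
 */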
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}
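
/*
 * A user "L2" (bottom-level) page table on tile can span more than one
 * page (1 << L2_USER_PGTABLE_ORDER pages), so the allocation and free
 * paths below must touch every constituent page, not just the first.
 */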

#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
			       int order)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
	int i;

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < order; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}

	pgtable_page_ctor(p);
	return p;
}

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < order; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
			unsigned long address, int order)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < order; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

/*
 * Return a pointer to the PTE that corresponds to the given
 * address in the given page table.  A NULL page table just uses
 * the standard kernel page table; the preferred API in this case
 * is virt_to_kpte().
 *
 * The returned pointer can point to a huge page in other levels
 * of the page table than the bottom, if the huge page is present
 * in the page table.  For bottom-level PTEs, the returned pointer
 * can point to a PTE that is either present or not.
 */
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	if (pud_huge_page(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, addr);
}
EXPORT_SYMBOL(virt_to_pte);

pte_t *virt_to_kpte(unsigned long kaddr)
{
	BUG_ON(kaddr < PAGE_OFFSET);
	return virt_to_pte(NULL, kaddr);
}
EXPORT_SYMBOL(virt_to_kpte);
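
/*
 * Encode the (x,y) mesh coordinates of "cpu" into the LOTAR field of a
 * tile-L3 PTE, so that memory mapped with the resulting pgprot is homed
 * on that cpu's cache.  Hypothetical usage sketch (not from a real caller):
 *
 *	prot = hv_pte_set_mode(PAGE_KERNEL, HV_PTE_MODE_CACHE_TILE_L3);
 *	prot = set_remote_cache_cpu(prot, cpu);
 */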
pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}
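
/* Inverse of set_remote_cache_cpu(): recover the cpu from the LOTAR field. */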
int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
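
/*
 * Write a PTE without any homing or validity checks.  On tilegx the PTE is
 * a single 64-bit word and can be stored directly.  On tilepro it is two
 * 32-bit words, so the stores are ordered (with a barrier() between them)
 * such that the word holding the "present" and "migrating" bits is written
 * last when the new PTE is present and first when it is not; no observer
 * ever sees a half-written PTE that claims to be present.
 */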
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}

void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
		/* The PTE actually references physical memory. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home of the PTE from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* remap_pfn_range(), etc, must supply PTE mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached != 0;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1UL;
		hv_set_caching(-1UL);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned long update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		free_vm_area(area);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void *)addr);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */