4 * Manages VM statistics
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Copyright (C) 2006 Silicon Graphics, Inc.,
9 * Christoph Lameter <christoph@lameter.com>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/cpu.h>
17 #include <linux/cpumask.h>
18 #include <linux/vmstat.h>
19 #include <linux/sched.h>
20 #include <linux/math64.h>
21 #include <linux/writeback.h>
22 #include <linux/compaction.h>
24 #ifdef CONFIG_VM_EVENT_COUNTERS
25 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
26 EXPORT_PER_CPU_SYMBOL(vm_event_states);
28 static void sum_vm_events(unsigned long *ret)
33 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
35 for_each_online_cpu(cpu) {
36 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
38 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
39 ret[i] += this->event[i];
44 * Accumulate the vm event counters across all CPUs.
45 * The result is unavoidably approximate - it can change
46 * during and after execution of this function.
48 void all_vm_events(unsigned long *ret)
54 EXPORT_SYMBOL_GPL(all_vm_events);
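/*
 * Typical usage (a sketch, not taken from a specific caller): snapshot all
 * event counters into a local array and then read individual items, e.g.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	faults = events[PGFAULT];
 *
 * where "faults" is just an illustrative local variable. The values are
 * only approximate snapshots, as noted above.
 */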
57 * Fold the foreign cpu events into our own.
59 * This adds the events to one processor's counters
60 * but keeps the global counts constant.
62 void vm_events_fold_cpu(int cpu)
64 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
67 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
68 count_vm_events(i, fold_state->event[i]);
69 fold_state->event[i] = 0;
73 #endif /* CONFIG_VM_EVENT_COUNTERS */
76 * Manage combined zone based / global counters
78 * vm_stat contains the global counters
80 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
81 EXPORT_SYMBOL(vm_stat);
85 int calculate_pressure_threshold(struct zone *zone)
88 int watermark_distance;
91 * As vmstats are not up to date, there is drift between the estimated
92 * and real values. For high thresholds and a high number of CPUs, it
93 * is possible for the min watermark to be breached while the estimated
94 * value looks fine. The pressure threshold is a reduced value such
95 * that even the maximum amount of drift will not accidentally breach
96 * the min watermark.
98 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
99 threshold = max(1, (int)(watermark_distance / num_online_cpus()));
102 * Maximum threshold is 125
104 threshold = min(125, threshold);
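/*
 * Worked example with illustrative numbers: a low-min watermark gap of
 * 256 pages on a machine with 8 online cpus gives
 * max(1, 256 / 8) = 32, comfortably below the 125 cap.
 */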
109 int calculate_normal_threshold(struct zone *zone)
112 int mem; /* memory in 128 MB units */
115 * The threshold scales with the number of processors and the amount
116 * of memory per zone. More memory means that we can defer updates for
117 * longer; more processors could lead to more contention.
118 * fls() is used to have a cheap way of logarithmic scaling.
120 * Some sample thresholds:
122 * Threshold Processors (fls) Zonesize fls(mem+1)
123 * ------------------------------------------------------------------
140 * 125 1024 10 8-16 GB 8
141 * 125 1024 10 16-32 GB 9
144 mem = zone->managed_pages >> (27 - PAGE_SHIFT);
146 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
149 * Maximum threshold is 125
151 threshold = min(125, threshold);
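/*
 * Worked example: 2 online cpus and a 1-2 GB zone give mem = 8..15, so
 * threshold = 2 * fls(2) * (1 + fls(8)) = 2 * 2 * 5 = 20. The 1024-cpu
 * rows in the table above exceed the cap and are clamped to 125.
 */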
157 * Refresh the thresholds for each zone.
159 void refresh_zone_stat_thresholds(void)
165 for_each_populated_zone(zone) {
166 unsigned long max_drift, tolerate_drift;
168 threshold = calculate_normal_threshold(zone);
170 for_each_online_cpu(cpu)
171 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
175 * Only set percpu_drift_mark if there is a danger that
176 * NR_FREE_PAGES reports the low watermark is ok when in fact
177 * the min watermark could be breached by an allocation
179 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
180 max_drift = num_online_cpus() * threshold;
181 if (max_drift > tolerate_drift)
182 zone->percpu_drift_mark = high_wmark_pages(zone) +
187 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
188 int (*calculate_pressure)(struct zone *))
195 for (i = 0; i < pgdat->nr_zones; i++) {
196 zone = &pgdat->node_zones[i];
197 if (!zone->percpu_drift_mark)
200 threshold = (*calculate_pressure)(zone);
201 for_each_possible_cpu(cpu)
202 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
208 * For use when we know that interrupts are disabled.
210 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
213 struct per_cpu_pageset __percpu *pcp = zone->pageset;
214 s8 __percpu *p = pcp->vm_stat_diff + item;
218 x = delta + __this_cpu_read(*p);
220 t = __this_cpu_read(pcp->stat_threshold);
222 if (unlikely(x > t || x < -t)) {
223 zone_page_state_add(x, zone, item);
226 __this_cpu_write(*p, x);
228 EXPORT_SYMBOL(__mod_zone_page_state);
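/*
 * Usage sketch (illustrative, with interrupts already disabled):
 *
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, -nr_freed);
 *
 * where "nr_freed" is a hypothetical local count. The per-cpu differential
 * absorbs the delta until stat_threshold is crossed.
 */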
231 * Optimized increment and decrement functions.
233 * These are only for a single page and therefore can take a struct page *
234 * argument instead of struct zone *. This allows the inclusion of the code
235 * generated for page_zone(page) into the optimized functions.
237 * No overflow check is necessary and therefore the differential can be
238 * incremented or decremented in place which may allow the compilers to
239 * generate better code.
240 * The increment or decrement is known and therefore one boundary check can
241 * be omitted.
243 * NOTE: These functions are very performance sensitive. Change only
244 * with care.
246 * Some processors have inc/dec instructions that are atomic vs an interrupt.
247 * However, the code must first determine the differential location in a zone
248 * based on the processor number and then inc/dec the counter. There is no
249 * guarantee without disabling preemption that the processor will not change
250 * in between and therefore the atomicity vs. interrupt cannot be exploited
251 * in a useful way here.
253 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
255 struct per_cpu_pageset __percpu *pcp = zone->pageset;
256 s8 __percpu *p = pcp->vm_stat_diff + item;
259 v = __this_cpu_inc_return(*p);
260 t = __this_cpu_read(pcp->stat_threshold);
261 if (unlikely(v > t)) {
262 s8 overstep = t >> 1;
264 zone_page_state_add(v + overstep, zone, item);
265 __this_cpu_write(*p, -overstep);
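/*
 * Overstep example: with stat_threshold t = 32, a per-cpu diff that ticks
 * up to 33 is folded as 33 + 16 = 49 into the zone counter and the diff is
 * reset to -16, so the next 48 increments stay purely per-cpu.
 */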
269 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
271 __inc_zone_state(page_zone(page), item);
273 EXPORT_SYMBOL(__inc_zone_page_state);
275 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
277 struct per_cpu_pageset __percpu *pcp = zone->pageset;
278 s8 __percpu *p = pcp->vm_stat_diff + item;
281 v = __this_cpu_dec_return(*p);
282 t = __this_cpu_read(pcp->stat_threshold);
283 if (unlikely(v < -t)) {
284 s8 overstep = t >> 1;
286 zone_page_state_add(v - overstep, zone, item);
287 __this_cpu_write(*p, overstep);
291 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
293 __dec_zone_state(page_zone(page), item);
295 EXPORT_SYMBOL(__dec_zone_page_state);
297 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
299 * If we have cmpxchg_local support then we do not need to incur the overhead
300 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
302 * mod_state() modifies the zone counter state through atomic per cpu
303 * variables.
305 * Overstep mode specifies how overstep should be handled:
306 *     0       No overstepping
307 *     1       Overstepping half of threshold
308 *     -1      Overstepping minus half of threshold
310 static inline void mod_state(struct zone *zone,
311 enum zone_stat_item item, int delta, int overstep_mode)
313 struct per_cpu_pageset __percpu *pcp = zone->pageset;
314 s8 __percpu *p = pcp->vm_stat_diff + item;
318 z = 0; /* overflow to zone counters */
321 * The fetching of the stat_threshold is racy. We may apply
322 * a counter threshold to the wrong cpu if we get
323 * rescheduled while executing here. However, the next
324 * counter update will apply the threshold again and
325 * therefore bring the counter under the threshold again.
327 * Most of the time the thresholds are the same anyway
328 * for all cpus in a zone.
330 t = this_cpu_read(pcp->stat_threshold);
332 o = this_cpu_read(*p);
335 if (n > t || n < -t) {
336 int os = overstep_mode * (t >> 1);
338 /* Overflow must be added to zone counters */
342 } while (this_cpu_cmpxchg(*p, o, n) != o);
345 zone_page_state_add(z, zone, item);
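/*
 * Example: t = 32, old per-cpu value o = 30, delta = 5 gives n = 35 > t,
 * so with overstep_mode 1 os = 16, z = 51 is folded into the zone counter
 * and the per-cpu diff becomes -16; the zone + per-cpu sum still moves by
 * exactly the delta of 5.
 */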
348 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
351 mod_state(zone, item, delta, 0);
353 EXPORT_SYMBOL(mod_zone_page_state);
355 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
357 mod_state(zone, item, 1, 1);
360 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
362 mod_state(page_zone(page), item, 1, 1);
364 EXPORT_SYMBOL(inc_zone_page_state);
366 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
368 mod_state(page_zone(page), item, -1, -1);
370 EXPORT_SYMBOL(dec_zone_page_state);
373 * Use interrupt disable to serialize counter updates
375 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
380 local_irq_save(flags);
381 __mod_zone_page_state(zone, item, delta);
382 local_irq_restore(flags);
384 EXPORT_SYMBOL(mod_zone_page_state);
386 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
390 local_irq_save(flags);
391 __inc_zone_state(zone, item);
392 local_irq_restore(flags);
395 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
400 zone = page_zone(page);
401 local_irq_save(flags);
402 __inc_zone_state(zone, item);
403 local_irq_restore(flags);
405 EXPORT_SYMBOL(inc_zone_page_state);
407 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
411 local_irq_save(flags);
412 __dec_zone_page_state(page, item);
413 local_irq_restore(flags);
415 EXPORT_SYMBOL(dec_zone_page_state);
419 * Update the zone counters for one cpu.
421 * The cpu specified must be either the current cpu or a processor that
422 * is not online. If it is the current cpu then the execution thread must
423 * be pinned to the current cpu.
425 * Note that refresh_cpu_vm_stats strives to only access
426 * node local memory. The per cpu pagesets on remote zones are placed
427 * in the memory local to the processor using that pageset. So the
428 * loop over all zones will access a series of cachelines local to
429 * the processor.
431 * The call to zone_page_state_add updates the cachelines with the
432 * statistics in the remote zone struct as well as the global cachelines
433 * with the global counters. These could cause remote node cache line
434 * bouncing, so they are only done when necessary.
436 bool refresh_cpu_vm_stats(int cpu)
440 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
441 bool vm_activity = false;
443 for_each_populated_zone(zone) {
444 struct per_cpu_pageset *p;
446 p = per_cpu_ptr(zone->pageset, cpu);
448 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
449 if (p->vm_stat_diff[i]) {
453 local_irq_save(flags);
454 v = p->vm_stat_diff[i];
455 p->vm_stat_diff[i] = 0;
456 local_irq_restore(flags);
457 atomic_long_add(v, &zone->vm_stat[i]);
460 /* 3 seconds idle till flush */
467 * Deal with draining the remote pageset of this
468 * processor.
470 * Check if there are pages remaining in this pageset;
471 * if not, there is nothing to expire.
473 if (!p->expire || !p->pcp.count)
477 * We never drain zones local to this processor.
479 if (zone_to_nid(zone) == numa_node_id()) {
490 drain_zone_pages(zone, &p->pcp);
495 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
496 if (global_diff[i]) {
497 atomic_long_add(global_diff[i], &vm_stat[i]);
506 * This is only called if !populated_zone(zone), which implies no other users of
507 * pset->vm_stat_diff[] exist.
509 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
513 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
514 if (pset->vm_stat_diff[i]) {
515 int v = pset->vm_stat_diff[i];
516 pset->vm_stat_diff[i] = 0;
517 atomic_long_add(v, &zone->vm_stat[i]);
518 atomic_long_add(v, &vm_stat[i]);
525 * zonelist = the list of zones passed to the allocator
526 * z = the zone from which the allocation occurred.
528 * Must be called with interrupts disabled.
530 * When __GFP_OTHER_NODE is set assume the node of the preferred
531 * zone is the local node. This is useful for daemons that allocate
532 * memory on behalf of other processes.
534 void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
536 if (z->zone_pgdat == preferred_zone->zone_pgdat) {
537 __inc_zone_state(z, NUMA_HIT);
539 __inc_zone_state(z, NUMA_MISS);
540 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
542 if (z->node == ((flags & __GFP_OTHER_NODE) ?
543 preferred_zone->node : numa_node_id()))
544 __inc_zone_state(z, NUMA_LOCAL);
546 __inc_zone_state(z, NUMA_OTHER);
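/*
 * Example: a task on node 0 that gets its page from a node 0 zone counts
 * NUMA_HIT and NUMA_LOCAL there; if the page comes from node 1 instead,
 * the node 1 zone counts NUMA_MISS and NUMA_OTHER while the preferred
 * node 0 zone counts NUMA_FOREIGN.
 */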
550 #ifdef CONFIG_COMPACTION
552 struct contig_page_info {
553 unsigned long free_pages;
554 unsigned long free_blocks_total;
555 unsigned long free_blocks_suitable;
559 * Calculate the number of free pages in a zone, how many contiguous
560 * pages are free and how many are large enough to satisfy an allocation of
561 * the target size. Note that this function makes no attempt to estimate
562 * how many suitable free blocks there *might* be if MOVABLE pages were
563 * migrated. Calculating that is possible, but expensive and can be
564 * figured out from userspace
566 static void fill_contig_page_info(struct zone *zone,
567 unsigned int suitable_order,
568 struct contig_page_info *info)
572 info->free_pages = 0;
573 info->free_blocks_total = 0;
574 info->free_blocks_suitable = 0;
576 for (order = 0; order < MAX_ORDER; order++) {
577 unsigned long blocks;
579 /* Count number of free blocks */
580 blocks = zone->free_area[order].nr_free;
581 info->free_blocks_total += blocks;
583 /* Count free base pages */
584 info->free_pages += blocks << order;
586 /* Count the suitable free blocks */
587 if (order >= suitable_order)
588 info->free_blocks_suitable += blocks <<
589 (order - suitable_order);
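/*
 * For example, with suitable_order = 2 a single free order-4 block counts
 * as 1 << (4 - 2) = 4 suitable blocks, since it could be split into four
 * order-2 chunks.
 */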
594 * A fragmentation index only makes sense if an allocation of a requested
595 * size would fail. If that is true, the fragmentation index indicates
596 * whether external fragmentation or a lack of memory was the problem.
597 * The value can be used to determine if page reclaim or compaction
598 * should be used.
600 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
602 unsigned long requested = 1UL << order;
604 if (!info->free_blocks_total)
607 /* Fragmentation index only makes sense when a request would fail */
608 if (info->free_blocks_suitable)
612 * Index is between 0 and 1 so return within 3 decimal places
614 * 0 => allocation would fail due to lack of memory
615 * 1 => allocation would fail due to fragmentation
617 return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
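/*
 * Worked example: 1000 free pages, all in order-0 blocks, and an order-2
 * request (requested = 4): free_blocks_total = 1000 and no block is
 * suitable, so index = 1000 - (1000 + 1000 * 1000 / 4) / 1000 = 749,
 * i.e. 0.749, so the failure is mostly due to fragmentation rather than
 * lack of memory.
 */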
620 /* Same as __fragmentation_index() but allocates contig_page_info on stack */
621 int fragmentation_index(struct zone *zone, unsigned int order)
623 struct contig_page_info info;
625 fill_contig_page_info(zone, order, &info);
626 return __fragmentation_index(order, &info);
630 #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
631 #include <linux/proc_fs.h>
632 #include <linux/seq_file.h>
634 static char * const migratetype_names[MIGRATE_TYPES] = {
642 #ifdef CONFIG_MEMORY_ISOLATION
647 static void *frag_start(struct seq_file *m, loff_t *pos)
651 for (pgdat = first_online_pgdat();
653 pgdat = next_online_pgdat(pgdat))
659 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
661 pg_data_t *pgdat = (pg_data_t *)arg;
664 return next_online_pgdat(pgdat);
667 static void frag_stop(struct seq_file *m, void *arg)
671 /* Walk all the zones in a node and print using a callback */
672 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
673 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
676 struct zone *node_zones = pgdat->node_zones;
679 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
680 if (!populated_zone(zone))
683 spin_lock_irqsave(&zone->lock, flags);
684 print(m, pgdat, zone);
685 spin_unlock_irqrestore(&zone->lock, flags);
690 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
691 #ifdef CONFIG_ZONE_DMA
692 #define TEXT_FOR_DMA(xx) xx "_dma",
694 #define TEXT_FOR_DMA(xx)
697 #ifdef CONFIG_ZONE_DMA32
698 #define TEXT_FOR_DMA32(xx) xx "_dma32",
700 #define TEXT_FOR_DMA32(xx)
703 #ifdef CONFIG_HIGHMEM
704 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
706 #define TEXT_FOR_HIGHMEM(xx)
709 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
710 TEXT_FOR_HIGHMEM(xx) xx "_movable",
712 const char * const vmstat_text[] = {
713 /* Zoned VM counters */
726 "nr_slab_reclaimable",
727 "nr_slab_unreclaimable",
728 "nr_page_table_pages",
733 "nr_vmscan_immediate_reclaim",
749 "nr_anon_transparent_hugepages",
751 "nr_dirty_threshold",
752 "nr_dirty_background_threshold",
754 #ifdef CONFIG_VM_EVENT_COUNTERS
760 TEXTS_FOR_ZONES("pgalloc")
769 TEXTS_FOR_ZONES("pgrefill")
770 TEXTS_FOR_ZONES("pgsteal_kswapd")
771 TEXTS_FOR_ZONES("pgsteal_direct")
772 TEXTS_FOR_ZONES("pgscan_kswapd")
773 TEXTS_FOR_ZONES("pgscan_direct")
774 "pgscan_direct_throttle",
777 "zone_reclaim_failed",
782 "kswapd_low_wmark_hit_quickly",
783 "kswapd_high_wmark_hit_quickly",
789 #ifdef CONFIG_NUMA_BALANCING
791 "numa_huge_pte_updates",
793 "numa_hint_faults_local",
794 "numa_pages_migrated",
796 #ifdef CONFIG_MIGRATION
800 #ifdef CONFIG_COMPACTION
801 "compact_migrate_scanned",
802 "compact_free_scanned",
809 #ifdef CONFIG_HUGETLB_PAGE
810 "htlb_buddy_alloc_success",
811 "htlb_buddy_alloc_fail",
813 "unevictable_pgs_culled",
814 "unevictable_pgs_scanned",
815 "unevictable_pgs_rescued",
816 "unevictable_pgs_mlocked",
817 "unevictable_pgs_munlocked",
818 "unevictable_pgs_cleared",
819 "unevictable_pgs_stranded",
821 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
823 "thp_fault_fallback",
824 "thp_collapse_alloc",
825 "thp_collapse_alloc_failed",
827 "thp_zero_page_alloc",
828 "thp_zero_page_alloc_failed",
831 #endif /* CONFIG_VM_EVENT_COUNTERS */
833 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
836 #ifdef CONFIG_PROC_FS
837 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
842 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
843 for (order = 0; order < MAX_ORDER; ++order)
844 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
849 * This walks the free areas for each zone.
851 static int frag_show(struct seq_file *m, void *arg)
853 pg_data_t *pgdat = (pg_data_t *)arg;
854 walk_zones_in_node(m, pgdat, frag_show_print);
858 static void pagetypeinfo_showfree_print(struct seq_file *m,
859 pg_data_t *pgdat, struct zone *zone)
863 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
864 seq_printf(m, "Node %4d, zone %8s, type %12s ",
867 migratetype_names[mtype]);
868 for (order = 0; order < MAX_ORDER; ++order) {
869 unsigned long freecount = 0;
870 struct free_area *area;
871 struct list_head *curr;
873 area = &(zone->free_area[order]);
875 list_for_each(curr, &area->free_list[mtype])
877 seq_printf(m, "%6lu ", freecount);
883 /* Print out the free pages at each order for each migratetype */
884 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
887 pg_data_t *pgdat = (pg_data_t *)arg;
890 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
891 for (order = 0; order < MAX_ORDER; ++order)
892 seq_printf(m, "%6d ", order);
895 walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
900 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
901 pg_data_t *pgdat, struct zone *zone)
905 unsigned long start_pfn = zone->zone_start_pfn;
906 unsigned long end_pfn = zone_end_pfn(zone);
907 unsigned long count[MIGRATE_TYPES] = { 0, };
909 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
915 page = pfn_to_page(pfn);
917 /* Watch for unexpected holes punched in the memmap */
918 if (!memmap_valid_within(pfn, page, zone))
921 mtype = get_pageblock_migratetype(page);
923 if (mtype < MIGRATE_TYPES)
928 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
929 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
930 seq_printf(m, "%12lu ", count[mtype]);
934 /* Print out the number of pageblocks for each migratetype */
935 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
938 pg_data_t *pgdat = (pg_data_t *)arg;
940 seq_printf(m, "\n%-23s", "Number of blocks type ");
941 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
942 seq_printf(m, "%12s ", migratetype_names[mtype]);
944 walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
950 * This prints out statistics in relation to grouping pages by mobility.
951 * It is expensive to collect so do not constantly read the file.
953 static int pagetypeinfo_show(struct seq_file *m, void *arg)
955 pg_data_t *pgdat = (pg_data_t *)arg;
957 /* check memoryless node */
958 if (!node_state(pgdat->node_id, N_MEMORY))
961 seq_printf(m, "Page block order: %d\n", pageblock_order);
962 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
964 pagetypeinfo_showfree(m, pgdat);
965 pagetypeinfo_showblockcount(m, pgdat);
970 static const struct seq_operations fragmentation_op = {
977 static int fragmentation_open(struct inode *inode, struct file *file)
979 return seq_open(file, &fragmentation_op);
982 static const struct file_operations fragmentation_file_operations = {
983 .open = fragmentation_open,
986 .release = seq_release,
989 static const struct seq_operations pagetypeinfo_op = {
993 .show = pagetypeinfo_show,
996 static int pagetypeinfo_open(struct inode *inode, struct file *file)
998 return seq_open(file, &pagetypeinfo_op);
1001 static const struct file_operations pagetypeinfo_file_ops = {
1002 .open = pagetypeinfo_open,
1004 .llseek = seq_lseek,
1005 .release = seq_release,
1008 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1012 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1022 zone_page_state(zone, NR_FREE_PAGES),
1023 min_wmark_pages(zone),
1024 low_wmark_pages(zone),
1025 high_wmark_pages(zone),
1026 zone->pages_scanned,
1027 zone->spanned_pages,
1028 zone->present_pages,
1029 zone->managed_pages);
1031 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1032 seq_printf(m, "\n %-12s %lu", vmstat_text[i],
1033 zone_page_state(zone, i));
1036 "\n protection: (%lu",
1037 zone->lowmem_reserve[0]);
1038 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1039 seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
1043 for_each_online_cpu(i) {
1044 struct per_cpu_pageset *pageset;
1046 pageset = per_cpu_ptr(zone->pageset, i);
1055 pageset->pcp.batch);
1057 seq_printf(m, "\n vm stats threshold: %d",
1058 pageset->stat_threshold);
1062 "\n all_unreclaimable: %u"
1064 "\n inactive_ratio: %u",
1065 zone->all_unreclaimable,
1066 zone->zone_start_pfn,
1067 zone->inactive_ratio);
1072 * Output information about zones in @pgdat.
1074 static int zoneinfo_show(struct seq_file *m, void *arg)
1076 pg_data_t *pgdat = (pg_data_t *)arg;
1077 walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1081 static const struct seq_operations zoneinfo_op = {
1082 .start = frag_start, /* iterate over all zones. The same as in
1083                        * fragmentation. */
1086 .show = zoneinfo_show,
1089 static int zoneinfo_open(struct inode *inode, struct file *file)
1091 return seq_open(file, &zoneinfo_op);
1094 static const struct file_operations proc_zoneinfo_file_operations = {
1095 .open = zoneinfo_open,
1097 .llseek = seq_lseek,
1098 .release = seq_release,
1101 enum writeback_stat_item {
1103 NR_DIRTY_BG_THRESHOLD,
1104 NR_VM_WRITEBACK_STAT_ITEMS,
1107 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1110 int i, stat_items_size;
1112 if (*pos >= ARRAY_SIZE(vmstat_text))
1114 stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1115 NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1117 #ifdef CONFIG_VM_EVENT_COUNTERS
1118 stat_items_size += sizeof(struct vm_event_state);
1121 v = kmalloc(stat_items_size, GFP_KERNEL);
1124 return ERR_PTR(-ENOMEM);
1125 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1126 v[i] = global_page_state(i);
1127 v += NR_VM_ZONE_STAT_ITEMS;
1129 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1130 v + NR_DIRTY_THRESHOLD);
1131 v += NR_VM_WRITEBACK_STAT_ITEMS;
1133 #ifdef CONFIG_VM_EVENT_COUNTERS
1135 v[PGPGIN] /= 2; /* sectors -> kbytes */
1138 return (unsigned long *)m->private + *pos;
1141 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1144 if (*pos >= ARRAY_SIZE(vmstat_text))
1146 return (unsigned long *)m->private + *pos;
1149 static int vmstat_show(struct seq_file *m, void *arg)
1151 unsigned long *l = arg;
1152 unsigned long off = l - (unsigned long *)m->private;
1154 seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1158 static void vmstat_stop(struct seq_file *m, void *arg)
1164 static const struct seq_operations vmstat_op = {
1165 .start = vmstat_start,
1166 .next = vmstat_next,
1167 .stop = vmstat_stop,
1168 .show = vmstat_show,
1171 static int vmstat_open(struct inode *inode, struct file *file)
1173 return seq_open(file, &vmstat_op);
1176 static const struct file_operations proc_vmstat_file_operations = {
1177 .open = vmstat_open,
1179 .llseek = seq_lseek,
1180 .release = seq_release,
1182 #endif /* CONFIG_PROC_FS */
1185 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1186 int sysctl_stat_interval __read_mostly = HZ;
1187 static struct cpumask vmstat_off_cpus;
1188 struct delayed_work vmstat_monitor_work;
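/*
 * Scheme implemented below: each online cpu runs a deferrable vmstat_work
 * that folds its per-cpu diffs via refresh_cpu_vm_stats(). A cpu that saw
 * no counter activity stops rescheduling itself and is marked in
 * vmstat_off_cpus; the unbound vmstat_monitor_work then periodically uses
 * need_vmstat() to restart the timer of any such cpu that has accumulated
 * new diffs or holds pages on a remote zone's pcp lists.
 */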
1190 static inline bool need_vmstat(int cpu)
1195 for_each_populated_zone(zone) {
1196 struct per_cpu_pageset *p;
1198 p = per_cpu_ptr(zone->pageset, cpu);
1200 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1201 if (p->vm_stat_diff[i])
1204 if (zone_to_nid(zone) != numa_node_id() && p->pcp.count)
1211 static void vmstat_update(struct work_struct *w);
1213 static void start_cpu_timer(int cpu)
1215 struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1217 cpumask_clear_cpu(cpu, &vmstat_off_cpus);
1218 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1221 static void __cpuinit setup_cpu_timer(int cpu)
1223 struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1225 INIT_DEFERRABLE_WORK(work, vmstat_update);
1226 start_cpu_timer(cpu);
1229 static void vmstat_update_monitor(struct work_struct *w)
1233 for_each_cpu_and(cpu, &vmstat_off_cpus, cpu_online_mask)
1234 if (need_vmstat(cpu))
1235 start_cpu_timer(cpu);
1237 queue_delayed_work(system_unbound_wq, &vmstat_monitor_work,
1238 round_jiffies_relative(sysctl_stat_interval));
1242 static void vmstat_update(struct work_struct *w)
1244 int cpu = smp_processor_id();
1246 if (likely(refresh_cpu_vm_stats(cpu)))
1247 schedule_delayed_work(&__get_cpu_var(vmstat_work),
1248 round_jiffies_relative(sysctl_stat_interval));
1250 cpumask_set_cpu(cpu, &vmstat_off_cpus);
1254 * Use the cpu notifier to ensure that the thresholds are recalculated
1255 * when necessary.
1257 static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
1258 unsigned long action,
1261 long cpu = (long)hcpu;
1265 case CPU_ONLINE_FROZEN:
1266 refresh_zone_stat_thresholds();
1267 setup_cpu_timer(cpu);
1268 node_set_state(cpu_to_node(cpu), N_CPU);
1270 case CPU_DOWN_PREPARE:
1271 case CPU_DOWN_PREPARE_FROZEN:
1272 if (!cpumask_test_cpu(cpu, &vmstat_off_cpus)) {
1273 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1274 per_cpu(vmstat_work, cpu).work.func = NULL;
1277 case CPU_DOWN_FAILED:
1278 case CPU_DOWN_FAILED_FROZEN:
1279 setup_cpu_timer(cpu);
1282 case CPU_DEAD_FROZEN:
1283 refresh_zone_stat_thresholds();
1291 static struct notifier_block __cpuinitdata vmstat_notifier =
1292 { &vmstat_cpuup_callback, NULL, 0 };
1295 static int __init setup_vmstat(void)
1300 register_cpu_notifier(&vmstat_notifier);
1302 INIT_DEFERRABLE_WORK(&vmstat_monitor_work,
1303 vmstat_update_monitor);
1304 queue_delayed_work(system_unbound_wq,
1305 &vmstat_monitor_work,
1306 round_jiffies_relative(HZ));
1308 for_each_online_cpu(cpu)
1309 setup_cpu_timer(cpu);
1311 #ifdef CONFIG_PROC_FS
1312 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1313 proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1314 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1315 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1319 module_init(setup_vmstat)
1321 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1322 #include <linux/debugfs.h>
1326 * Return an index indicating how much of the available free memory is
1327 * unusable for an allocation of the requested size.
1329 static int unusable_free_index(unsigned int order,
1330 struct contig_page_info *info)
1332 /* No free memory is interpreted as all free memory is unusable */
1333 if (info->free_pages == 0)
1337 * Index should be a value between 0 and 1. Return a value to 3
1338 * decimal places.
1340 * 0 => no fragmentation
1341 * 1 => high fragmentation
1343 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
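/*
 * Worked example: for an order-2 request with 1000 free pages, of which
 * the equivalent of 200 order-2 blocks sits in chunks of order 2 or
 * larger, free_blocks_suitable << 2 covers 800 pages and the index is
 * (1000 - 800) * 1000 / 1000 = 200, i.e. 0.200: about 20% of the free
 * memory is unusable for that allocation.
 */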
1347 static void unusable_show_print(struct seq_file *m,
1348 pg_data_t *pgdat, struct zone *zone)
1352 struct contig_page_info info;
1354 seq_printf(m, "Node %d, zone %8s ",
1357 for (order = 0; order < MAX_ORDER; ++order) {
1358 fill_contig_page_info(zone, order, &info);
1359 index = unusable_free_index(order, &info);
1360 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1367 * Display unusable free space index
1369 * The unusable free space index measures how much of the available free
1370 * memory cannot be used to satisfy an allocation of a given size and is a
1371 * value between 0 and 1. The higher the value, the more of the free memory is
1372 * unusable and, by implication, the worse the external fragmentation is. This
1373 * can be expressed as a percentage by multiplying by 100.
1375 static int unusable_show(struct seq_file *m, void *arg)
1377 pg_data_t *pgdat = (pg_data_t *)arg;
1379 /* check memoryless node */
1380 if (!node_state(pgdat->node_id, N_MEMORY))
1383 walk_zones_in_node(m, pgdat, unusable_show_print);
1388 static const struct seq_operations unusable_op = {
1389 .start = frag_start,
1392 .show = unusable_show,
1395 static int unusable_open(struct inode *inode, struct file *file)
1397 return seq_open(file, &unusable_op);
1400 static const struct file_operations unusable_file_ops = {
1401 .open = unusable_open,
1403 .llseek = seq_lseek,
1404 .release = seq_release,
1407 static void extfrag_show_print(struct seq_file *m,
1408 pg_data_t *pgdat, struct zone *zone)
1413 /* Alloc on stack as interrupts are disabled for zone walk */
1414 struct contig_page_info info;
1416 seq_printf(m, "Node %d, zone %8s ",
1419 for (order = 0; order < MAX_ORDER; ++order) {
1420 fill_contig_page_info(zone, order, &info);
1421 index = __fragmentation_index(order, &info);
1422 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1429 * Display the fragmentation index for each order at which an allocation would fail
1431 static int extfrag_show(struct seq_file *m, void *arg)
1433 pg_data_t *pgdat = (pg_data_t *)arg;
1435 walk_zones_in_node(m, pgdat, extfrag_show_print);
1440 static const struct seq_operations extfrag_op = {
1441 .start = frag_start,
1444 .show = extfrag_show,
1447 static int extfrag_open(struct inode *inode, struct file *file)
1449 return seq_open(file, &extfrag_op);
1452 static const struct file_operations extfrag_file_ops = {
1453 .open = extfrag_open,
1455 .llseek = seq_lseek,
1456 .release = seq_release,
1459 static int __init extfrag_debug_init(void)
1461 struct dentry *extfrag_debug_root;
1463 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1464 if (!extfrag_debug_root)
1467 if (!debugfs_create_file("unusable_index", 0444,
1468 extfrag_debug_root, NULL, &unusable_file_ops))
1471 if (!debugfs_create_file("extfrag_index", 0444,
1472 extfrag_debug_root, NULL, &extfrag_file_ops))
1477 debugfs_remove_recursive(extfrag_debug_root);
1481 module_init(extfrag_debug_init);