#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
-#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
* with the global counters. These could cause remote node cache line
* bouncing and will have to be only done when necessary.
*/
-bool refresh_cpu_vm_stats(int cpu)
+void refresh_cpu_vm_stats(int cpu)
{
struct zone *zone;
int i;
int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
- bool vm_activity = false;
for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
if (p->expire)
continue;
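+ /* Countdown expired: drain any pages still cached on this remote pageset. */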
- if (p->pcp.count) {
- vm_activity = true;
+ if (p->pcp.count)
drain_zone_pages(zone, &p->pcp);
- }
#endif
}
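+ /* Fold the accumulated deltas into the global vm_stat counters. */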
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (global_diff[i]) {
+ if (global_diff[i])
atomic_long_add(global_diff[i], &vm_stat[i]);
- vm_activity = true;
- }
-
- return vm_activity;
-
}
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
-static struct cpumask vmstat_off_cpus;
-struct delayed_work vmstat_monitor_work;
-
-static inline bool need_vmstat(int cpu)
-{
- struct zone *zone;
- int i;
-
- for_each_populated_zone(zone) {
- struct per_cpu_pageset *p;
- p = per_cpu_ptr(zone->pageset, cpu);
-
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (p->vm_stat_diff[i])
- return true;
-
- if (zone_to_nid(zone) != numa_node_id() && p->pcp.count)
- return true;
- }
-
- return false;
-}
-
-static void vmstat_update(struct work_struct *w);
-
-static void start_cpu_timer(int cpu)
+static void vmstat_update(struct work_struct *w)
{
- struct delayed_work *work = &per_cpu(vmstat_work, cpu);
-
- cpumask_clear_cpu(cpu, &vmstat_off_cpus);
- schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
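+ /*
+ * Fold this CPU's per-zone stat diffs into the zone and global counters,
+ * then re-arm the deferrable work for the next stat interval.
+ */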
+ refresh_cpu_vm_stats(smp_processor_id());
+ schedule_delayed_work(&__get_cpu_var(vmstat_work),
+ round_jiffies_relative(sysctl_stat_interval));
}
-static void __cpuinit setup_cpu_timer(int cpu)
+static void __cpuinit start_cpu_timer(int cpu)
{
struct delayed_work *work = &per_cpu(vmstat_work, cpu);
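+ /* Deferrable work: an idle CPU is not woken up just to update stats. */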
INIT_DEFERRABLE_WORK(work, vmstat_update);
- start_cpu_timer(cpu);
-}
-
-static void vmstat_update_monitor(struct work_struct *w)
-{
- int cpu;
-
- for_each_cpu_and(cpu, &vmstat_off_cpus, cpu_online_mask)
- if (need_vmstat(cpu))
- start_cpu_timer(cpu);
-
- queue_delayed_work(system_unbound_wq, &vmstat_monitor_work,
- round_jiffies_relative(sysctl_stat_interval));
-}
-
-
-static void vmstat_update(struct work_struct *w)
-{
- int cpu = smp_processor_id();
-
- if (likely(refresh_cpu_vm_stats(cpu)))
- schedule_delayed_work(&__get_cpu_var(vmstat_work),
- round_jiffies_relative(sysctl_stat_interval));
- else
- cpumask_set_cpu(cpu, &vmstat_off_cpus);
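+ /* Round per CPU so the vmstat timers are staggered rather than firing together. */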
+ schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
refresh_zone_stat_thresholds();
- setup_cpu_timer(cpu);
+ start_cpu_timer(cpu);
node_set_state(cpu_to_node(cpu), N_CPU);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- if (!cpumask_test_cpu(cpu, &vmstat_off_cpus)) {
- cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
- per_cpu(vmstat_work, cpu).work.func = NULL;
- }
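+ /* Cancel the outstanding vmstat work for the departing CPU. */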
+ cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+ per_cpu(vmstat_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- setup_cpu_timer(cpu);
+ start_cpu_timer(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
register_cpu_notifier(&vmstat_notifier);
- INIT_DEFERRABLE_WORK(&vmstat_monitor_work,
- vmstat_update_monitor);
- queue_delayed_work(system_unbound_wq,
- &vmstat_monitor_work,
- round_jiffies_relative(HZ));
-
for_each_online_cpu(cpu)
- setup_cpu_timer(cpu);
+ start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);