[PATCH] cpusets: formalize intermediate GFP_KERNEL containment
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cfffe5098d538e6d54d1954c523d455924cbf7fc..a740778f688da277d35eb0d15a60168257dbe42e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 
+	atomic_inc(&zone->reclaim_in_progress);
+
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	}
 
 	throttle_vm_writeout();
+
+	atomic_dec(&zone->reclaim_in_progress);
 }
 
 /*
@@ -890,7 +894,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 		if (zone->present_pages == 0)
 			continue;
 
-		if (!cpuset_zone_allowed(zone))
+		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 			continue;
 
 		zone->temp_priority = sc->priority;
@@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
-		atomic_inc(&zone->reclaim_in_progress);
 		shrink_zone(zone, sc);
-		atomic_dec(&zone->reclaim_in_progress);
 	}
 }
 
@@ -938,7 +940,7 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone))
+		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 			continue;
 
 		zone->temp_priority = DEF_PRIORITY;
@@ -984,7 +986,7 @@ out:
 	for (i = 0; zones[i] != 0; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone))
+		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 			continue;
 
 		zone->prev_priority = zone->temp_priority;
@@ -1254,7 +1256,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 		return;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
-	if (!cpuset_zone_allowed(zone))
+	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 		return;
 	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
 		return;
@@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
 	sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
 	/* Don't reclaim the zone if there are other reclaimers active */
-	if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+	if (atomic_read(&zone->reclaim_in_progress) > 0)
 		goto out;
 
 	shrink_zone(zone, &sc);
 	total_reclaimed = sc.nr_reclaimed;
 
 out:
-	atomic_dec(&zone->reclaim_in_progress);
 	return total_reclaimed;
 }
 
@@ -1375,6 +1376,9 @@ asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
 	struct zone *z;
 	int i;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	if (node >= MAX_NUMNODES || !node_online(node))
 		return -EINVAL;
 
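
The accounting change above moves the zone->reclaim_in_progress increment/decrement from the shrink_caches() caller into shrink_zone() itself, so every reclaim path is counted, while the opportunistic zone_reclaim() path now only reads the counter and backs off if another reclaimer is active. The following is a minimal userspace sketch of that pattern (not kernel code); the struct and function names mirror the patch but the C11 atomics and main() driver are illustrative assumptions.

/* Sketch of the reclaim_in_progress pattern used in this patch. */
#include <stdatomic.h>
#include <stdio.h>

struct zone { atomic_int reclaim_in_progress; };

/* The worker owns the counter: every caller is counted for its duration. */
static void shrink_zone(struct zone *zone)
{
	atomic_fetch_add(&zone->reclaim_in_progress, 1);
	/* ... scan the active/inactive lists here ... */
	atomic_fetch_sub(&zone->reclaim_in_progress, 1);
}

/* Opportunistic reclaim: bail out if someone is already reclaiming. */
static int zone_reclaim(struct zone *zone)
{
	if (atomic_load(&zone->reclaim_in_progress) > 0)
		return 0;
	shrink_zone(zone);
	return 1;
}

int main(void)
{
	struct zone z = { .reclaim_in_progress = 0 };
	printf("reclaimed: %d\n", zone_reclaim(&z));
	return 0;
}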