unsigned int generation;
};
+enum mem_cgroup_filter_t {
+ VISIT, /* visit the current node */
+ SKIP, /* skip the current node and continue traversal */
+ SKIP_TREE, /* skip the whole subtree and continue traversal */
+};
+
+/*
+ * A mem_cgroup_filter_t predicate instructs mem_cgroup_iter_cond how to
+ * iterate through the hierarchy tree. Each tree element is checked by the
+ * predicate before it is returned by the iterator. If a filter returns
+ * SKIP, the iterator continues the traversal with the next node of the
+ * pre-order walk; if it returns SKIP_TREE, it continues with the next
+ * node that does not belong to the skipped node's subtree.
+ */
+typedef enum mem_cgroup_filter_t
+(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
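
For illustration only (this helper is not part of the patch), a predicate
matching the mem_cgroup_iter_filter typedef could prune one subtree like
this; skip_one_subtree and the skip_memcg pointer are hypothetical names:

	/* hypothetical example: prune everything under skip_memcg */
	static struct mem_cgroup *skip_memcg;	/* subtree root to avoid */

	static enum mem_cgroup_filter_t
	skip_one_subtree(struct mem_cgroup *memcg, struct mem_cgroup *root)
	{
		if (memcg == skip_memcg)
			return SKIP_TREE;	/* do not descend below this group */
		return VISIT;			/* visit every other node */
	}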
+
#ifdef CONFIG_MEMCG
/*
* All "charge" functions with gfp_mask should use GFP_KERNEL or
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
+struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond);
+
+static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim)
+{
+ return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
+}
+
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
mem_cgroup_update_page_stat(page, idx, -1);
}
-bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
struct mem_cgroup *root);
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
+static inline struct mem_cgroup *
+mem_cgroup_iter_cond(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond)
+{
+ /* first call must return non-NULL, second return NULL */
+ return (struct mem_cgroup *)(unsigned long)!prev;
+}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
}
static inline
-bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
struct mem_cgroup *root)
{
- return false;
+ return VISIT;
}
static inline void mem_cgroup_split_huge_fixup(struct page *head)
return memcg;
}
+static enum mem_cgroup_filter_t
+mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
+ mem_cgroup_iter_filter cond)
+{
+ if (!cond)
+ return VISIT;
+ return cond(memcg, root);
+}
+
/*
* Returns the next (in a pre-order walk) live memcg (with elevated css
* ref. count) or NULL if the whole root's subtree has been visited.
* Helper function to be used by mem_cgroup_iter.
*/
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
- struct mem_cgroup *last_visited)
+ struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
{
struct cgroup_subsys_state *prev_css, *next_css;
if (next_css) {
struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
- if (css_tryget(&mem->css))
- return mem;
- else {
+ switch (mem_cgroup_filter(mem, root, cond)) {
+ case SKIP:
prev_css = next_css;
goto skip_node;
+ case SKIP_TREE:
+ if (mem == root)
+ return NULL;
+ /*
+ * css_rightmost_descendant is not an optimal way to
+ * skip through a subtree (especially for imbalanced
+ * trees leaning to the right) but that is what we have
+ * right now. A more effective solution would traverse
+ * right-up for the first non-NULL node without calling
+ * css_next_descendant_pre afterwards; a sketch of that
+ * idea follows this hunk.
+ */
+ prev_css = css_rightmost_descendant(next_css);
+ goto skip_node;
+ case VISIT:
+ if (css_tryget(&mem->css))
+ return mem;
+ /* css reference is gone, skip over this node */
+ prev_css = next_css;
+ goto skip_node;
}
}
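
The comment in the SKIP_TREE branch above alludes to a cheaper way of
skipping a subtree. A minimal sketch of that "right-up" traversal, not
part of the patch and assuming the css_next_child()/css_parent() helpers
of this cgroup API generation:

	/*
	 * Sketch: return the first right-hand sibling of @pos or of one of
	 * its ancestors (up to @root), skipping @pos's subtree in O(depth)
	 * steps instead of descending to its rightmost leaf first.
	 */
	static struct cgroup_subsys_state *
	css_skip_subtree(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
	{
		struct cgroup_subsys_state *next;

		while (pos != root) {
			next = css_next_child(pos, css_parent(pos));
			if (next)
				return next;
			pos = css_parent(pos);
		}
		return NULL;	/* nothing left after @pos under @root */
	}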
* @root: hierarchy root
* @prev: previously returned memcg, NULL on first invocation
* @reclaim: cookie for shared reclaim walks, NULL for full walks
+ * @cond: filter for visited nodes, NULL for no filter
*
* Returns references to children of the hierarchy below @root, or
* @root itself, or %NULL after a full round-trip.
* divide up the memcgs in the hierarchy among all concurrent
* reclaimers operating on the same zone and priority.
*/
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
+struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim)
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond)
{
struct mem_cgroup *memcg = NULL;
struct mem_cgroup *last_visited = NULL;
- if (mem_cgroup_disabled())
- return NULL;
+ if (mem_cgroup_disabled()) {
+ /* first call must return non-NULL, second return NULL */
+ return (struct mem_cgroup *)(unsigned long)!prev;
+ }
if (!root)
root = root_mem_cgroup;
if (!root->use_hierarchy && root != root_mem_cgroup) {
if (prev)
goto out_css_put;
- return root;
+ if (mem_cgroup_filter(root, root, cond) == VISIT)
+ return root;
+ return NULL;
}
rcu_read_lock();
last_visited = mem_cgroup_iter_load(iter, root, &seq);
}
- memcg = __mem_cgroup_iter_next(root, last_visited);
+ memcg = __mem_cgroup_iter_next(root, last_visited, cond);
if (reclaim) {
mem_cgroup_iter_update(iter, last_visited, memcg, seq);
reclaim->generation = iter->generation;
}
- if (prev && !memcg)
+ /*
+ * We have finished the whole tree walk, or no group has been
+ * visited because the filter told us to skip the root node.
+ */
+ if (!memcg && (prev || (cond && !last_visited)))
goto out_unlock;
}
out_unlock:
* a) it is over its soft limit
* b) any parent up the hierarchy is over its soft limit
*/
-bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
struct mem_cgroup *root)
{
struct mem_cgroup *parent = memcg;
if (res_counter_soft_limit_excess(&memcg->res))
- return true;
+ return VISIT;
/*
* If any parent up to the root in the hierarchy is over its soft limit
* then we have to obey and reclaim from this group as well.
*/
while ((parent = parent_mem_cgroup(parent))) {
if (res_counter_soft_limit_excess(&parent->res))
- return true;
+ return VISIT;
if (parent == root)
break;
}
- return false;
+ return SKIP;
}
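
As a usage illustration (not part of the patch), the predicate can be
passed straight to the iterator; with a NULL reclaim cookie this makes
one full walk that visits only soft-limit-eligible groups:

	struct mem_cgroup *memcg = NULL;

	while ((memcg = mem_cgroup_iter_cond(root, memcg, NULL,
					mem_cgroup_soft_reclaim_eligible))) {
		/* reclaim from memcg; groups returning SKIP are stepped over */
	}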
/*
.zone = zone,
.priority = sc->priority,
};
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg = NULL;
+ mem_cgroup_iter_filter filter = (soft_reclaim) ?
+ mem_cgroup_soft_reclaim_eligible : NULL;
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;
- memcg = mem_cgroup_iter(root, NULL, &reclaim);
- do {
+ while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
struct lruvec *lruvec;
- if (soft_reclaim &&
- !mem_cgroup_soft_reclaim_eligible(memcg, root)) {
- memcg = mem_cgroup_iter(root, memcg, &reclaim);
- continue;
- }
-
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
shrink_lruvec(lruvec, sc);
mem_cgroup_iter_break(root, memcg);
break;
}
- memcg = mem_cgroup_iter(root, memcg, &reclaim);
- } while (memcg);
+ }
vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
sc->nr_scanned - nr_scanned,