/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired, and the
 * hash is only accessed with the tasklist_lock at least read-acquired, so
 * there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario is when all but one out of roughly 1 million possible
 * PIDs are already allocated: a scan of 32 list entries and of at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
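 *
 * For example, assuming 4 KiB pages: one bitmap page covers
 * 4096 * 8 == 32768 PIDs, so a ~1 million (2^20) PID space needs
 * 2^20 / 32768 == 32 bitmap pages -- the "32 list entries" scanned in
 * the worst case above.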
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)

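/*
 * Note (illustrative): the namespace pointer is folded into the hash, so
 * pid 1 of two different namespaces normally lands in different buckets
 * even though the numeric id is identical.
 */
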
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE * 8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

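/*
 * Example (illustrative, assuming 4 KiB pages): BITS_PER_PAGE == 32768,
 * so mk_pid() maps (map index 1, offset 100) to pid 1*32768 + 100 == 32868;
 * free_pidmap() below inverts this with nr / BITS_PER_PAGE and
 * nr & BITS_PER_PAGE_MASK.
 */
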
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}

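/*
 * Worked example (illustrative): with base == 300, a pid that wrapped
 * around to 5 and one at 310: (unsigned)(5 - 300) is a huge value while
 * (unsigned)(310 - 300) == 10, so 310 is seen before the wrapped 5,
 * exactly the order in which a scan starting at 300 would reach them.
 */
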
/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}

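/*
 * Example of the race this resolves (illustrative): two tasks both
 * observe base == 100; one allocates pid 101, the other pid 102.
 * If 102 is written first, the loser sees last_write == 102 and
 * pid_before(100, 102, 101) is false, so it does not overwrite the
 * "later" value -- otherwise 101 could be handed out again on the
 * very next allocation.
 */
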
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
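	/*
	 * Example (illustrative, assuming 4 KiB pages and the default
	 * pid_max of 32768): the whole PID space fits in one bitmap page,
	 * so DIV_ROUND_UP(pid_max, BITS_PER_PAGE) == 1 and max_scan is 1
	 * whenever offset != 0 -- i.e. one wrapped re-scan of the same
	 * page starting at offset 0 (or RESERVED_PIDS).
	 */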
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			} while (offset < BITS_PER_PAGE && pid < pid_max);
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;	/* we have wrapped and scanned everything */
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch(--ns->nr_hashed) {
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

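	/*
	 * At this point pid->numbers[] holds one upid per level, ordered
	 * from the init namespace outwards. Illustrative example for a
	 * task in a depth-1 namespace: numbers[0] == { 12345, &init_pid_ns }
	 * and numbers[1] == { 7, ns }.
	 */
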
	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (ns->nr_hashed < 0)
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

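/*
 * Illustrative example: for a pid whose numbers[] are
 * { 12345, &init_pid_ns } at level 0 and { 7, child_ns } at level 1,
 * pid_nr_ns(pid, &init_pid_ns) returns 12345, pid_nr_ns(pid, child_ns)
 * returns 7, and any unrelated namespace gets 0.
 */
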
pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

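/*
 * Sizing example (illustrative): the default pidhash_shift of 4 gives
 * 1U << 4 == 16 buckets; alloc_large_system_hash() may raise the shift
 * on machines with more memory, up to the 4096-bucket limit noted above.
 */
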
void __init pidmap_init(void)
{
	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
	init_pid_ns.nr_hashed = 1;

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}