3 * Copyright (C) 2011 Novell Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
11 #include <linux/slab.h>
12 #include <linux/namei.h>
13 #include <linux/file.h>
14 #include <linux/xattr.h>
15 #include <linux/rbtree.h>
16 #include <linux/security.h>
17 #include <linux/cred.h>
18 #include "overlayfs.h"
/*
 * One merged directory entry.  Entries are linked into a list through
 * l_node (see list_add_tail(&p->l_node, ...) below) and into an rb-tree
 * through an rb_node member named 'node' (see ovl_cache_entry_from_node).
 * Other fields referenced later in this file (name[], len, ino, type,
 * is_whiteout, is_cursor) are elided from this view.
 */
20 struct ovl_cache_entry {
	/* link in rdd->list / cache->entries / rdd->middle */
24 struct list_head l_node;
/*
 * Cached merged listing of a directory, shared between open instances.
 * 'refcount' and 'version' members are referenced by ovl_cache_put() and
 * ovl_cache_get() but are elided from this view.
 */
31 struct ovl_dir_cache {
	/* list of ovl_cache_entry, threaded via l_node */
34 struct list_head entries;
/*
 * Scratch state for one merged readdir pass.  Embeds the VFS dir_context
 * so the ->actor callback (ovl_fill_merge) can recover it via
 * container_of().  Additional members (root rb-tree, is_merge, count, err)
 * are referenced elsewhere in this file but elided from this view.
 */
37 struct ovl_readdir_data {
38 struct dir_context ctx;
	/* destination list the entries end up on */
41 struct list_head *list;
	/*
	 * Marker list head: lower-only entries are collected behind it so
	 * they can be spliced in front of the upper entries (see
	 * ovl_dir_read_merged()).
	 */
42 struct list_head middle;
/*
 * NOTE(review): the fields below appear to belong to struct ovl_dir_file,
 * whose opening line is not visible in this view — they are consistently
 * accessed as od->cache, od->cursor, od->realfile and od->upperfile, where
 * od comes from file->private_data.  Confirm against the full file.
 */
	/* shared merged-listing cache; dropped via ovl_cache_put() */
50 struct ovl_dir_cache *cache;
	/* dummy entry threaded into cache->entries to remember position */
51 struct ovl_cache_entry cursor;
	/* underlying real directory opened in ovl_dir_open() */
52 struct file *realfile;
	/* lazily-opened upper dir, used by ovl_dir_fsync() after copy-up */
53 struct file *upperfile;
/* Map an rb-tree node back to its containing ovl_cache_entry. */
56 static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
58 return container_of(n, struct ovl_cache_entry, node);
/*
 * Look up an entry by (name, len) in the rb-tree at @root.
 *
 * Ordering: compare only the first @len bytes; when those compare equal,
 * a stored name longer than @len (len < p->len) sorts to the left.  This
 * must mirror the insertion order used in ovl_cache_entry_add_rb().
 * The equal-match return and the final NULL return are elided here.
 */
61 static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
62 const char *name, int len)
64 struct rb_node *node = root->rb_node;
68 struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
	/* bounded compare: 'name' need not be NUL-terminated at len */
70 cmp = strncmp(name, p->name, len);
72 node = p->node.rb_right;
73 else if (cmp < 0 || len < p->len)
74 node = p->node.rb_left;
/*
 * Allocate a new cache entry with the name stored inline in the trailing
 * flexible array: size is computed with offsetof() into name[len + 1],
 * the +1 leaving room for a NUL terminator.
 * NOTE(review): the NULL check after kmalloc() and the NUL-termination /
 * field assignments besides those shown are elided from this view —
 * confirm they exist in the full file.
 */
82 static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
83 u64 ino, unsigned int d_type)
85 struct ovl_cache_entry *p;
86 size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
88 p = kmalloc(size, GFP_KERNEL);
90 memcpy(p->name, name, len);
	/* whiteout status is determined later by ovl_dir_mark_whiteouts() */
95 p->is_whiteout = false;
/*
 * Insert a new entry keyed by (name, len) into both the rb-tree
 * (rdd->root, for duplicate detection across layers) and the ordered
 * list (rdd->list, for iteration).  The descent uses the same
 * comparison as ovl_cache_entry_find(); the duplicate-found branch and
 * the error/return paths are elided from this view.
 */
102 static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
103 const char *name, int len, u64 ino,
106 struct rb_node **newp = &rdd->root.rb_node;
107 struct rb_node *parent = NULL;
108 struct ovl_cache_entry *p;
112 struct ovl_cache_entry *tmp;
115 tmp = ovl_cache_entry_from_node(*newp);
116 cmp = strncmp(name, tmp->name, len);
118 newp = &tmp->node.rb_right;
119 else if (cmp < 0 || len < tmp->len)
120 newp = &tmp->node.rb_left;
125 p = ovl_cache_entry_new(name, len, ino, d_type);
	/* entry lives on both the list and the rb-tree from here on */
129 list_add_tail(&p->l_node, rdd->list);
130 rb_link_node(&p->node, parent, newp);
131 rb_insert_color(&p->node, &rdd->root);
/*
 * Handle one entry from the lower directory during a merged read.
 * If the name was already seen (found in the rb-tree filled from the
 * upper layer), the existing entry is just moved to the 'middle' list;
 * otherwise a fresh entry is created and appended there.  The NULL
 * check after ovl_cache_entry_new() and the return are elided here.
 */
136 static int ovl_fill_lower(struct ovl_readdir_data *rdd,
137 const char *name, int namelen,
138 loff_t offset, u64 ino, unsigned int d_type)
140 struct ovl_cache_entry *p;
142 p = ovl_cache_entry_find(&rdd->root, name, namelen);
	/* already present: keep one copy, reposition it among lower entries */
144 list_move_tail(&p->l_node, &rdd->middle);
146 p = ovl_cache_entry_new(name, namelen, ino, d_type);
150 list_add_tail(&p->l_node, &rdd->middle);
/*
 * Free every ovl_cache_entry on @list (the kfree of each entry is elided
 * from this view) and leave the list itself empty and reusable.
 * _safe iteration is required because entries are freed while walking.
 */
156 void ovl_cache_free(struct list_head *list)
158 struct ovl_cache_entry *p;
159 struct ovl_cache_entry *n;
161 list_for_each_entry_safe(p, n, list, l_node)
164 INIT_LIST_HEAD(list);
/*
 * Drop this open file's reference on the shared directory cache.
 * The cursor is unhooked from the entry list first.  On the last
 * reference the cache is detached from the dentry (only if the dentry
 * still points at this cache) and its entries are freed; the refcount
 * decrement itself and the final kfree(cache) are elided from this view.
 */
167 static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
169 struct ovl_dir_cache *cache = od->cache;
171 list_del_init(&od->cursor.l_node);
	/* refcount underflow would mean a put without a matching get */
172 WARN_ON(cache->refcount <= 0);
174 if (!cache->refcount) {
175 if (ovl_dir_cache(dentry) == cache)
176 ovl_set_dir_cache(dentry, NULL);
178 ovl_cache_free(&cache->entries);
/*
 * dir_context actor shared by both readdir passes.  Recovers the
 * ovl_readdir_data from the embedded ctx and dispatches: rb-tree insert
 * for the first (upper) pass, ovl_fill_lower() for the second (lower)
 * pass.  NOTE(review): the condition selecting between the two (likely
 * an is_merge flag on rdd) is elided from this view — confirm.
 */
183 static int ovl_fill_merge(struct dir_context *ctx, const char *name,
184 int namelen, loff_t offset, u64 ino,
187 struct ovl_readdir_data *rdd =
188 container_of(ctx, struct ovl_readdir_data, ctx);
192 return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
194 return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
/*
 * Open the real (upper or lower) directory and feed all of its entries
 * through rdd->ctx.actor via iterate_dir().  The loop repeats while no
 * error occurred and rdd->count is non-zero (count setup and the fput of
 * realfile are elided from this view).  Returns 0 or a negative errno.
 */
197 static inline int ovl_dir_read(struct path *realpath,
198 struct ovl_readdir_data *rdd)
200 struct file *realfile;
203 realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
204 if (IS_ERR(realfile))
205 return PTR_ERR(realfile);
211 err = iterate_dir(realfile, &rdd->ctx);
214 } while (!err && rdd->count);
/*
 * Called when the directory stream position is reset.  Drops a cached
 * listing that has gone stale (dentry version changed since it was
 * built), and downgrades is_real if the dentry has since become a
 * merged directory (e.g. after copy-up); the od->is_real = false
 * assignment is elided from this view.
 */
220 static void ovl_dir_reset(struct file *file)
222 struct ovl_dir_file *od = file->private_data;
223 struct ovl_dir_cache *cache = od->cache;
224 struct dentry *dentry = file->f_path.dentry;
225 enum ovl_path_type type = ovl_path_type(dentry);
227 if (cache && ovl_dentry_version_get(dentry) != cache->version) {
228 ovl_cache_put(od, dentry);
	/* a non-real dir must be a merge dir — anything else is a bug */
231 WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
232 if (od->is_real && type == OVL_PATH_MERGE)
/*
 * Walk the collected upper-layer entries and set is_whiteout on those
 * that are whiteouts, so the merge pass can suppress them.  Only
 * char-device entries are candidates (p->type != DT_CHR is skipped —
 * overlayfs represents whiteouts as char devices; the 'continue' after
 * that check is elided from this view).  Lookups are done with a
 * temporarily raised CAP_DAC_OVERRIDE credential and under the parent
 * i_mutex; the dput(dentry) after the check is elided.
 */
236 static int ovl_dir_mark_whiteouts(struct dentry *dir,
237 struct ovl_readdir_data *rdd)
239 struct ovl_cache_entry *p;
240 struct dentry *dentry;
241 const struct cred *old_cred;
242 struct cred *override_cred;
244 override_cred = prepare_creds();
245 if (!override_cred) {
	/* on -ENOMEM the partially-built list is discarded */
246 ovl_cache_free(rdd->list);
251 * CAP_DAC_OVERRIDE for lookup
253 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
254 old_cred = override_creds(override_cred);
256 mutex_lock(&dir->d_inode->i_mutex);
257 list_for_each_entry(p, rdd->list, l_node) {
261 if (p->type != DT_CHR)
264 dentry = lookup_one_len(p->name, dir, p->len);
268 p->is_whiteout = ovl_is_whiteout(dentry);
271 mutex_unlock(&dir->d_inode->i_mutex);
	/* restore caller credentials and drop our override */
273 revert_creds(old_cred);
274 put_cred(override_cred);
/*
 * Build the merged entry list for @dentry into @list:
 *   1) read the upper dir (entries go into rdd's rb-tree and list),
 *   2) mark upper whiteouts if a lower layer exists,
 *   3) read the lower dir, collecting its entries behind the 'middle'
 *      marker so they end up in front of the upper entries — this keeps
 *      f_pos offsets reasonably stable across cache rebuilds.
 * Error bail-outs between the steps are elided from this view.
 */
279 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
282 struct path lowerpath;
283 struct path upperpath;
284 struct ovl_readdir_data rdd = {
285 .ctx.actor = ovl_fill_merge,
291 ovl_path_lower(dentry, &lowerpath);
292 ovl_path_upper(dentry, &upperpath);
294 if (upperpath.dentry) {
295 err = ovl_dir_read(&upperpath, &rdd);
299 if (lowerpath.dentry) {
300 err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
305 if (lowerpath.dentry) {
307 * Insert lowerpath entries before upperpath ones, this allows
308 * offsets to be reasonably constant
	/* temporarily splice the marker at the head of the result list */
310 list_add(&rdd.middle, rdd.list);
312 err = ovl_dir_read(&lowerpath, &rdd);
313 list_del(&rdd.middle);
/*
 * Position od->cursor just before the entry at logical offset @pos by
 * walking the cached entry list (the per-entry counting/termination
 * logic inside the loop is elided from this view).
 */
319 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
321 struct ovl_cache_entry *p;
324 list_for_each_entry(p, &od->cache->entries, l_node) {
331 list_move_tail(&od->cursor.l_node, &p->l_node);
/*
 * Get the merged-listing cache for @dentry, building it if absent or
 * stale.  A cached listing is reused only while the dentry version
 * still matches the one recorded at build time (refcount take/init and
 * intermediate returns are elided from this view).
 * Returns the cache or ERR_PTR(-ENOMEM) / an ERR_PTR from the read.
 */
334 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
337 struct ovl_dir_cache *cache;
339 cache = ovl_dir_cache(dentry);
340 if (cache && ovl_dentry_version_get(dentry) == cache->version) {
	/* stale cache: detach it so a fresh one can be installed */
344 ovl_set_dir_cache(dentry, NULL);
346 cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
348 return ERR_PTR(-ENOMEM);
351 INIT_LIST_HEAD(&cache->entries);
353 res = ovl_dir_read_merged(dentry, &cache->entries);
355 ovl_cache_free(&cache->entries);
	/* record the version the listing corresponds to, then publish it */
360 cache->version = ovl_dentry_version_get(dentry);
361 ovl_set_dir_cache(dentry, cache);
/*
 * ->iterate: for a non-merged dir, just pass through to the real
 * directory.  For a merged dir, (re)build the cache, seek the cursor to
 * ctx->pos, then emit every non-whiteout entry after the cursor,
 * advancing the cursor as we go (the ctx->pos increment and the
 * is_real/od->cache guards around these steps are elided from this view).
 */
366 static int ovl_iterate(struct file *file, struct dir_context *ctx)
368 struct ovl_dir_file *od = file->private_data;
369 struct dentry *dentry = file->f_path.dentry;
375 return iterate_dir(od->realfile, ctx);
378 struct ovl_dir_cache *cache;
380 cache = ovl_cache_get(dentry);
382 return PTR_ERR(cache);
385 ovl_seek_cursor(od, ctx->pos);
	/* walk from the cursor to the end of the cached list */
388 while (od->cursor.l_node.next != &od->cache->entries) {
389 struct ovl_cache_entry *p;
391 p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
394 if (!p->is_whiteout) {
	/* stop when the user buffer is full */
395 if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
	/* hop the cursor over the entry we just handled */
400 list_move(&od->cursor.l_node, &p->l_node);
/*
 * ->llseek, serialized by i_mutex.  For a non-merged dir, delegate to
 * the real file and mirror its f_pos.  For a merged dir, compute the
 * new offset (SEEK_CUR adds file->f_pos; the origin switch and bounds
 * checks are elided from this view) and reposition the cursor when the
 * position actually changes while a cache exists.
 */
405 static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
408 struct ovl_dir_file *od = file->private_data;
410 mutex_lock(&file_inode(file)->i_mutex);
415 res = vfs_llseek(od->realfile, offset, origin);
416 file->f_pos = od->realfile->f_pos;
	/* SEEK_CUR: offset is relative to the current position */
422 offset += file->f_pos;
432 if (offset != file->f_pos) {
433 file->f_pos = offset;
435 ovl_seek_cursor(od, offset);
440 mutex_unlock(&file_inode(file)->i_mutex);
/*
 * ->fsync: a dir that started out lower may have been copied up since
 * open, in which case syncing the original lower file would be useless —
 * sync the upper dir instead.  od->upperfile is opened lazily with a
 * racy-check / lock / re-check pattern under i_mutex: the loser of the
 * race drops its own file (the fput is elided from this view) and uses
 * the one that won.  NOTE(review): lockless_dereference() +
 * smp_mb__before_spinlock() are historical kernel primitives pairing
 * with the publication of od->upperfile — confirm against the kernel
 * version this file targets.
 */
445 static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
448 struct ovl_dir_file *od = file->private_data;
449 struct dentry *dentry = file->f_path.dentry;
450 struct file *realfile = od->realfile;
453 * Need to check if we started out being a lower dir, but got copied up
455 if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
456 struct inode *inode = file_inode(file);
	/* racy first look, confirmed below under the mutex */
458 realfile = lockless_dereference(od->upperfile);
460 struct path upperpath;
462 ovl_path_upper(dentry, &upperpath);
463 realfile = ovl_path_open(&upperpath, O_RDONLY);
464 smp_mb__before_spinlock();
465 mutex_lock(&inode->i_mutex);
466 if (!od->upperfile) {
467 if (IS_ERR(realfile)) {
468 mutex_unlock(&inode->i_mutex);
469 return PTR_ERR(realfile);
	/* we won the race: publish our file */
471 od->upperfile = realfile;
473 /* somebody has beaten us to it */
474 if (!IS_ERR(realfile))
476 realfile = od->upperfile;
478 mutex_unlock(&inode->i_mutex);
	/* sync whichever real directory file is current */
482 return vfs_fsync_range(realfile, start, end, datasync);
/*
 * ->release: drop the cache reference under i_mutex (cache access is
 * serialized on the inode mutex throughout this file).  The fput of
 * od->realfile/od->upperfile and the kfree(od) are elided from this view.
 */
485 static int ovl_dir_release(struct inode *inode, struct file *file)
487 struct ovl_dir_file *od = file->private_data;
490 mutex_lock(&inode->i_mutex);
491 ovl_cache_put(od, file->f_path.dentry);
492 mutex_unlock(&inode->i_mutex);
/*
 * ->open: allocate the per-open state, open the real underlying
 * directory with the caller's flags, and record whether this dentry is
 * a plain (non-merged) and/or upper directory so later ops can take the
 * fast pass-through paths.  The -ENOMEM check after kzalloc and the
 * kfree(od) on the error path are elided from this view.
 */
502 static int ovl_dir_open(struct inode *inode, struct file *file)
504 struct path realpath;
505 struct file *realfile;
506 struct ovl_dir_file *od;
507 enum ovl_path_type type;
509 od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
513 type = ovl_path_real(file->f_path.dentry, &realpath);
514 realfile = ovl_path_open(&realpath, file->f_flags);
515 if (IS_ERR(realfile)) {
517 return PTR_ERR(realfile);
	/* cursor starts detached; ovl_seek_cursor() threads it in later */
519 INIT_LIST_HEAD(&od->cursor.l_node);
520 od->realfile = realfile;
521 od->is_real = (type != OVL_PATH_MERGE);
522 od->is_upper = (type != OVL_PATH_LOWER);
	/* marks the cursor so iteration can skip it among real entries */
523 od->cursor.is_cursor = true;
524 file->private_data = od;
/* Directory file_operations exported to the overlayfs inode code. */
529 const struct file_operations ovl_dir_operations = {
530 .read = generic_read_dir,
531 .open = ovl_dir_open,
532 .iterate = ovl_iterate,
533 .llseek = ovl_dir_llseek,
534 .fsync = ovl_dir_fsync,
535 .release = ovl_dir_release,
/*
 * Check whether the merged view of @dentry is empty (only "." and ".."
 * present), filling @list with the merged entries as a side effect so
 * the caller can reuse it (e.g. for whiteout cleanup).  The handling of
 * non-dot entries and the -ENOTEMPTY-style result are elided from this
 * view.
 */
538 int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
541 struct ovl_cache_entry *p;
543 err = ovl_dir_read_merged(dentry, list);
549 list_for_each_entry(p, list, l_node) {
553 if (p->name[0] == '.') {
	/* ".." — ignored like "." */
556 if (p->len == 2 && p->name[1] == '.')
/*
 * Remove the whiteout entries named in @list from the @upper directory,
 * e.g. after the caller has verified the merged dir is empty.  Takes
 * the parent i_mutex with I_MUTEX_CHILD nesting because the caller
 * already holds a related inode mutex.  A failed lookup is logged and
 * skipped (the 'continue' and the dput after ovl_cleanup() are elided
 * from this view); the is_whiteout filter inside the loop is likewise
 * elided.
 */
566 void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
568 struct ovl_cache_entry *p;
570 mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
571 list_for_each_entry(p, list, l_node) {
572 struct dentry *dentry;
577 dentry = lookup_one_len(p->name, upper, p->len);
578 if (IS_ERR(dentry)) {
	/* best-effort: report and move on rather than abort the loop */
579 pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
580 upper->d_name.name, p->len, p->name,
581 (int) PTR_ERR(dentry));
584 ovl_cleanup(upper->d_inode, dentry);
587 mutex_unlock(&upper->d_inode->i_mutex);