/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

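/*
 * Write out attribute changes as a brand-new raw inode on the flash.
 * For device nodes and symlinks the old payload (device numbers or
 * target name) is carried over into the new node, which then
 * obsoletes the previous metadata node.
 */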
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);

	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_gid = je16_to_cpu(ri->gid);

	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

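/* VFS ->setattr entry point: validate the request, write the change
   out via jffs2_do_setattr(), and fix up the ACL mode bits if the
   permissions changed. */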
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int rc;

	rc = inode_change_ok(dentry->d_inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(dentry->d_inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = jffs2_acl_chmod(dentry->d_inode);

	return rc;
}

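/* ->statfs: sizes are reported in PAGE_SIZE blocks. "Free" space is
   dirty + free space on the medium, minus the erase blocks reserved
   for the garbage collector, which user writes may never consume. */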
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}

void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	jffs2_do_clear_inode(c, f);
}

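/* Look up an inode by number, reading it back from the flash via
   jffs2_do_read_inode() if it is not already present in the inode
   cache. */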
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);

	if (ret) {
		mutex_unlock(&f->sem);
		iget_failed(inode);
		return ERR_PTR(ret);
	}
	inode->i_mode = jemode_to_cpu(latest_node.mode);
	inode->i_uid = je16_to_cpu(latest_node.uid);
	inode->i_gid = je16_to_cpu(latest_node.gid);
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}

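/* ->dirty_inode: for attribute-only changes (I_DIRTY_DATASYNC) the new
   attributes must be written to the medium straight away as a fresh
   inode node; anything else is handled when the data itself is
   written out. */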
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

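/* Remount helper: stop the garbage-collect thread and flush the
   write-buffer before going read-only, and (re)start the thread when
   remounting read-write. */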
int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(current_fsuid());

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(dir_i->i_gid);
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(current_fsgid());
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}

	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_gid = je16_to_cpu(ri->gid);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to the usual big powers of 2. And keep
	 * to sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

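	/* Example: a 64MiB medium gives size_mb = 64, so hashsize =
	   128 & ~0x3f = 128 buckets, subject to the bounds below. */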
	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;
	return hashsize;
}

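/* Mount-time setup: sanity-check the MTD device, set up any
   write-buffer handling, size the inocache hash, build the in-core
   maps via jffs2_do_mount_fs() and finally read the root inode (#1). */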
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

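/* Helpers for the garbage collector. OFNI_EDONI_2SFFJ() is
   JFFS2_INODE_INFO() spelled backwards: it maps a struct
   jffs2_inode_info back to its enclosing VFS inode. */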
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
				   (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}

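/* Per-flash-type write-buffer setup and teardown: NAND, DataFlash,
   write-buffered NOR (Intel "Sibley") and UBI volumes each get their
   own handling. */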
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}