ext4: move check under lock scope to close a race.
[firefly-linux-kernel-4.4.55.git] / fs / ext4 / file.c
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

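/*
 * Wait until all in-flight conversions of unwritten extents on this
 * inode have completed; i_unwritten counts the conversions still
 * pending.
 */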
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

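	/*
	 * If either the starting offset or any iovec segment is not
	 * aligned to the filesystem block size, the write touches a
	 * partial block and must be serialized against other unaligned
	 * AIO to the same block.
	 */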
	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = io_is_direct(file);
	int overwrite = 0;
	size_t length = iov_iter_count(from);
	ssize_t ret;
	loff_t pos = iocb->ki_pos;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above
	 * ext4_unaligned_aio().  In the case of O_APPEND, assume that we
	 * must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (file->f_flags & O_APPEND ||
	     ext4_unaligned_aio(inode, from, pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	if (file->f_flags & O_APPEND)
		iocb->ki_pos = pos = i_size_read(inode);

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if ((pos > sbi->s_bitmap_maxbytes) ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EFBIG;
			goto errout;
		}

		if (pos + length > sbi->s_bitmap_maxbytes)
			iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
	}

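	/*
	 * iocb->private tells ext4's direct IO path whether this write
	 * is a pure overwrite of already-allocated, initialized blocks;
	 * if it is, the IO can be issued without holding i_mutex for
	 * its whole duration.
	 */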
	iocb->private = &overwrite;
	if (o_direct) {
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the requested
			 * blocks have been preallocated, whether or not
			 * they are initialized.  To exclude unwritten
			 * extents we must also check m_flags:
			 * EXT4_MAP_MAPPED is only set for initialized
			 * extents, so this is a pure overwrite only if
			 * both conditions hold.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

errout:
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, ext4_get_block);
					/* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
};
#else
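/* Without CONFIG_FS_DAX, fall back to the regular page-cache vm_ops. */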
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

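	/*
	 * For encrypted inodes, try to set up the encryption key now.
	 * Note that a failure here is not treated as fatal: the mmap
	 * itself succeeds, and IO on the mapping will fail later if no
	 * key is available.
	 */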
	if (ext4_encrypted_inode(inode)) {
		int err = ext4_generate_encryption_key(inode);
		if (err)
			return 0;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

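			/* One credit: only the superblock buffer is dirtied. */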
			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present.
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	ret = dquot_file_open(inode, filp);
	if (!ret && ext4_encrypted_inode(inode)) {
		ret = ext4_generate_encryption_key(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this
 * lets one function implement SEEK_DATA/SEEK_HOLE for both
 * block-mapped and extent-mapped files.  Once the extent status tree
 * tracks all extent state for a file, we will be able to use it
 * directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we have to
 * consult the page cache to check whether there is data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, the
 * extent is reported as data or as a hole depending on whether the
 * page cache holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If no pages were found and this is either the
			 * first pass or we have not yet scanned past the
			 * end offset, there is a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * On the first pass, if the offset lies before the
		 * offset of the first page found, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If this page lies beyond the end of the range
			 * but the current offset is still inside it, the
			 * gap before this page is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
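			/*
			 * Walk the buffers in the page: a buffer that is
			 * uptodate or unwritten counts as data, anything
			 * else counts as a hole.
			 */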
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * We got fewer pages than we asked for, so there must be
		 * a hole after the last page we found.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

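	/*
	 * Scan forward one logical block at a time until we find a block
	 * that holds data: a written mapped block, a delayed-allocation
	 * block, or an unwritten block with data in the page cache.
	 */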
	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * reported as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

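	/*
	 * Scan forward, skipping over written, delayed, and
	 * cached-unwritten blocks, until we find the first block that
	 * is a hole.
	 */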
	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip past
		 * it; delayed-allocation data counts as data, not a hole.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * reported as data or as a hole depending on whether the
		 * page cache has data for it; if it is all data, keep
		 * scanning past it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

#ifdef CONFIG_FS_DAX
const struct file_operations ext4_dax_file_operations = {
	.llseek		= ext4_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	/* Splice not yet supported with DAX */
	.fallocate	= ext4_fallocate,
};
#endif

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};