staging/lustre/llite: handle io init failure in ll_fault_io_init()
author John L. Hammond <john.hammond@intel.com>
Wed, 24 Jul 2013 17:17:25 +0000 (01:17 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 24 Jul 2013 17:29:19 +0000 (10:29 -0700)
In ll_fault_io_init(), if cl_io_init() fails, clean up and return an
ERR_PTR(). This fixes an oops in the page fault handling code when a
partially initialized io is used. In ll_page_mkwrite0(), do not call
cl_io_fini() on an ERR_PTR().
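
For illustration, the fix adopts the standard kernel ERR_PTR()/IS_ERR()
convention: an init routine either returns a fully initialized object or
tears down whatever it already set up and returns an encoded negative
errno, and callers test the pointer before using or finalizing it. The
sketch below shows that convention only; the example_io type,
example_io_init() and example_io_user() are hypothetical stand-ins, with
kzalloc()/kfree() in place of the real setup and teardown calls
(cl_io_fini() and cl_env_nested_put() in the hunk below).

        #include <linux/err.h>
        #include <linux/slab.h>

        struct example_io {
                int ready;
        };

        /* Hypothetical allocator mirroring the patched ll_fault_io_init():
         * on failure, release everything set up so far and return an
         * ERR_PTR() instead of a partially initialized object.
         * init_rc must be 0 or a negative errno, mirroring the
         * LASSERT(rc < 0) in the patch. */
        static struct example_io *example_io_init(int init_rc)
        {
                struct example_io *io;

                io = kzalloc(sizeof(*io), GFP_KERNEL);
                if (io == NULL)
                        return ERR_PTR(-ENOMEM);

                if (init_rc != 0) {     /* stands in for cl_io_init() failing */
                        kfree(io);      /* clean up before reporting the error */
                        return ERR_PTR(init_rc);
                }

                io->ready = 1;
                return io;
        }

        /* Hypothetical caller mirroring the patched ll_page_mkwrite0():
         * check IS_ERR() first so the finalizer (cl_io_fini() in the real
         * code) is never invoked on an ERR_PTR(). */
        static int example_io_user(int init_rc)
        {
                struct example_io *io = example_io_init(init_rc);

                if (IS_ERR(io))
                        return PTR_ERR(io);

                /* ... use io ... */
                kfree(io);
                return 0;
        }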

Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3487
Lustre-change: http://review.whamcloud.com/6735
Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/lustre/lustre/llite/llite_mmap.c

index 32a57e85300b80cc8107bb22a05c2531d9de9e4d..a4061ee15b62b4ebf432201b7d764d21d9cc867c 100644 (file)
@@ -106,11 +106,12 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
                               struct cl_env_nest *nest,
                               pgoff_t index, unsigned long *ra_flags)
 {
-       struct file       *file  = vma->vm_file;
-       struct inode      *inode = file->f_dentry->d_inode;
-       struct cl_io      *io;
-       struct cl_fault_io *fio;
-       struct lu_env     *env;
+       struct file            *file = vma->vm_file;
+       struct inode           *inode = file->f_dentry->d_inode;
+       struct cl_io           *io;
+       struct cl_fault_io     *fio;
+       struct lu_env          *env;
+       int                     rc;
        ENTRY;
 
        *env_ret = NULL;
@@ -151,17 +152,22 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);
 
-       if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
+       rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+       if (rc == 0) {
                struct ccc_io *cio = ccc_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 
                LASSERT(cio->cui_cl.cis_io == io);
 
-               /* mmap lock must be MANDATORY
-                * it has to cache pages. */
+               /* mmap lock must be MANDATORY it has to cache
+                * pages. */
                io->ci_lockreq = CILR_MANDATORY;
-
-               cio->cui_fd  = fd;
+               cio->cui_fd = fd;
+       } else {
+               LASSERT(rc < 0);
+               cl_io_fini(env, io);
+               cl_env_nested_put(nest, env);
+               io = ERR_PTR(rc);
        }
 
        return io;
@@ -189,7 +195,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 
        result = io->ci_result;
        if (result < 0)
-               GOTO(out, result);
+               GOTO(out_io, result);
 
        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;
@@ -251,14 +257,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
        }
        EXIT;
 
-out:
+out_io:
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);
-
+out:
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
-
        LASSERT(ergo(result == 0, PageLocked(vmpage)));
-       return(result);
+
+       return result;
 }