From: Zach Brown
Date: Mon, 17 Oct 2005 23:43:33 +0000 (-0700)
Subject: [PATCH] aio: revert lock_kiocb()
X-Git-Tag: firefly_0821_release~39876^2~50^2~29
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=4faa5285283fad081443e3612ca426a311bb6c7e;p=firefly-linux-kernel-4.4.55.git

[PATCH] aio: revert lock_kiocb()

lock_kiocb() was introduced to serialize retrying and cancellation.  In the
process of doing so it tried to sleep waiting for KIF_LOCKED while holding
the ctx_lock spinlock.  Recent fixes have ensured that multiple concurrent
retries won't be attempted for a given iocb.  Cancel has other problems and
has no significant in-tree users that have been complaining about it.  So
for the immediate future we'll revert sleeping with the lock held and will
address proper cancellation and retry serialization in the future.

Signed-off-by: Zach Brown
Acked-by: Benjamin LaHaise
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/fs/aio.c b/fs/aio.c
index d6b1551342b7..9fe7216457d8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -398,7 +398,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	if (unlikely(!req))
 		return NULL;
 
-	req->ki_flags = 1 << KIF_LOCKED;
+	req->ki_flags = 0;
 	req->ki_users = 2;
 	req->ki_key = 0;
 	req->ki_ctx = ctx;
@@ -547,25 +547,6 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ioctx;
 }
 
-static int lock_kiocb_action(void *param)
-{
-	schedule();
-	return 0;
-}
-
-static inline void lock_kiocb(struct kiocb *iocb)
-{
-	wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action,
-			 TASK_UNINTERRUPTIBLE);
-}
-
-static inline void unlock_kiocb(struct kiocb *iocb)
-{
-	kiocbClearLocked(iocb);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&iocb->ki_flags, KIF_LOCKED);
-}
-
 /*
  * use_mm
  *	Makes the calling kernel thread take on the specified
@@ -796,9 +777,7 @@ static int __aio_run_iocbs(struct kioctx *ctx)
 		 * Hold an extra reference while retrying i/o.
 		 */
 		iocb->ki_users++;       /* grab extra reference */
-		lock_kiocb(iocb);
 		aio_run_iocb(iocb);
-		unlock_kiocb(iocb);
 		if (__aio_put_req(ctx, iocb))  /* drop extra ref */
 			put_ioctx(ctx);
 	}
@@ -1542,7 +1521,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 
 	spin_lock_irq(&ctx->ctx_lock);
 	aio_run_iocb(req);
-	unlock_kiocb(req);
 	if (!list_empty(&ctx->run_list)) {
 		/* drain the run list */
 		while (__aio_run_iocbs(ctx))
@@ -1674,7 +1652,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
 	if (NULL != cancel) {
 		struct io_event tmp;
 		pr_debug("calling cancel\n");
-		lock_kiocb(kiocb);
 		memset(&tmp, 0, sizeof(tmp));
 		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
 		tmp.data = kiocb->ki_user_data;
@@ -1686,7 +1663,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
 			if (copy_to_user(result, &tmp, sizeof(tmp)))
 				ret = -EFAULT;
 		}
-		unlock_kiocb(kiocb);
 	} else
 		ret = -EINVAL;
 
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 60def658b246..0decf66117c1 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -24,7 +24,12 @@ struct kioctx;
 #define KIOCB_SYNC_KEY		(~0U)
 
 /* ki_flags bits */
-#define KIF_LOCKED		0
+/*
+ * This may be used for cancel/retry serialization in the future, but
+ * for now it's unused and we probably don't want modules to even
+ * think they can use it.
+ */
+/* #define KIF_LOCKED		0 */
 #define KIF_KICKED		1
 #define KIF_CANCELLED		2
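
A note for readers less familiar with the locking rule behind this revert:
kernel code must not sleep while holding a spinlock, and lock_kiocb() could
do exactly that by sleeping in wait_on_bit_lock() while its callers held
ctx->ctx_lock.  Besides being invalid in atomic context, the pattern can
deadlock outright if the thread that would clear the bit also needs the
spinlock the sleeper is holding.  The following is a minimal userspace
sketch of that second hazard, written in plain C with pthreads rather than
kernel code; the names ctx_lock, kif_locked, wait_for_unlock() and
unlocker() are stand-ins invented for the illustration and do not come
from fs/aio.c.

/*
 * Userspace sketch only -- NOT kernel code.  ctx_lock stands in for
 * ctx->ctx_lock and kif_locked for the KIF_LOCKED bit; the names are
 * reused purely for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t ctx_lock;
static pthread_mutex_t flag_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flag_cond = PTHREAD_COND_INITIALIZER;
static bool kif_locked = true;

/* Blocks (sleeps) until the "locked" bit is cleared. */
static void wait_for_unlock(void)
{
	pthread_mutex_lock(&flag_mutex);
	while (kif_locked)
		pthread_cond_wait(&flag_cond, &flag_mutex);
	pthread_mutex_unlock(&flag_mutex);
}

/* The thread that clears the bit must take ctx_lock first. */
static void *unlocker(void *unused)
{
	(void)unused;

	pthread_spin_lock(&ctx_lock);
	/* ... bookkeeping that needs ctx_lock ... */
	pthread_spin_unlock(&ctx_lock);

	pthread_mutex_lock(&flag_mutex);
	kif_locked = false;			/* "clear KIF_LOCKED" */
	pthread_cond_broadcast(&flag_cond);	/* "wake_up_bit()" */
	pthread_mutex_unlock(&flag_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_spin_init(&ctx_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t, NULL, unlocker, NULL);

	pthread_spin_lock(&ctx_lock);
	/* ... short, non-blocking work under the spinlock ... */
	pthread_spin_unlock(&ctx_lock);

	/*
	 * Safe ordering: the spinlock is dropped before blocking.  Calling
	 * wait_for_unlock() before pthread_spin_unlock() above -- sleeping
	 * while ctx_lock is still held, the shape of the reverted
	 * lock_kiocb() call -- would leave unlocker() spinning on ctx_lock
	 * forever and this thread asleep forever.
	 */
	wait_for_unlock();

	pthread_join(t, NULL);
	puts("done");
	return 0;
}

Reordering the calls as described in the comment makes the program hang,
which is the userspace analogue of the problem being reverted here.  In
the kernel the rule is stricter still: even when no deadlock results,
sleeping while a spinlock is held is generally a bug, which is why the
patch drops the sleep entirely rather than reordering it.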