 		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
-		atomic_sub(avail, &ctx->reqs_active);
 		head += avail;
 		head %= ctx->nr_events;
 	}
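
The hunk above is the tail of free_ioctx()'s drain loop: with this patch, reqs_active is decremented once per request in aio_complete() (see the put_rq hunk below), so the drain loop must no longer subtract reaped events itself, or each event would be accounted twice. The index arithmetic it keeps is ordinary ring-buffer wrap handling. A standalone toy demonstration of that arithmetic; the names mirror the kernel code, but nothing here beyond the two lines shown in the hunk is taken from fs/aio.c:

#include <stdio.h>

/* Toy demo of the ring-index math: with nr_events slots, the events
 * available in one contiguous run are bounded either by tail (no wrap)
 * or by the end of the ring.
 */
int main(void)
{
	unsigned nr_events = 128, head = 120, tail = 8;

	while (head != tail) {
		unsigned avail =
			(head <= tail ? tail : nr_events) - head;

		printf("consume %u events at %u\n", avail, head);
		head += avail;
		head %= nr_events;	/* wrap around the ring */
	}
	return 0;
}

Run against head=120, tail=8 this consumes 8 events at slot 120 (up to the end of the ring), wraps head to 0, then consumes the remaining 8 at slot 0.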
 /* kill_ioctx
  *	Cancels all outstanding aio requests on an aio context.  Used
  *	when the processes owning a context have all exited to encourage
  *	the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
+		spin_lock(&mm->ioctx_lock);
 		hlist_del_rcu(&ctx->list);
+		spin_unlock(&mm->ioctx_lock);
 		/*
 		 * It'd be more correct to do this in free_ioctx(), after all
 		 * the outstanding kiocbs have finished - but by then io_destroy
 		 * has already returned, so io_setup() could potentially return
 		 * -EAGAIN with no ioctxs actually in use (as far as userspace
 		 * could tell).
 		 */
 		ctx->mmap_size = 0;
-		kill_ioctx(ctx);
+		kill_ioctx(mm, ctx);
 	}
 }
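
Two separate functions are touched above (the intervening hunk header is not shown in this excerpt): kill_ioctx() itself, which now takes the owning mm_struct so that the hlist removal happens under mm->ioctx_lock, the same lock the insertion path uses; and exit_aio(), whose call site passes its mm through. The lock serializes writers of the context list, while RCU continues to cover the lock-free readers. For contrast, a sketch of that reader side, assuming the hlist-based context list of this era's fs/aio.c; this follows the shape of lookup_ioctx() rather than being a verbatim copy:

/* Sketch: lookups traverse mm->ioctx_list under RCU only, which is why
 * removal must use hlist_del_rcu() under mm->ioctx_lock, and why the
 * kioctx must survive a grace period after being unlinked.
 */
static struct kioctx *lookup_ioctx_sketch(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id) {
			atomic_inc(&ctx->users);	/* pin it for the caller */
			ret = ctx;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}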
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	aio_put_req(iocb);
+	atomic_dec(&ctx->reqs_active);
 	/*
 	 * We have to order our ring_info tail store above and test
 	 * of the wait list below outside the wait lock.  This is
 	 * like in wake_up_bit() where clearing a bit has to be
 	 * ordered with the unlocked test.
 	 */
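
The put_rq hunk is the other half of the accounting change from the first hunk: a request now gives up its reqs_active slot when it completes, whether or not userspace ever reaps the event, which is what lets free_ioctx()'s drain loop terminate. The ordering comment above refers to code just past the excerpt; a sketch of that tail of aio_complete(), where everything except the atomic_dec() line is assumed context rather than part of this patch:

put_rq:
	/* Everything turned out well, dispose of the aiocb. */
	aio_put_req(iocb);
	atomic_dec(&ctx->reqs_active);	/* added by this patch */

	/* Pairs the ring tail store above with the unlocked waitqueue
	 * test below, as the comment explains.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);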
 	if (head == ctx->tail)
 		goto out;
+	head %= ctx->nr_events;
+
 	while (ret < nr) {
 		long avail;
 		struct io_event *ev;
 	flush_dcache_page(ctx->ring_pages[0]);
 	pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
-
-	atomic_sub(ret, &ctx->reqs_active);
 out:
 	mutex_unlock(&ctx->ring_lock);
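
With the atomic_sub() gone, the reader side of the ring only advances head; the new `head %= ctx->nr_events` on entry normalizes a head value that may have been stored as a full lap (equal to nr_events) before the loop indexes into the ring pages. A condensed sketch of the loop these hunks sit in, assuming the surrounding aio_read_events_ring() structure, with the per-page copy-out elided:

	head %= ctx->nr_events;			/* normalize before indexing */

	while (ret < nr) {
		long avail;

		if (head == ctx->tail)
			break;			/* ring is empty */

		/* Contiguous events available without wrapping. */
		avail = (head <= ctx->tail ? ctx->tail
					   : ctx->nr_events) - head;
		avail = min(avail, nr - ret);

		/* copy_to_user() of 'avail' events elided ... */

		ret += avail;
		head += avail;
		head %= ctx->nr_events;		/* wrap after consuming */
	}
	/* Note: no reqs_active bookkeeping on this path anymore. */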
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
 		if (ret)
-			kill_ioctx(ioctx);
+			kill_ioctx(current->mm, ioctx);
 		put_ioctx(ioctx);
 	}
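
The same signature change propagates to io_setup()'s error path: if the context was created but its id cannot be copied back to userspace, it must be torn down on the spot, and that teardown now needs current->mm for the list lock. A sketch of the enclosing syscall, with everything outside the visible hunk assumed:

/* Sketch of sys_io_setup() around this hunk: on a failed put_user()
 * the freshly created context is killed so it is not leaked.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx;
	long ret;

	/* ... argument validation elided ... */

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx);
		put_ioctx(ioctx);	/* drop the ref from ioctx_alloc() */
	}
	return ret;
}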
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
-		kill_ioctx(ioctx);
+		kill_ioctx(current->mm, ioctx);
 		put_ioctx(ioctx);
 		return 0;
 	}
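
Finally, io_destroy() kills a context on request rather than at process exit. Since glibc does not wrap these syscalls, a minimal userspace smoke test of the two paths patched here can invoke them directly; this hypothetical test program is not part of the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>	/* aio_context_t */

int main(void)
{
	aio_context_t ctx = 0;	/* must be zeroed before io_setup */

	/* Exercises the io_setup() path, including its error handling. */
	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	printf("context created: %#llx\n", (unsigned long long)ctx);

	/* Exercises lookup_ioctx() + kill_ioctx() via io_destroy(). */
	if (syscall(SYS_io_destroy, ctx) < 0) {
		perror("io_destroy");
		return 1;
	}
	return 0;
}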