[AIO] tidy cancel code to share code between aio_cancel_all and sys_io_cancel Both aio_cancel_all() and sys_io_cancel() have to perform similar locking on iocbs in order to safely enter the ki_cancel method. Factor this out into common code. This also fixes a bug in aio_cancel_all() where the iocb was not being locked before entering the cancel method. Signed-off-by: Benjamin LaHaise diff -purN --exclude=description 867_async_msg/fs/aio.c 87_cancel_cleanup/fs/aio.c --- 867_async_msg/fs/aio.c 2005-08-17 13:47:02.000000000 -0400 +++ 87_cancel_cleanup/fs/aio.c 2005-08-17 12:40:07.000000000 -0400 @@ -62,6 +62,7 @@ static LIST_HEAD(fput_head); static void aio_kick_handler(void *); static void aio_queue_work(struct kioctx *); +static long aio_cancel_kiocb(struct kiocb *iocb, struct io_event __user *event); /* aio_setup * Creates the slab caches used by the aio routines, panic on @@ -271,22 +272,18 @@ out_freectx: */ static void aio_cancel_all(struct kioctx *ctx) { - int (*cancel)(struct kiocb *, struct io_event *); - struct io_event res; spin_lock_irq(&ctx->ctx_lock); ctx->dead = 1; while (!list_empty(&ctx->active_reqs)) { struct list_head *pos = ctx->active_reqs.next; struct kiocb *iocb = list_kiocb(pos); list_del_init(&iocb->ki_list); - cancel = iocb->ki_cancel; kiocbSetCancelled(iocb); - if (cancel) { - iocb->ki_users++; - spin_unlock_irq(&ctx->ctx_lock); - cancel(iocb, &res); - spin_lock_irq(&ctx->ctx_lock); - } + iocb->ki_users++; + spin_unlock_irq(&ctx->ctx_lock); + aio_cancel_kiocb(iocb, NULL); + aio_put_req(iocb); + spin_lock_irq(&ctx->ctx_lock); } spin_unlock_irq(&ctx->ctx_lock); } @@ -1986,15 +1983,17 @@ int fastcall io_submit_one(struct kioctx goto out_put_req; spin_lock_irq(&ctx->ctx_lock); - aio_run_iocb(req); - if (!kiocbIsDontUnlock(req)) - unlock_kiocb(req); - if (!list_empty(&ctx->run_list)) { + if (likely(list_empty(&ctx->run_list))) { + aio_run_iocb(req); + } else { + list_add_tail(&req->ki_run_list, &ctx->run_list); /* drain the run list */ while 
(__aio_run_iocbs(ctx)) ; } spin_unlock_irq(&ctx->ctx_lock); + if (!kiocbIsDontUnlock(req)) + unlock_kiocb(req); aio_put_req(req); /* drop extra ref to req */ return 0; @@ -2079,6 +2078,38 @@ static struct kiocb *lookup_kiocb(struct return NULL; } +static long aio_cancel_kiocb(struct kiocb *kiocb, struct io_event __user *result) +{ + int (*cancel)(struct kiocb *iocb, struct io_event *res); + struct io_event tmp; + long ret = -EAGAIN; + + pr_debug("calling cancel(%p) %p\n", kiocb, kiocb->ki_cancel); + memset(&tmp, 0, sizeof(tmp)); + tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; + tmp.data = kiocb->ki_user_data; + + lock_kiocb(kiocb); + cancel = kiocb->ki_cancel; + if (cancel) { + ret = cancel(kiocb, &tmp); + if (!ret) { + /* Cancellation succeeded -- copy the result + * into the user's buffer. + */ + if (result && copy_to_user(result, &tmp, sizeof(tmp))) + ret = -EFAULT; + } + } + unlock_kiocb(kiocb); + /* If the cancellation was successful, we must discard the + * reference held for completion of the iocb. + */ + if (!ret) + aio_put_req(kiocb); + return ret; +} + /* sys_io_cancel: * Attempts to cancel an iocb previously passed to io_submit. If * the operation is successfully cancelled, the resulting event is @@ -2118,26 +2149,7 @@ asmlinkage long sys_io_cancel(aio_contex spin_unlock_irq(&ctx->ctx_lock); if (NULL != cancel) { - struct io_event tmp; - pr_debug("calling cancel\n"); - lock_kiocb(kiocb); - memset(&tmp, 0, sizeof(tmp)); - tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; - tmp.data = kiocb->ki_user_data; - ret = cancel(kiocb, &tmp); - if (!ret) { - /* Cancellation succeeded -- copy the result - * into the user's buffer. - */ - if (copy_to_user(result, &tmp, sizeof(tmp))) - ret = -EFAULT; - } - unlock_kiocb(kiocb); - /* If the cancellation was successful, we must discard the - * reference held for completion of the iocb. 
- */ - if (!ret) - aio_put_req(kiocb); + ret = aio_cancel_kiocb(kiocb, result); aio_put_req(kiocb); } else printk(KERN_DEBUG "iocb has no cancel operation\n"); diff -purN --exclude=description 867_async_msg/net/socket.c 87_cancel_cleanup/net/socket.c --- 867_async_msg/net/socket.c 2005-08-17 14:26:24.000000000 -0400 +++ 87_cancel_cleanup/net/socket.c 2005-08-17 14:14:03.000000000 -0400 @@ -421,7 +421,7 @@ out: return fd; } -static inline struct socket *file_to_socket(struct file *file) +static inline struct socket *file_to_sock(struct file *file) { if (file->f_op == &socket_file_ops) return file->private_data; @@ -453,7 +453,7 @@ struct socket *sockfd_lookup(int fd, int return NULL; } - sock = file_to_socket(file); + sock = file_to_sock(file); if (!IS_ERR(sock)) return sock; @@ -1738,7 +1738,7 @@ out: long vfs_sendmsg(struct file *file, struct msghdr __user *msg, unsigned flags) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; - struct socket *sock = file_to_socket(file); + struct socket *sock = file_to_sock(file); char address[MAX_SOCK_ADDR]; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */ @@ -1842,7 +1842,7 @@ out: long vfs_recvmsg(struct file *file, struct msghdr __user *msg, unsigned int flags) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; - struct socket *sock = file_to_socket(file); + struct socket *sock = file_to_sock(file); struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov=iovstack; struct msghdr msg_sys;