/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
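
/*
 * Rough lifecycle, as driven from userspace (illustrative pseudo-code;
 * in practice nbd-client performs these steps):
 *
 *	fd = open("/dev/nbd0", O_RDWR);
 *	sk = socket(AF_INET, SOCK_STREAM, 0);	// connect()ed to the server
 *	ioctl(fd, NBD_SET_SOCK, sk);		// hand the socket to the driver
 *	ioctl(fd, NBD_SET_SIZE_BLOCKS, n);	// advertise the device size
 *	ioctl(fd, NBD_DO_IT);			// blocks: caller becomes the receiver
 */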
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
struct nbd_device {
	u32 flags;
	int harderror;		/* Code of hard error */
	struct socket * sock;	/* If == NULL, device is not ready, yet */
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	pid_t pid; /* pid of nbd-client, if attached */
	int xmit_timeout;
	int disconnect; /* a disconnect has been requested by user */

	struct timer_list timeout_timer;
	struct task_struct *task_recv;
	struct task_struct *task_send;
};
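
/*
 * Locking/flow summary (descriptive, derived from the code below):
 * tx_lock serializes all access to ->sock; queue_lock protects both
 * request lists. A request moves from waiting_queue (filled by
 * do_nbd_request) to queue_head once nbd_thread has sent it, and is
 * completed when nbd_read_stat() matches the server's reply handle.
 */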
#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	if (lock)
		mutex_lock(&nbd->tx_lock);
	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
		del_timer_sync(&nbd->timeout_timer);
	}
	if (lock)
		mutex_unlock(&nbd->tx_lock);
}
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	struct task_struct *task;

	if (list_empty(&nbd->queue_head))
		return;

	nbd->disconnect = 1;

	/*
	 * Snapshot the task pointers with READ_ONCE and only ever use the
	 * snapshot: the receiver/sender may clear these fields concurrently.
	 */
	task = READ_ONCE(nbd->task_recv);
	if (task)
		force_sig(SIGKILL, task);

	task = READ_ONCE(nbd->task_send);
	if (task)
		force_sig(SIGKILL, task);

	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}
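
/*
 * Note on the PF_MEMALLOC/__GFP_MEMALLOC dance above (descriptive): nbd
 * may sit under a filesystem that is being written back to free memory,
 * so socket allocations on this path must not recurse into filesystem
 * I/O (GFP_NOIO) and may dip into the memalloc reserves. The receive
 * side also re-arms timeout_timer, so the watchdog only fires when the
 * server stops answering entirely, not merely when it answers slowly.
 */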
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
				 int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}
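
/*
 * The wait on active_wq above closes a race: the wire handle is the bare
 * request pointer, so a reply could arrive while nbd_send_req() is still
 * transmitting that very request, before it has been moved onto
 * queue_head. Waiting until active_req != xreq guarantees the sender is
 * finished with it before we try to match it here.
 */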
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			   MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
harderror:
	nbd->harderror = result;
	return NULL;
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
static int nbd_do_it(struct nbd_device *nbd)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);
	nbd->pid = task_pid_nr(current);
	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		nbd->pid = 0;
		return ret;
	}

	nbd->task_recv = current;

	while ((req = nbd_read_stat(nbd)) != NULL)
		nbd_end_request(nbd, req);

	nbd->task_recv = NULL;

	if (signal_pending(current)) {
		siginfo_t info;

		ret = dequeue_signal_lock(current, &current->blocked, &info);
		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
			 task_pid_nr(current), current->comm, ret);
		sock_shutdown(nbd, 1);
		ret = -ETIMEDOUT;
	}

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
	nbd->pid = 0;
	return ret;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
}
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}
static int nbd_thread(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	nbd->task_send = current;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		if (signal_pending(current)) {
			siginfo_t info;
			int ret;

			ret = dequeue_signal_lock(current, &current->blocked,
						  &info);
			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
				 task_pid_nr(current), current->comm, ret);
			sock_shutdown(nbd, 1);
			break;
		}

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	nbd->task_send = NULL;

	return 0;
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future:
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk("Warning: Ignoring result!\n"); nbd_end_request(req); }
 */

static void do_nbd_request(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
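
/*
 * Note the lock juggling: the block layer calls this with q->queue_lock
 * held, but the lock is dropped while the request is parked on
 * waiting_queue and the worker is woken, then re-taken before fetching
 * the next request. The actual network I/O happens in nbd_thread, never
 * in this (atomic) context.
 */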
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back. */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = 1;

		nbd_send_req(nbd, &sreq);
		return 0;
	}
	case NBD_CLEAR_SOCK: {
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = 0; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}
	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;
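
	/*
	 * Worked example (illustrative numbers): with the default blksize
	 * of 1024 and NBD_SET_SIZE_BLOCKS arg = 4096, bytesize becomes
	 * 4 MiB, and set_capacity() advertises bytesize >> 9 = 8192
	 * standard 512-byte sectors.
	 */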
	case NBD_DO_IT: {
		struct task_struct *thread;
		struct socket *sock;
		int error;

		if (nbd->pid)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_run(nbd_thread, nbd, "%s",
				     nbd->disk->disk_name);
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		error = nbd_do_it(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);
		if (error)
			return error;
		sock_shutdown(nbd, 0);
		sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		if (sock)
			sockfd_put(sock);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			blkdev_reread_part(bdev);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return nbd->harderror;
	}
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
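
	/*
	 * Example (illustrative numbers): max_part=8 gives part_shift =
	 * fls(8) = 4, so each disk gets 1 << 4 = 16 minors and max_part
	 * is rounded up to 15 usable partitions (minor 0 being the whole
	 * disk).
	 */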
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}
	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
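
/*
 * Example (hypothetical session; assumes an NBD server is already
 * listening on the standard port 10809 and nbd-utils is installed):
 *
 *	modprobe nbd nbds_max=4 max_part=8
 *	nbd-client localhost 10809 /dev/nbd0
 *	mkfs.ext4 /dev/nbd0 && mount /dev/nbd0 /mnt
 */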