/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_device {
        u32 flags;
        struct socket * sock;   /* If == NULL, device is not ready, yet */
        int magic;

        spinlock_t queue_lock;
        struct list_head queue_head;    /* Requests waiting result */
        struct request *active_req;
        wait_queue_head_t active_wq;
        struct list_head waiting_queue; /* Requests to be sent */
        wait_queue_head_t waiting_wq;

        struct mutex tx_lock;
        struct gendisk *disk;
        int blksize;
        loff_t bytesize;
        int xmit_timeout;
        bool timedout;
        bool disconnect; /* a disconnect has been requested by user */

        struct timer_list timeout_timer;
        /* protects initialization and shutdown of the socket */
        spinlock_t sock_lock;
        struct task_struct *task_recv;
        struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}

static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
        int error = req->errors ? -EIO : 0;
        struct request_queue *q = req->q;
        unsigned long flags;

        dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
                error ? "failed" : "done");

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(req, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
        spin_lock_irq(&nbd->sock_lock);

        if (!nbd->sock) {
                spin_unlock_irq(&nbd->sock_lock);
                return;
        }

        dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
        kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
        sockfd_put(nbd->sock);
        nbd->sock = NULL;
        spin_unlock_irq(&nbd->sock_lock);

        del_timer(&nbd->timeout_timer);
}

static void nbd_xmit_timeout(unsigned long arg)
{
        struct nbd_device *nbd = (struct nbd_device *)arg;
        unsigned long flags;

        if (list_empty(&nbd->queue_head))
                return;

        spin_lock_irqsave(&nbd->sock_lock, flags);

        nbd->timedout = true;

        if (nbd->sock)
                kernel_sock_shutdown(nbd->sock, SHUT_RDWR);

        spin_unlock_irqrestore(&nbd->sock_lock, flags);

        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
                int msg_flags)
{
        struct socket *sock = nbd->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send)
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        if (!send && nbd->xmit_timeout)
                mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

        return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
                int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}

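/*
 * Rough sketch of the on-wire request header built by nbd_send_req()
 * below (struct nbd_request from <linux/nbd.h>; the BUILD_BUG_ON in
 * nbd_init() pins it at 28 bytes).  Multi-byte fields are big-endian:
 *
 *      __be32 magic;     NBD_REQUEST_MAGIC
 *      __be32 type;      NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *      char   handle[8]; opaque cookie, echoed back in the reply
 *      __be64 from;      byte offset into the export
 *      __be32 len;       length of the data payload in bytes
 *
 * This driver stores the struct request pointer in the handle so that
 * nbd_read_stat() can match a reply back to its request.
 */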
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
        int result, flags;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        u32 type;

        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
                type = NBD_CMD_DISC;
        else if (req->cmd_flags & REQ_DISCARD)
                type = NBD_CMD_TRIM;
        else if (req->cmd_flags & REQ_FLUSH)
                type = NBD_CMD_FLUSH;
        else if (rq_data_dir(req) == WRITE)
                type = NBD_CMD_WRITE;
        else
                type = NBD_CMD_READ;

        memset(&request, 0, sizeof(request));
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(type);
        if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &req, sizeof(req));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                req, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, 1, &request, sizeof(request),
                        (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                return -EIO;
        }

        if (type == NBD_CMD_WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
                        if (!rq_iter_last(bvec, iter))
                                flags = MSG_MORE;
                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                req, bvec.bv_len);
                        result = sock_send_bvec(nbd, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EIO;
                        }
                }
        }
        return 0;
}

static struct request *nbd_find_request(struct nbd_device *nbd,
                                        struct request *xreq)
{
        struct request *req, *tmp;
        int err;

        err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
        if (unlikely(err))
                return ERR_PTR(err);

        spin_lock(&nbd->queue_lock);
        list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
                if (req != xreq)
                        continue;
                list_del_init(&req->queuelist);
                spin_unlock(&nbd->queue_lock);
                return req;
        }
        spin_unlock(&nbd->queue_lock);

        return ERR_PTR(-ENOENT);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}

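/*
 * The reply header read by nbd_read_stat() is struct nbd_reply from
 * <linux/nbd.h>, roughly:
 *
 *      __be32 magic;     NBD_REPLY_MAGIC
 *      __be32 error;     0 on success, error code otherwise
 *      char   handle[8]; copy of the handle from the matching request
 *
 * Any read payload follows the header on the socket.
 */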
/* an ERR_PTR returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
        int result;
        struct nbd_reply reply;
        struct request *req;

        reply.magic = 0;
        result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Receive control failed (result %d)\n", result);
                return ERR_PTR(result);
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                return ERR_PTR(-EPROTO);
        }

        req = nbd_find_request(nbd, *(struct request **)reply.handle);
        if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        return ERR_PTR(result);

                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
                        reply.handle);
                return ERR_PTR(-EBADR);
        }

        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return req;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
        if (rq_data_dir(req) != WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return req;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                req, bvec.bv_len);
                }
        }
        return req;
}

static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

        return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

static int nbd_thread_recv(struct nbd_device *nbd)
{
        struct request *req;
        int ret;

        BUG_ON(nbd->magic != NBD_MAGIC);

        sk_set_memalloc(nbd->sock->sk);

        nbd->task_recv = current;

        ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");

                nbd->task_recv = NULL;

                return ret;
        }

        while (1) {
                req = nbd_read_stat(nbd);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                nbd_end_request(nbd, req);
        }

        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

        nbd->task_recv = NULL;

        return ret;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
        struct request *req;

        BUG_ON(nbd->magic != NBD_MAGIC);

        /*
         * Because we have set nbd->sock to NULL under the tx_lock, all
         * modifications to the list must have completed by now. For
         * the same reason, the active_req must be NULL.
         *
         * As a consequence, we don't need to take the spin lock while
         * purging the list here.
         */
        BUG_ON(nbd->sock);
        BUG_ON(nbd->active_req);

        while (!list_empty(&nbd->queue_head)) {
                req = list_entry(nbd->queue_head.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(nbd, req);
        }

        while (!list_empty(&nbd->waiting_queue)) {
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(nbd, req);
        }
        dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}


static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
        if (req->cmd_type != REQ_TYPE_FS)
                goto error_out;

        if (rq_data_dir(req) == WRITE &&
            (nbd->flags & NBD_FLAG_READ_ONLY)) {
                dev_err(disk_to_dev(nbd->disk),
                        "Write on read-only\n");
                goto error_out;
        }

        req->errors = 0;

        mutex_lock(&nbd->tx_lock);
        if (unlikely(!nbd->sock)) {
                mutex_unlock(&nbd->tx_lock);
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted send on closed socket\n");
                goto error_out;
        }

        nbd->active_req = req;

        if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
                mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

        if (nbd_send_req(nbd, req) != 0) {
                dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
                req->errors++;
                nbd_end_request(nbd, req);
        } else {
                spin_lock(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->queue_head);
                spin_unlock(&nbd->queue_lock);
        }

        nbd->active_req = NULL;
        mutex_unlock(&nbd->tx_lock);
        wake_up_all(&nbd->active_wq);

        return;

error_out:
        req->errors++;
        nbd_end_request(nbd, req);
}

static int nbd_thread_send(void *data)
{
        struct nbd_device *nbd = data;
        struct request *req;

        nbd->task_send = current;

        set_user_nice(current, MIN_NICE);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
                                         kthread_should_stop() ||
                                         !list_empty(&nbd->waiting_queue));

                /* extract request */
                if (list_empty(&nbd->waiting_queue))
                        continue;

                spin_lock_irq(&nbd->queue_lock);
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                spin_unlock_irq(&nbd->queue_lock);

                /* handle request */
                nbd_handle_req(nbd, req);
        }

        nbd->task_send = NULL;

        return 0;
}

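/*
 * Summary of how a block request travels through the functions above
 * (an informal overview, not a normative description):
 *
 *   nbd_request_handler() - called by the block layer, queues the
 *                           request on nbd->waiting_queue
 *   nbd_thread_send()     - kthread, dequeues from waiting_queue and
 *                           calls nbd_handle_req()
 *   nbd_handle_req()      - sends the request over the socket and moves
 *                           it to nbd->queue_head
 *   nbd_thread_recv()     - runs inside the NBD_DO_IT ioctl, reads
 *                           replies via nbd_read_stat() and completes
 *                           the matching requests
 */
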
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

static void nbd_request_handler(struct request_queue *q)
                __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *nbd;

                spin_unlock_irq(q->queue_lock);

                nbd = req->rq_disk->private_data;

                BUG_ON(nbd->magic != NBD_MAGIC);

                dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
                        req, req->cmd_type);

                if (unlikely(!nbd->sock)) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Attempted send on closed socket\n");
                        req->errors++;
                        nbd_end_request(nbd, req);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                spin_lock_irq(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->waiting_queue);
                spin_unlock_irq(&nbd->queue_lock);

                wake_up(&nbd->waiting_wq);

                spin_lock_irq(q->queue_lock);
        }
}

static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
{
        int ret = 0;

        spin_lock_irq(&nbd->sock_lock);

        if (nbd->sock) {
                ret = -EBUSY;
                goto out;
        }

        nbd->sock = sock;

out:
        spin_unlock_irq(&nbd->sock_lock);

        return ret;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
        nbd->disconnect = false;
        nbd->timedout = false;
        nbd->blksize = 1024;
        nbd->bytesize = 0;
        set_capacity(nbd->disk, 0);
        nbd->flags = 0;
        nbd->xmit_timeout = 0;
        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        del_timer_sync(&nbd->timeout_timer);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
        set_device_ro(bdev, false);
        bdev->bd_inode->i_size = 0;
        if (max_part > 0) {
                blkdev_reread_part(bdev);
                bdev->bd_invalidated = 1;
        }
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

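/*
 * Typical ioctl sequence driven by userspace (e.g. nbd-client), given
 * here as an informal sketch of how the handlers below fit together:
 *
 *   NBD_SET_SOCK      - hand an already-connected socket fd to the driver
 *   NBD_SET_BLKSIZE / NBD_SET_SIZE / NBD_SET_SIZE_BLOCKS / NBD_SET_FLAGS
 *                     - describe the export
 *   NBD_DO_IT         - blocks in nbd_thread_recv() until the connection
 *                       is torn down (NBD_DISCONNECT, timeout or error)
 *   NBD_CLEAR_SOCK    - final cleanup after NBD_DO_IT returns
 */
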
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                struct request sreq;

                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->sock)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->tx_lock);
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_DRV_PRIV;

                /* Check again after getting mutex back. */
                if (!nbd->sock)
                        return -EINVAL;

                nbd->disconnect = true;

                nbd_send_req(nbd, &sreq);
                return 0;
        }

        case NBD_CLEAR_SOCK:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
                kill_bdev(bdev);
                return 0;

        case NBD_SET_SOCK: {
                int err;
                struct socket *sock = sockfd_lookup(arg, &err);

                if (!sock)
                        return err;

                err = nbd_set_socket(nbd, sock);
                if (!err && max_part)
                        bdev->bd_invalidated = 1;

                return err;
        }

        case NBD_SET_BLKSIZE:
                nbd->blksize = arg;
                nbd->bytesize &= ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_SIZE:
                nbd->bytesize = arg & ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_TIMEOUT:
                nbd->xmit_timeout = arg * HZ;
                if (arg)
                        mod_timer(&nbd->timeout_timer,
                                  jiffies + nbd->xmit_timeout);
                else
                        del_timer_sync(&nbd->timeout_timer);

                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_SET_SIZE_BLOCKS:
                nbd->bytesize = ((u64) arg) * nbd->blksize;
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_DO_IT: {
                struct task_struct *thread;
                int error;

                if (nbd->task_recv)
                        return -EBUSY;
                if (!nbd->sock)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);

                if (nbd->flags & NBD_FLAG_READ_ONLY)
                        set_device_ro(bdev, true);
                if (nbd->flags & NBD_FLAG_SEND_TRIM)
                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
                                nbd->disk->queue);
                if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                        blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
                else
                        blk_queue_flush(nbd->disk->queue, 0);

                thread = kthread_run(nbd_thread_send, nbd, "%s",
                                     nbd_name(nbd));
                if (IS_ERR(thread)) {
                        mutex_lock(&nbd->tx_lock);
                        return PTR_ERR(thread);
                }

                nbd_dev_dbg_init(nbd);
                error = nbd_thread_recv(nbd);
                nbd_dev_dbg_close(nbd);
                kthread_stop(thread);

                mutex_lock(&nbd->tx_lock);

                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);

                if (nbd->disconnect) /* user requested, ignore socket errors */
                        error = 0;
                if (nbd->timedout)
                        error = -ETIMEDOUT;

                nbd_reset(nbd);

                return error;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only. The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;

        case NBD_PRINT_DEBUG:
                dev_info(disk_to_dev(nbd->disk),
                        "next = %p, prev = %p, head = %p\n",
                        nbd->queue_head.next, nbd->queue_head.prev,
                        &nbd->queue_head);
                return 0;
        }
        return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        mutex_lock(&nbd->tx_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->tx_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
        .compat_ioctl = nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;

        if (nbd->task_recv)
                seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
        if (nbd->task_send)
                seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

        return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
        .open = nbd_dbg_tasks_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;
        u32 flags = nbd->flags;

        seq_printf(s, "Hex: 0x%08x\n\n", flags);

        seq_puts(s, "Known flags:\n");

        if (flags & NBD_FLAG_HAS_FLAGS)
                seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
        if (flags & NBD_FLAG_READ_ONLY)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

        return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
        .open = nbd_dbg_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        struct dentry *dir;

        if (!nbd_dbg_dir)
                return -EIO;

        dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
        if (!dir) {
                dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
                        nbd_name(nbd));
                return -EIO;
        }
        nbd->dbg_dir = dir;

        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
        debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
        /* pass the device itself, as nbd_dbg_flags_show() expects in s->private */
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
        debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
        struct dentry *dbg_dir;

        dbg_dir = debugfs_create_dir("nbd", NULL);
        if (!dbg_dir)
                return -EIO;

        nbd_dbg_dir = dbg_dir;

        return 0;
}

static void nbd_dbg_close(void)
{
        debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
        return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that user can know the max number of
                 * partition kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;
                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
                if (!disk->queue) {
                        put_disk(disk);
                        goto out;
                }
                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
                disk->queue->limits.discard_zeroes_data = 0;
                blk_queue_max_hw_sectors(disk->queue, 65536);
                disk->queue->limits.max_sectors = 256;
        }

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

        nbd_dbg_init();

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = NBD_MAGIC;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                spin_lock_init(&nbd_dev[i].sock_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_timer(&nbd_dev[i].timeout_timer);
                nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
                nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
                init_waitqueue_head(&nbd_dev[i].active_wq);
                init_waitqueue_head(&nbd_dev[i].waiting_wq);
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                nbd_reset(&nbd_dev[i]);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        return err;
}

static void __exit nbd_cleanup(void)
{
        int i;

        nbd_dbg_close();

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        put_disk(disk);
                }
        }
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");