//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>

#define PART_BITS 4

static bool use_bio;
module_param(use_bio, bool, S_IRUGO);

static int major;
static DEFINE_IDA(vd_index_ida);

struct workqueue_struct *virtblk_wq;

struct virtio_blk
{
        struct virtio_device *vdev;
        struct virtqueue *vq;
        wait_queue_head_t queue_wait;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        mempool_t *pool;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* Lock for config space updates */
        struct mutex config_lock;

        /* enable config space updates */
        bool config_enable;

        /* What the host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* Scatterlist: can be too big for stack. */
        struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
        struct request *req;
        struct bio *bio;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        struct work_struct work;
        struct virtio_blk *vblk;
        int flags;
        u8 status;
        struct scatterlist sg[];
};

enum {
        VBLK_IS_FLUSH = 1,
        VBLK_REQ_FLUSH = 2,
        VBLK_REQ_DATA = 4,
        VBLK_REQ_FUA = 8,
};

static inline int virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return 0;
        case VIRTIO_BLK_S_UNSUPP:
                return -ENOTTY;
        default:
                return -EIO;
        }
}

static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
                                                    gfp_t gfp_mask)
{
        struct virtblk_req *vbr;

        vbr = mempool_alloc(vblk->pool, gfp_mask);
        if (!vbr)
                return NULL;

        vbr->vblk = vblk;
        if (use_bio)
                sg_init_table(vbr->sg, vblk->sg_elems);

        return vbr;
}

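/*
 * Slow path when the virtqueue is full: sleep on queue_wait until a
 * completed request frees descriptors, retrying virtqueue_add_buf()
 * under the queue lock each time we are woken.
 */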
static void virtblk_add_buf_wait(struct virtio_blk *vblk,
                                 struct virtblk_req *vbr,
                                 unsigned long out,
                                 unsigned long in)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
                                          TASK_UNINTERRUPTIBLE);

                spin_lock_irq(vblk->disk->queue->queue_lock);
                if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
                                      GFP_ATOMIC) < 0) {
                        spin_unlock_irq(vblk->disk->queue->queue_lock);
                        io_schedule();
                } else {
                        virtqueue_kick(vblk->vq);
                        spin_unlock_irq(vblk->disk->queue->queue_lock);
                        break;
                }
        }

        finish_wait(&vblk->queue_wait, &wait);
}

static inline void virtblk_add_req(struct virtblk_req *vbr,
                                   unsigned int out, unsigned int in)
{
        struct virtio_blk *vblk = vbr->vblk;

        spin_lock_irq(vblk->disk->queue->queue_lock);
        if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
                                       GFP_ATOMIC) < 0)) {
                spin_unlock_irq(vblk->disk->queue->queue_lock);
                virtblk_add_buf_wait(vblk, vbr, out, in);
                return;
        }
        virtqueue_kick(vblk->vq);
        spin_unlock_irq(vblk->disk->queue->queue_lock);
}

static int virtblk_bio_send_flush(struct virtblk_req *vbr)
{
        unsigned int out = 0, in = 0;

        vbr->flags |= VBLK_IS_FLUSH;
        vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
        vbr->out_hdr.sector = 0;
        vbr->out_hdr.ioprio = 0;
        sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
        sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));

        virtblk_add_req(vbr, out, in);

        return 0;
}

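/*
 * Bio-mode read/write: lay out the scatterlist as out_hdr, the bio's data
 * pages, then the status byte.  Data segments count as "out" for writes
 * and "in" for reads.
 */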
static int virtblk_bio_send_data(struct virtblk_req *vbr)
{
        struct virtio_blk *vblk = vbr->vblk;
        unsigned int num, out = 0, in = 0;
        struct bio *bio = vbr->bio;

        vbr->flags &= ~VBLK_IS_FLUSH;
        vbr->out_hdr.type = 0;
        vbr->out_hdr.sector = bio->bi_sector;
        vbr->out_hdr.ioprio = bio_prio(bio);

        sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

        num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);

        sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
                   sizeof(vbr->status));

        if (num) {
                if (bio->bi_rw & REQ_WRITE) {
                        vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
                        out += num;
                } else {
                        vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
                        in += num;
                }
        }

        virtblk_add_req(vbr, out, in);

        return 0;
}

static void virtblk_bio_send_data_work(struct work_struct *work)
{
        struct virtblk_req *vbr;

        vbr = container_of(work, struct virtblk_req, work);

        virtblk_bio_send_data(vbr);
}

static void virtblk_bio_send_flush_work(struct work_struct *work)
{
        struct virtblk_req *vbr;

        vbr = container_of(work, struct virtblk_req, work);

        virtblk_bio_send_flush(vbr);
}

static inline void virtblk_request_done(struct virtblk_req *vbr)
{
        struct virtio_blk *vblk = vbr->vblk;
        struct request *req = vbr->req;
        int error = virtblk_result(vbr);

        if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
                req->resid_len = vbr->in_hdr.residual;
                req->sense_len = vbr->in_hdr.sense_len;
                req->errors = vbr->in_hdr.errors;
        } else if (req->cmd_type == REQ_TYPE_SPECIAL) {
                req->errors = (error != 0);
        }

        __blk_end_request_all(req, error);
        mempool_free(vbr, vblk->pool);
}

static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
{
        struct virtio_blk *vblk = vbr->vblk;

        if (vbr->flags & VBLK_REQ_DATA) {
                /* Send out the actual write data */
                INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
                queue_work(virtblk_wq, &vbr->work);
        } else {
                bio_endio(vbr->bio, virtblk_result(vbr));
                mempool_free(vbr, vblk->pool);
        }
}

static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
{
        struct virtio_blk *vblk = vbr->vblk;

        if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
                /* Send out a flush before ending the bio */
                vbr->flags &= ~VBLK_REQ_DATA;
                INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
                queue_work(virtblk_wq, &vbr->work);
        } else {
                bio_endio(vbr->bio, virtblk_result(vbr));
                mempool_free(vbr, vblk->pool);
        }
}

static inline void virtblk_bio_done(struct virtblk_req *vbr)
{
        if (unlikely(vbr->flags & VBLK_IS_FLUSH))
                virtblk_bio_flush_done(vbr);
        else
                virtblk_bio_data_done(vbr);
}

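/*
 * Virtqueue callback: reap completed requests under the queue lock and
 * dispatch each to the bio-mode or request-mode completion path, then
 * restart a stopped request queue and wake bio submitters waiting for
 * free descriptors.
 */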
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool bio_done = false, req_done = false;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
                        if (vbr->bio) {
                                virtblk_bio_done(vbr);
                                bio_done = true;
                        } else {
                                virtblk_request_done(vbr);
                                req_done = true;
                        }
                }
        } while (!virtqueue_enable_cb(vq));
        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_start_queue(vblk->disk->queue);
        spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);

        if (bio_done)
                wake_up(&vblk->queue_wait);
}

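/*
 * Request-mode submission: map one struct request onto the virtqueue as
 * out_hdr [+ SCSI cmd], data segments, [sense + in_hdr for SCSI,] status
 * byte.  Returns false if no descriptors are available, so the caller can
 * stop the queue and retry once a request completes.
 */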
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
                   struct request *req)
{
        unsigned long num, out = 0, in = 0;
        struct virtblk_req *vbr;

        vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
        if (!vbr)
                /* When another request finishes we'll try again. */
                return false;

        vbr->req = req;
        vbr->bio = NULL;
        if (req->cmd_flags & REQ_FLUSH) {
                vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
        } else {
                switch (req->cmd_type) {
                case REQ_TYPE_FS:
                        vbr->out_hdr.type = 0;
                        vbr->out_hdr.sector = blk_rq_pos(vbr->req);
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                case REQ_TYPE_BLOCK_PC:
                        vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                case REQ_TYPE_SPECIAL:
                        vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                default:
                        /* We don't put anything else in the queue. */
                        BUG();
                }
        }

        sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

        /*
         * If this is a packet command we need a couple of additional headers.
         * Behind the normal outhdr we put a segment with the scsi command
         * block, and before the normal inhdr we put the sense data and an
         * inhdr with additional status information.
         */
        if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
                sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

        num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

        if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
                sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense,
                           SCSI_SENSE_BUFFERSIZE);
                sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
                           sizeof(vbr->in_hdr));
        }

        sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
                   sizeof(vbr->status));

        if (num) {
                if (rq_data_dir(vbr->req) == WRITE) {
                        vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
                        out += num;
                } else {
                        vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
                        in += num;
                }
        }

        if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
                              GFP_ATOMIC) < 0) {
                mempool_free(vbr, vblk->pool);
                return false;
        }

        return true;
}

static void virtblk_request(struct request_queue *q)
{
        struct virtio_blk *vblk = q->queuedata;
        struct request *req;
        unsigned int issued = 0;

        while ((req = blk_peek_request(q)) != NULL) {
                BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

                /* If this request fails, stop queue and wait for something to
                   finish to restart it. */
                if (!do_req(q, vblk, req)) {
                        blk_stop_queue(q);
                        break;
                }
                blk_start_request(req);
                issued++;
        }

        if (issued)
                virtqueue_kick(vblk->vq);
}

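/*
 * Bio-mode (use_bio=1) entry point: bios are submitted directly, bypassing
 * the request queue.  FLUSH and FUA are sequenced explicitly via vbr->flags.
 */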
static void virtblk_make_request(struct request_queue *q, struct bio *bio)
{
        struct virtio_blk *vblk = q->queuedata;
        struct virtblk_req *vbr;

        BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);

        vbr = virtblk_alloc_req(vblk, GFP_NOIO);
        if (!vbr) {
                bio_endio(bio, -ENOMEM);
                return;
        }

        vbr->bio = bio;
        vbr->flags = 0;
        if (bio->bi_rw & REQ_FLUSH)
                vbr->flags |= VBLK_REQ_FLUSH;
        if (bio->bi_rw & REQ_FUA)
                vbr->flags |= VBLK_REQ_FUA;
        if (bio->bi_size)
                vbr->flags |= VBLK_REQ_DATA;

        if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
                virtblk_bio_send_flush(vbr);
        else
                virtblk_bio_send_data(vbr);
}

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request *req;
        struct bio *bio;
        int err;

        bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
                           GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
        if (IS_ERR(req)) {
                bio_put(bio);
                return PTR_ERR(req);
        }

        req->cmd_type = REQ_TYPE_SPECIAL;
        err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        blk_put_request(req);

        return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        struct virtio_blk_geometry vgeo;
        int err;

        /* see if the host passed in geometry config */
        err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
                                offsetof(struct virtio_blk_config, geometry),
                                &vgeo);

        if (!err) {
                geo->heads = vgeo.heads;
                geo->sectors = vgeo.sectors;
                geo->cylinders = vgeo.cylinders;
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t virtblk_serial_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        u64 capacity, size;

        mutex_lock(&vblk->config_lock);
        if (!vblk->config_enable)
                goto done;

        /* Host must always specify the capacity. */
        vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
                          &capacity, sizeof(capacity));

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        size = capacity * queue_logical_block_size(q);
        string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "new size: %llu %d-byte logical blocks (%s/%s)\n",
                   (unsigned long long)capacity,
                   queue_logical_block_size(q),
                   cap_str_10, cap_str_2);

        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
done:
        mutex_unlock(&vblk->config_lock);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
        int err = 0;

        /* We expect one virtqueue, for output. */
        vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
        if (IS_ERR(vblk->vq))
                err = PTR_ERR(vblk->vq);

        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

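/*
 * Cache mode: prefer the writeback ("wce") byte in the config space when
 * VIRTIO_BLK_F_CONFIG_WCE was negotiated, otherwise fall back to the
 * legacy VIRTIO_BLK_F_WCE feature bit.
 */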
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                offsetof(struct virtio_blk_config, wce),
                                &writeback);
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        if (writeback)
                blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
        else
                blk_queue_flush(vblk->disk->queue, 0);

        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;
        u8 writeback;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
                if (sysfs_streq(buf, virtblk_cache_types[i]))
                        break;

        if (i < 0)
                return -EINVAL;

        writeback = i;
        vdev->config->set(vdev,
                          offsetof(struct virtio_blk_config, wce),
                          &writeback, sizeof(writeback));

        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
        __ATTR(cache_type, S_IRUGO,
               virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
        __ATTR(cache_type, S_IRUGO|S_IWUSR,
               virtblk_cache_type_show, virtblk_cache_type_store);

static int __devinit virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;
        int pool_size;

        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
                                offsetof(struct virtio_blk_config, seg_max),
                                &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk) +
                                    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        init_waitqueue_head(&vblk->queue_wait);
        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
        sg_init_table(vblk->sg, vblk->sg_elems);
        mutex_init(&vblk->config_lock);

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
        vblk->config_enable = true;

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        pool_size = sizeof(struct virtblk_req);
        if (use_bio)
                pool_size += sizeof(struct scatterlist) * sg_elems;
        vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
        if (!vblk->pool) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_mempool;
        }

        q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
        if (!q) {
                err = -ENOMEM;
                goto out_put_disk;
        }

        if (use_bio)
                blk_queue_make_request(q, virtblk_make_request);
        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->driverfs_dev = &vdev->dev;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* Host must always specify the capacity. */
        vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
                          &cap, sizeof(cap));

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        set_capacity(vblk->disk, cap);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                offsetof(struct virtio_blk_config, size_max),
                                &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);

        /* Host can optionally specify the block size of the device */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                offsetof(struct virtio_blk_config, blk_size),
                                &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                offsetof(struct virtio_blk_config, physical_block_exp),
                &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                offsetof(struct virtio_blk_config, alignment_offset),
                &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                offsetof(struct virtio_blk_config, min_io_size),
                &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
                offsetof(struct virtio_blk_config, opt_io_size),
                &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        add_disk(vblk->disk);
        err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
        if (err)
                goto out_del_disk;

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_rw);
        else
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_ro);
        if (err)
                goto out_del_disk;
        return 0;

out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
        put_disk(vblk->disk);
out_mempool:
        mempool_destroy(vblk->pool);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void __devexit virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
        vblk->config_enable = false;
        mutex_unlock(&vblk->config_lock);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        flush_work(&vblk->config_work);

        refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
        put_disk(vblk->disk);
        mempool_destroy(vblk->pool);
        vdev->config->del_vqs(vdev);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM
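/*
 * Suspend/resume support: quiesce the device and delete the virtqueue on
 * freeze, then recreate the virtqueue and restart the request queue on
 * restore.
 */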
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
        vblk->config_enable = false;
        mutex_unlock(&vblk->config_lock);

        flush_work(&vblk->config_work);

        spin_lock_irq(vblk->disk->queue->queue_lock);
        blk_stop_queue(vblk->disk->queue);
        spin_unlock_irq(vblk->disk->queue->queue_lock);
        blk_sync_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        vblk->config_enable = true;
        ret = init_vq(vdev->priv);
        if (!ret) {
                spin_lock_irq(vblk->disk->queue->queue_lock);
                blk_start_queue(vblk->disk->queue);
                spin_unlock_irq(vblk->disk->queue->queue_lock);
        }
        return ret;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
        VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
};

/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
        .feature_table          = features,
        .feature_table_size     = ARRAY_SIZE(features),
        .driver.name            = KBUILD_MODNAME,
        .driver.owner           = THIS_MODULE,
        .id_table               = id_table,
        .probe                  = virtblk_probe,
        .remove                 = __devexit_p(virtblk_remove),
        .config_changed         = virtblk_config_changed,
#ifdef CONFIG_PM
        .freeze                 = virtblk_freeze,
        .restore                = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_blkdev(major, "virtblk");
        unregister_virtio_driver(&virtio_blk);
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");