/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/bio.h>
#include <linux/rbtree.h>

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away, so we put it in
 * prison where it can't cause any mischief.  Bios are put in a cell
 * identified by a key; multiple bios may be held in the same cell.
 * When the cell is subsequently unlocked the bios become available.
 */
struct dm_bio_prison;

/*
 * Keys define a range of blocks within either a virtual or physical
 * device.
 */
struct dm_cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block_begin, block_end;
};
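
/*
 * Example (a sketch, not part of this API): building a key that locks a
 * single block of the physical data device, the way thin-pool clients
 * build single-block keys.  The function name is illustrative.
 */
static inline void example_build_data_key(dm_thin_id dev, dm_block_t b,
					  struct dm_cell_key *key)
{
	key->virtual = 0;		/* physical (data device) space */
	key->dev = dev;
	key->block_begin = b;
	key->block_end = b + 1ULL;	/* block_end is exclusive */
}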

/*
 * Treat this as opaque; it is only in the header so that callers can
 * manage allocation themselves.
 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;

	struct dm_cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);

/*
 * These two functions just wrap a mempool.  This is a transitory step:
 * eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
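
/*
 * Example (a sketch): because GFP_NOIO is a blocking mask, allocating
 * from process context cannot fail, so cells are typically allocated up
 * front and handed to dm_get_cell()/dm_bio_detain() below.
 */
static inline struct dm_bio_prison_cell *
example_prealloc_cell(struct dm_bio_prison *prison)
{
	return dm_bio_prison_alloc_cell(prison, GFP_NOIO);	/* cannot fail */
}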

/*
 * Creates or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if a pre-existing cell was returned, zero if a new cell was
 * created using @cell_prealloc.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);
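
/*
 * Example (a sketch): taking a lock on a range of virtual blocks with no
 * bio attached, e.g. to quiesce a region before a background operation.
 * The wrapper name and parameters are illustrative.
 */
static inline int example_lock_range(struct dm_bio_prison *prison,
				     dm_thin_id dev,
				     dm_block_t begin, dm_block_t end,
				     struct dm_bio_prison_cell *prealloc,
				     struct dm_bio_prison_cell **cell)
{
	struct dm_cell_key key;

	key.virtual = 1;	/* virtual (thin device) address space */
	key.dev = dev;
	key.block_begin = begin;
	key.block_end = end;

	/* 1 => an overlapping cell already existed, 0 => we own a new cell */
	return dm_get_cell(prison, &key, prealloc, cell);
}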

/*
 * An atomic op that combines retrieving or creating a cell with adding
 * a bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);
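
/*
 * Example: the usual calling pattern, a sketch of how a client such as
 * the thin-pool target wraps dm_bio_detain().  A cell is allocated up
 * front so the detain itself cannot fail; when we lose the race the
 * preallocated cell goes unused and is returned to the pool.
 */
static inline int example_bio_detain(struct dm_bio_prison *prison,
				     struct dm_cell_key *key,
				     struct bio *bio,
				     struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *prealloc;

	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

	r = dm_bio_detain(prison, key, bio, prealloc, cell_result);
	if (r)
		/* A pre-existing cell was reused; drop the spare. */
		dm_bio_prison_free_cell(prison, prealloc);

	return r;
}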

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, int error);

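/*
 * Example (a sketch): unlocking a cell and handing everything that was
 * trapped in it back to the caller, who then requeues or completes the
 * bios.  This mirrors the thin-pool pattern of releasing and then
 * freeing the cell in one step.
 */
static inline void example_cell_release(struct dm_bio_prison *prison,
					struct dm_bio_prison_cell *cell,
					struct bio_list *bios)
{
	dm_cell_release(prison, cell, bios);	/* holder first, then inmates */
	dm_bio_prison_free_cell(prison, cell);
}
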
/*
 * Visits the cell and then releases it.  Guarantees that no new inmates
 * are inserted between the visit and the release.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);
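
/*
 * Example (a sketch): a visitor that transfers all trapped bios to a
 * caller-owned list.  Because the visit happens under the prison lock,
 * no new bio can slip into the cell between the copy and the release.
 */
static inline void example_visitor(void *context,
				   struct dm_bio_prison_cell *cell)
{
	struct bio_list *bios = context;

	if (cell->holder)
		bio_list_add(bios, cell->holder);
	bio_list_merge(bios, &cell->bios);
}

static inline void example_visit_release(struct dm_bio_prison *prison,
					 struct dm_bio_prison_cell *cell,
					 struct bio_list *bios)
{
	dm_cell_visit_release(prison, example_visitor, bios, cell);
	dm_bio_prison_free_cell(prison, cell);
}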

/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder.  There is a race
 * here, though, between releasing an empty cell and other threads adding
 * new inmates, so this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);
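
/*
 * Example (a sketch): when the holder's work completes, either hand the
 * cell to the next inmate or tear the cell down.
 */
static inline void example_holder_finished(struct dm_bio_prison *prison,
					   struct dm_bio_prison_cell *cell)
{
	if (dm_cell_promote_or_release(prison, cell)) {
		/* The cell was empty and has been released; just free it. */
		dm_bio_prison_free_cell(prison, cell);
		return;
	}

	/*
	 * Otherwise cell->holder is the newly promoted bio and the cell
	 * remains locked; process the new holder as usual.
	 */
}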

/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
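
/*
 * Example (a sketch of the shared-read pattern described above; the
 * struct and function names are illustrative, only the dm_deferred_*
 * calls are part of this API).
 */
struct example_mapping {
	struct list_head list;
	/* ... details of the pending mapping ... */
};

/* Take an entry around each read issued to a shared block. */
static inline struct dm_deferred_entry *
example_begin_shared_read(struct dm_deferred_set *ds)
{
	return dm_deferred_entry_inc(ds);
}

/*
 * A write to a shared block queues its mapping until in-flight reads
 * drain.  A zero return means nothing is in flight and the mapping can
 * be inserted immediately; otherwise the work reappears via
 * dm_deferred_entry_dec() below.
 */
static inline int example_queue_mapping(struct dm_deferred_set *ds,
					struct example_mapping *m)
{
	return dm_deferred_set_add_work(ds, &m->list);
}

/* On read completion, drop the entry and collect any unblocked work. */
static inline void example_end_shared_read(struct dm_deferred_entry *entry)
{
	struct example_mapping *m, *tmp;
	struct list_head work;

	INIT_LIST_HEAD(&work);
	dm_deferred_entry_dec(entry, &work);

	list_for_each_entry_safe(m, tmp, &work, list) {
		list_del(&m->list);
		/* ... insert the mapping now that prior reads are done ... */
	}
}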

/*----------------------------------------------------------------*/

#endif