/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/list.h>
#include <linux/bio.h>

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with bios straight away.  We put them in prison
 * where they can't cause any mischief.  Bios are put in a cell identified
 * by a key; multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;
struct dm_bio_prison_cell;

/* FIXME: this needs to be more abstract */
struct dm_cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block;
};

struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
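
/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * interface): a target typically creates one prison at construction
 * time and destroys it on teardown.  The cell count shown is an
 * arbitrary example value; dm_bio_prison_create() returns NULL on
 * allocation failure.
 *
 *	struct dm_bio_prison *prison;
 *
 *	prison = dm_bio_prison_create(1024);
 *	if (!prison)
 *		return -ENOMEM;
 *	...
 *	dm_bio_prison_destroy(prison);
 */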

/*
 * These two functions just wrap a mempool.  This is a transitory step:
 * Eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
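
/*
 * Allocation sketch (illustrative only).  Per the comment above, with
 * GFP_NOIO from process context the mempool waits rather than fails,
 * so no NULL check is needed here:
 *
 *	struct dm_bio_prison_cell *cell;
 *
 *	cell = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	...
 *	dm_bio_prison_free_cell(prison, cell);
 */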

/*
 * An atomic op that combines retrieving a cell and adding a bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);
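
/*
 * Typical detain pattern (a sketch; key setup and bio handling are
 * elided).  The caller preallocates a cell so the lookup-and-add can
 * be atomic without allocating under the prison's lock; if the bio
 * joined an already held cell, the unused preallocation goes back to
 * the pool:
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell))
 *		dm_bio_prison_free_cell(prison, prealloc);
 */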

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell);
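
/*
 * Unlock sketch (illustrative).  dm_cell_release() moves the holder
 * and any inmates onto a caller-supplied list; the caller then frees
 * the cell and resubmits the bios:
 *
 *	struct bio_list bios;
 *	struct bio *bio;
 *
 *	bio_list_init(&bios);
 *	dm_cell_release(prison, cell, &bios);
 *	dm_bio_prison_free_cell(prison, cell);
 *	while ((bio = bio_list_pop(&bios)))
 *		generic_make_request(bio);
 */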

/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
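
/*
 * Deferred set sketch (illustrative; issue_read() and do_work() are
 * hypothetical helpers).  A read to a shared block takes an entry
 * before it is issued and drops it on completion; quiesced work is
 * handed back on the list passed to dm_deferred_entry_dec().  When
 * queueing new work, a return of 0 from dm_deferred_set_add_work()
 * indicates the set was already quiescent and the work was not
 * queued, so it can be performed immediately:
 *
 *	struct dm_deferred_entry *entry;
 *	LIST_HEAD(quiesced);
 *
 *	entry = dm_deferred_entry_inc(ds);
 *	issue_read(bio);
 *
 *	... later, in the read's completion path:
 *	dm_deferred_entry_dec(entry, &quiesced);
 *	... perform any work items now on the quiesced list ...
 *
 *	... deferring new work behind in-flight reads:
 *	if (!dm_deferred_set_add_work(ds, &work->list))
 *		do_work(work);
 */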

/*----------------------------------------------------------------*/

#endif