#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

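/*
 * blkg_stat is a single u64 counter and blkg_rwstat a set of u64 counters
 * split by direction (read/write) and sync/async.  Both use u64_stats_sync
 * so readers on 32bit machines see consistent 64bit values; writers must
 * still synchronize among themselves (see blkg_stat_add() below).
 */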
struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};
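
/*
 * For example, a policy could (hypothetically) lay out its per-blkg data
 * as below; the "foo" names are illustrative, not part of this interface:
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	// must come first
 *		u64			some_policy_state;
 *	};
 *
 * blkcg_policy->pd_size would then be sizeof(struct foo_grp) so that the
 * blkg_to_pd()/pd_to_blkg() conversions below operate on the embedded pd.
 */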

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
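
/*
 * A policy registers itself by filling in a blkcg_policy and calling
 * blkcg_policy_register(), and is then enabled on individual queues with
 * blkcg_activate_policy().  A minimal sketch (names are hypothetical; see
 * the throttling and CFQ policies for real users):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size		= sizeof(struct foo_grp),
 *		.cftypes		= foo_files,
 *		.pd_init_fn		= foo_pd_init,
 *		.pd_exit_fn		= foo_pd_exit,
 *		.pd_reset_stats_fn	= foo_pd_reset_stats,
 *	};
 *
 * plid is assigned by blkcg_policy_register() and shouldn't be set by hand.
 */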

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
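
/*
 * Sketch of how a policy's cgroup file write handler would typically use
 * the above (illustrative only; error handling elided):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, pol, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// apply ctx.v to blkg_to_pd(ctx.blkg, pol)
 *	blkg_conf_finish(&ctx);
 *
 * blkg_conf_prep() parses @input, resolves the target blkg and returns
 * with the queue locked; blkg_conf_finish() undoes that, so the two calls
 * must always pair.
 */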


static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

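/*
 * Return the blkcg a bio is associated with: the one recorded in
 * bio->bi_css if the bio has been explicitly associated, otherwise the
 * blkcg of the issuing task.
 */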
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	struct cgroup *pcg = blkcg->css.cgroup->parent;

	return pcg ? cgroup_to_blkcg(pcg) : NULL;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
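
/*
 * Dropping the last reference hands the blkg to __blkg_release() for
 * teardown; the rcu_head embedded in blkcg_gq exists so the actual free
 * can be deferred past an RCU grace period, keeping rcu_read_lock()
 * protected lookups safe against a blkg on its way out.
 */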

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
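
/*
 * The request allocation path is expected to pair these roughly as below
 * (illustrative sketch, all under queue_lock):
 *
 *	rl = blk_get_rl(q, bio);
 *	rq = <allocate a request from rl>;
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));	// when the request is freed
 */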

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
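
/*
 * A policy would typically feed the request or bio flags straight in, e.g.
 * (illustrative; 'stats' and 'serviced' stand for fields in the policy's
 * private data):
 *
 *	blkg_rwstat_add(&stats->serviced, rq->cmd_flags, 1);
 */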

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */