/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *
 * RAID-5 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

#include <linux/raid/bitmap.h>

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
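/*
 * Illustrative note (added for clarity, not in the original source):
 * stripes are looked up by sector.  Shifting the sector right by
 * STRIPE_SHIFT converts a 512-byte sector number into a stripe index --
 * e.g. with 4KiB pages STRIPE_SHIFT is 3, so sectors 0-7 all map to
 * stripe 0 -- and HASH_MASK then folds that index into one of NR_HASH
 * hash buckets.
 */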
#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
#if RAID5_DEBUG
#define inline
#define __inline__
#endif

static void print_raid5_conf (raid5_conf_t *conf);

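/*
 * Summary note (added for clarity): dropping the last reference to a
 * stripe either queues it for more work or retires it.  Stripes with
 * STRIPE_HANDLE set go to handle_list (or to delayed_list/bitmap_list
 * when pre-reading or bitmap writes must be held off) and the raid5d
 * thread is woken; otherwise the stripe returns to inactive_list and
 * waiters in get_active_stripe() may be woken.
 */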
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		if (!list_empty(&sh->lru))
			BUG();
		if (atomic_read(&conf->active_stripes)==0)
			BUG();
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 conf->seq_write == sh->bm_seq)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			list_add_tail(&sh->lru, &conf->inactive_list);
			atomic_dec(&conf->active_stripes);
			if (!conf->inactive_blocked ||
			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
				wake_up(&conf->wait_for_stripe);
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block (struct stripe_head *sh, int i);

static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;

	if (atomic_read(&sh->count) != 0)
		BUG();
	if (test_bit(STRIPE_HANDLE, &sh->state))
		BUG();

	CHECK_DEVLOCK();
	PRINTK("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->sector = sector;
	sh->pd_idx = pd_idx;
	sh->state = 0;

	for (i=disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk("sector=%llx i=%d %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector)
			return sh;
	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);

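/*
 * Note (added for clarity): a cache hit returns the existing stripe_head;
 * a miss claims one from inactive_list.  When the cache is exhausted and
 * 'noblock' is not set, the caller sleeps until active_stripes falls
 * below 3/4 of max_nr_stripes; inactive_blocked provides the hysteresis
 * so a burst of frees accumulates before waiters are released.
 */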
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
					     int pd_idx, int noblock)
{
	struct stripe_head *sh;

	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    unplug_slaves(conf->mddev);
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, pd_idx);
		} else {
			if (atomic_read(&sh->count)) {
				if (!list_empty(&sh->lru))
					BUG();
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	kmem_cache_t *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--) {
		if (!grow_one_stripe(conf))
			return 1;
	}
	return 0;
}
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	kmem_cache_t *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev);
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}


static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	if (atomic_read(&sh->count))
		BUG();
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

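/*
 * Note (added for clarity): together with handle_stripe(), the endio
 * handlers below implement read-error recovery.  A failed read sets
 * R5_ReadError and the block is later rewritten from parity (R5_ReWrite),
 * then read back; only if that cycle fails, the array is already
 * degraded, or too many read errors accumulate is md_error() called
 * on the device.
 */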
static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
				  int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	if (uptodate) {
#if 0
		struct bio *bio;
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		/* we can return a buffer if we bypassed the cache or
		 * if the top buffer is not in highmem.  If there are
		 * multiple buffers, leave the extra work to
		 * handle_stripe
		 */
		buffer = sh->bh_read[i];
		if (buffer &&
		    (!PageHighMem(buffer->b_page)
		     || buffer->b_page == bh->b_page )
			) {
			sh->bh_read[i] = buffer->b_reqnext;
			buffer->b_reqnext = NULL;
		} else
			buffer = NULL;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (sh->bh_page[i]==bh->b_page)
			set_buffer_uptodate(bh);
		if (buffer) {
			if (buffer->b_page != bh->b_page)
				memcpy(buffer->b_data, bh->b_data, bh->b_size);
			buffer->b_end_io(buffer, 1);
		}
#else
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			printk(KERN_INFO "raid5: read error corrected!!\n");
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		int retry = 0;
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&conf->disks[i].rdev->read_errors);
		if (conf->mddev->degraded)
			printk(KERN_WARNING "raid5: read error not correctable.\n");
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
		else if (atomic_read(&conf->disks[i].rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5: Too many read errors, failing device.\n");
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, conf->disks[i].rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
	/* must restore b_page before unlocking buffer... */
	if (sh->bh_page[i] != bh->b_page) {
		bh->b_page = sh->bh_page[i];
		bh->b_data = page_address(bh->b_page);
		clear_buffer_uptodate(bh);
	}
#endif
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
	return 0;
}

static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
				    int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	unsigned long flags;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	spin_lock_irqsave(&conf->device_lock, flags);
	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	return 0;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block (struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	if (i != sh->pd_idx)
		dev->sector = compute_blocknr(sh, i);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	PRINTK("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		mddev->sb_dirty = 1;
		if (test_bit(In_sync, &rdev->flags)) {
			conf->working_disks--;
			mddev->degraded++;
			conf->failed_disks++;
			clear_bit(In_sync, &rdev->flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk (KERN_ALERT
			"raid5: Disk failure on %s, disabling device."
			" Operation continuing on %d devices\n",
			bdevname(rdev->bdev,b), conf->working_disks);
	}
}

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
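/*
 * Worked example (illustrative values, not from the original source):
 * with raid_disks = 4, data_disks = 3, 64KiB chunks (sectors_per_chunk
 * = 128) and ALGORITHM_LEFT_SYMMETRIC, r_sector = 1000 gives
 * chunk_offset = 104, chunk_number = 7, stripe = 2 and dd_idx = 7 % 3 = 1.
 * Then pd_idx = 3 - 2 % 4 = 1 and the symmetric rotation gives
 * dd_idx = (1 + 1 + 1) % 4 = 3; the sector within that disk is
 * 2*128 + 104 = 360.
 */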
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
			unsigned int data_disks, unsigned int * dd_idx,
			unsigned int * pd_idx, raid5_conf_t *conf)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	if (conf->level == 4)
		*pd_idx = data_disks;
	else switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);


	switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}



/*
 * Copy data between a page in the stripe cache, and a bio.
 * There are no alignment or size guarantees between the page or the
 * bio except that there is some overlap.
 * All iovecs in the bio must be considered.
 */
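/*
 * Note (added for clarity): page_offset below is the position of the
 * bio's first byte relative to the start of the stripe page, in bytes;
 * it is negative when the bio begins before this page, in which case
 * the leading bytes of each bio segment are skipped via b_offset.
 */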
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}

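/*
 * Note (added for clarity): xor_block() folds sources ptr[1..count-1]
 * into the accumulator ptr[0].  check_xor() flushes whenever the ptr[]
 * array fills (MAX_XOR_BLOCKS), keeping the accumulated result as
 * ptr[0] and resetting count to 1 so more sources can be gathered.
 */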
#define check_xor()	do {						\
			   if (count == MAX_XOR_BLOCKS) {		\
				xor_block(count, STRIPE_SIZE, ptr);	\
				count = 1;				\
			   }						\
			} while(0)


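/*
 * Note (added for clarity): this relies on the RAID-5 identity
 * P = D0 ^ D1 ^ ... ^ Dn-1, so any single missing block -- data or
 * parity -- equals the XOR of all the remaining up-to-date blocks.
 */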
static void compute_block(struct stripe_head *sh, int dd_idx)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	void *ptr[MAX_XOR_BLOCKS], *p;

	PRINTK("compute_block, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	ptr[0] = page_address(sh->dev[dd_idx].page);
	memset(ptr[0], 0, STRIPE_SIZE);
	count = 1;
	for (i = disks ; i--; ) {
		if (i == dd_idx)
			continue;
		p = page_address(sh->dev[i].page);
		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
			ptr[count++] = p;
		else
			printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
				" not present\n", dd_idx,
				(unsigned long long)sh->sector, i);

		check_xor();
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}

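/*
 * Note (added for clarity): for READ_MODIFY_WRITE the old contents of
 * the blocks being rewritten are XORed out of the old parity and the
 * new contents XORed back in (P' = P ^ old ^ new); RECONSTRUCT_WRITE
 * instead rebuilds parity from scratch by XORing every data block;
 * CHECK_PARITY XORs all data blocks into the parity page in place,
 * which should leave zeroes if the stripe is consistent.
 */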
static void compute_parity(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
	void *ptr[MAX_XOR_BLOCKS];
	struct bio *chosen;

	PRINTK("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	count = 1;
	ptr[0] = page_address(sh->dev[pd_idx].page);
	switch(method) {
	case READ_MODIFY_WRITE:
		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
			BUG();
		for (i=disks ; i-- ;) {
			if (i==pd_idx)
				continue;
			if (sh->dev[i].towrite &&
			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
				ptr[count++] = page_address(sh->dev[i].page);
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
				check_xor();
			}
		}
		break;
	case RECONSTRUCT_WRITE:
		memset(ptr[0], 0, STRIPE_SIZE);
		for (i= disks; i-- ;)
			if (i!=pd_idx && sh->dev[i].towrite) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		break;
	}
	if (count>1) {
		xor_block(count, STRIPE_SIZE, ptr);
		count = 1;
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	switch(method) {
	case RECONSTRUCT_WRITE:
	case CHECK_PARITY:
		for (i=disks; i--;)
			if (i != pd_idx) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
		break;
	case READ_MODIFY_WRITE:
		for (i = disks; i--;)
			if (sh->dev[i].written) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);

	if (method != CHECK_PARITY) {
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
	} else
		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}

/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	PRINTK("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
		BUG();
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments ++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		sh->bm_seq = conf->seq_write;
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}


/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * Parity calculations are done inside the stripe lock;
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num=0;
	struct r5dev *dev;

	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
		(unsigned long long)sh->sector, atomic_read(&sh->count),
		sh->pd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	syncing = test_bit(STRIPE_SYNCING, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			PRINTK("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;


		if (dev->toread) to_read++;
		if (dev->towrite) {
			to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				non_overwrite++;
		}
		if (dev->written) written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			failed++;
			failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();
	PRINTK("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		locked, uptodate, to_read, to_write, failed, failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (failed > 1 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
				mdk_rdev_t *rdev;
				rcu_read_lock();
				rdev = rcu_dereference(conf->disks[i].rdev);
				if (rdev && test_bit(In_sync, &rdev->flags))
					/* multiple read failures in one stripe */
					md_error(conf->mddev, rdev);
				rcu_read_unlock();
			}

			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);

			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = nextbi;
			}
			/* and fail all 'written' */
			bi = sh->dev[i].written;
			sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = bi2;
			}

			/* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
				sh->dev[i].toread = NULL;
				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);
				if (bi) to_read--;
				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
					clear_bit(BIO_UPTODATE, &bi->bi_flags);
					if (--bi->bi_phys_segments == 0) {
						bi->bi_next = return_bi;
						return_bi = bi;
					}
					bi = nextbi;
				}
			}
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
	if (failed > 1 && syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( written &&
	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
		test_bit(R5_UPTODATE, &dev->flags))
	       || (failed == 1 && failed_num == sh->pd_idx))
	    ) {
		/* any written block on an uptodate or failed drive can be returned.
		 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
		 * never LOCKED, so we don't need to test 'failed' directly.
		 */
		for (i=disks; i--; )
			if (sh->dev[i].written) {
				dev = &sh->dev[i];
				if (!test_bit(R5_LOCKED, &dev->flags) &&
				    test_bit(R5_UPTODATE, &dev->flags) ) {
					/* We can return any write requests */
					struct bio *wbi, *wbi2;
					int bitmap_end = 0;
					PRINTK("Return write for disc %d\n", i);
					spin_lock_irq(&conf->device_lock);
					wbi = dev->written;
					dev->written = NULL;
					while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
						wbi2 = r5_next_bio(wbi, dev->sector);
						if (--wbi->bi_phys_segments == 0) {
							md_write_end(conf->mddev);
							wbi->bi_next = return_bi;
							return_bi = wbi;
						}
						wbi = wbi2;
					}
					if (dev->towrite == NULL)
						bitmap_end = 1;
					spin_unlock_irq(&conf->device_lock);
					if (bitmap_end)
						bitmap_endwrite(conf->mddev->bitmap, sh->sector,
								STRIPE_SECTORS,
								!test_bit(STRIPE_DEGRADED, &sh->state), 0);
				}
			}
	}

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
		for (i=disks; i--;) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
			    (dev->toread ||
			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
			     syncing ||
			     (failed && (sh->dev[failed_num].toread ||
					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
				    )
				) {
				/* we would like to get this block, possibly
				 * by computing it, but we might not be able to
				 */
				if (uptodate == disks-1) {
					PRINTK("Computing block %d\n", i);
					compute_block(sh, i);
					uptodate++;
				} else if (test_bit(R5_Insync, &dev->flags)) {
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
#if 0
					/* if I am just reading this block and we don't have
					   a failed drive, or any pending writes then sidestep the cache */
					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
					    ! syncing && !failed && !to_write) {
						sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
						sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
					}
#endif
					locked++;
					PRINTK("Reading block %d (sync=%d)\n",
						i, syncing);
				}
			}
		}
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	/* now to consider writing and what else, if anything should be read */
	if (to_write) {
		int rmw=0, rcw=0;
		for (i=disks ; i--;) {
			/* would I have to read this buffer for read_modify_write */
			dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
|| sh->bh_page[i]!=bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)
/* && !(!mddev->insync && i == sh->pd_idx) */
					)
					rmw++;
				else rmw += 2*disks;  /* cannot read it */
			}
			/* Would I have to read this buffer for reconstruct_write */
			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
|| sh->bh_page[i] != bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)) rcw++;
				else rcw += 2*disks;
			}
		}
		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
			(unsigned long long)sh->sector, rmw, rcw);
		set_bit(STRIPE_HANDLE, &sh->state);
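		/* Note (added for clarity): rmw counted the reads needed to
		 * update parity in place (old data plus old parity), rcw the
		 * reads needed to rebuild parity from every data block; the
		 * cheaper strategy wins, with 2*disks acting as an effectively
		 * infinite cost for blocks that cannot be read.
		 */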
1342 if (rmw < rcw && rmw > 0)
1343 /* prefer read-modify-write, but need to get some data */
1344 for (i=disks; i--;) {
1345 dev = &sh->dev[i];
1346 if ((dev->towrite || i == sh->pd_idx) &&
1347 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1348 test_bit(R5_Insync, &dev->flags)) {
1349 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1350 {
1351 PRINTK("Read_old block %d for r-m-w\n", i);
1352 set_bit(R5_LOCKED, &dev->flags);
1353 set_bit(R5_Wantread, &dev->flags);
1354 locked++;
1355 } else {
1356 set_bit(STRIPE_DELAYED, &sh->state);
1357 set_bit(STRIPE_HANDLE, &sh->state);
1358 }
1359 }
1360 }
1361 if (rcw <= rmw && rcw > 0)
1362 /* want reconstruct write, but need to get some data */
1363 for (i=disks; i--;) {
1364 dev = &sh->dev[i];
1365 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1366 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1367 test_bit(R5_Insync, &dev->flags)) {
1368 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1369 {
1370 PRINTK("Read_old block %d for Reconstruct\n", i);
1371 set_bit(R5_LOCKED, &dev->flags);
1372 set_bit(R5_Wantread, &dev->flags);
1373 locked++;
1374 } else {
1375 set_bit(STRIPE_DELAYED, &sh->state);
1376 set_bit(STRIPE_HANDLE, &sh->state);
1377 }
1378 }
1379 }
1380 /* now if nothing is locked, and if we have enough data, we can start a write request */
NeilBrown72626682005-09-09 16:23:54 -07001381 if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1382 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 PRINTK("Computing parity...\n");
1384 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1385 /* now every locked buffer is ready to be written */
1386 for (i=disks; i--;)
1387 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1388 PRINTK("Writing block %d\n", i);
1389 locked++;
1390 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1391 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1392 || (i==sh->pd_idx && failed == 0))
1393 set_bit(STRIPE_INSYNC, &sh->state);
1394 }
1395 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1396 atomic_dec(&conf->preread_active_stripes);
1397 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1398 md_wakeup_thread(conf->mddev->thread);
1399 }
1400 }
1401 }
1402
1403 /* maybe we need to check and possibly fix the parity for this stripe
1404 * Any reads will already have been scheduled, so we just see if enough data
1405 * is available
1406 */
1407 if (syncing && locked == 0 &&
NeilBrown14f8d262006-01-06 00:20:14 -08001408 !test_bit(STRIPE_INSYNC, &sh->state)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 set_bit(STRIPE_HANDLE, &sh->state);
1410 if (failed == 0) {
1411 char *pagea;
1412 if (uptodate != disks)
1413 BUG();
1414 compute_parity(sh, CHECK_PARITY);
1415 uptodate--;
1416 pagea = page_address(sh->dev[sh->pd_idx].page);
1417 if ((*(u32*)pagea) == 0 &&
1418 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1419 /* parity is correct (on disc, not in buffer any more) */
1420 set_bit(STRIPE_INSYNC, &sh->state);
NeilBrown9d888832005-11-08 21:39:26 -08001421 } else {
1422 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1423 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1424 /* don't try to repair!! */
1425 set_bit(STRIPE_INSYNC, &sh->state);
NeilBrown14f8d262006-01-06 00:20:14 -08001426 else {
1427 compute_block(sh, sh->pd_idx);
1428 uptodate++;
1429 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 }
1431 }
1432 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
NeilBrown14f8d262006-01-06 00:20:14 -08001433 /* either failed parity check, or recovery is happening */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 if (failed==0)
1435 failed_num = sh->pd_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 dev = &sh->dev[failed_num];
NeilBrown14f8d262006-01-06 00:20:14 -08001437 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1438 BUG_ON(uptodate != disks);
1439
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 set_bit(R5_LOCKED, &dev->flags);
1441 set_bit(R5_Wantwrite, &dev->flags);
NeilBrown72626682005-09-09 16:23:54 -07001442 clear_bit(STRIPE_DEGRADED, &sh->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 locked++;
1444 set_bit(STRIPE_INSYNC, &sh->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 }
1446 }
1447 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1448 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1449 clear_bit(STRIPE_SYNCING, &sh->state);
1450 }
NeilBrown4e5314b2005-11-08 21:39:22 -08001451
1452 /* If the failed drive is just a ReadError, then we might need to progress
1453 * the repair/check process
1454 */
NeilBrownba22dcb2005-11-08 21:39:31 -08001455 if (failed == 1 && ! conf->mddev->ro &&
1456 test_bit(R5_ReadError, &sh->dev[failed_num].flags)
NeilBrown4e5314b2005-11-08 21:39:22 -08001457 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1458 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1459 ) {
1460 dev = &sh->dev[failed_num];
1461 if (!test_bit(R5_ReWrite, &dev->flags)) {
1462 set_bit(R5_Wantwrite, &dev->flags);
1463 set_bit(R5_ReWrite, &dev->flags);
1464 set_bit(R5_LOCKED, &dev->flags);
1465 } else {
1466 /* let's read it back */
1467 set_bit(R5_Wantread, &dev->flags);
1468 set_bit(R5_LOCKED, &dev->flags);
1469 }
1470 }
1471
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 spin_unlock(&sh->lock);
1473
1474 while ((bi=return_bi)) {
1475 int bytes = bi->bi_size;
1476
1477 return_bi = bi->bi_next;
1478 bi->bi_next = NULL;
1479 bi->bi_size = 0;
1480 bi->bi_end_io(bi, bytes, 0);
1481 }
1482 for (i=disks; i-- ;) {
1483 int rw;
1484 struct bio *bi;
1485 mdk_rdev_t *rdev;
1486 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1487 rw = 1;
1488 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1489 rw = 0;
1490 else
1491 continue;
1492
1493 bi = &sh->dev[i].req;
1494
1495 bi->bi_rw = rw;
1496 if (rw)
1497 bi->bi_end_io = raid5_end_write_request;
1498 else
1499 bi->bi_end_io = raid5_end_read_request;
1500
1501 rcu_read_lock();
Suzanne Woodd6065f72005-11-08 21:39:27 -08001502 rdev = rcu_dereference(conf->disks[i].rdev);
NeilBrownb2d444d2005-11-08 21:39:31 -08001503 if (rdev && test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 rdev = NULL;
1505 if (rdev)
1506 atomic_inc(&rdev->nr_pending);
1507 rcu_read_unlock();
1508
1509 if (rdev) {
NeilBrown9910f162006-01-06 00:20:24 -08001510 if (syncing)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1512
1513 bi->bi_bdev = rdev->bdev;
1514 PRINTK("for %llu schedule op %ld on disc %d\n",
1515 (unsigned long long)sh->sector, bi->bi_rw, i);
1516 atomic_inc(&sh->count);
1517 bi->bi_sector = sh->sector + rdev->data_offset;
1518 bi->bi_flags = 1 << BIO_UPTODATE;
1519 bi->bi_vcnt = 1;
1520 bi->bi_max_vecs = 1;
1521 bi->bi_idx = 0;
1522 bi->bi_io_vec = &sh->dev[i].vec;
1523 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1524 bi->bi_io_vec[0].bv_offset = 0;
1525 bi->bi_size = STRIPE_SIZE;
1526 bi->bi_next = NULL;
NeilBrown4dbcdc72006-01-06 00:20:52 -08001527 if (rw == WRITE &&
1528 test_bit(R5_ReWrite, &sh->dev[i].flags))
1529 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 generic_make_request(bi);
1531 } else {
NeilBrown72626682005-09-09 16:23:54 -07001532 if (rw == 1)
1533 set_bit(STRIPE_DEGRADED, &sh->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 PRINTK("skip op %ld on disc %d for sector %llu\n",
1535 bi->bi_rw, i, (unsigned long long)sh->sector);
1536 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1537 set_bit(STRIPE_HANDLE, &sh->state);
1538 }
1539 }
1540}
1541
Arjan van de Ven858119e2006-01-14 13:20:43 -08001542static void raid5_activate_delayed(raid5_conf_t *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543{
1544 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1545 while (!list_empty(&conf->delayed_list)) {
1546 struct list_head *l = conf->delayed_list.next;
1547 struct stripe_head *sh;
1548 sh = list_entry(l, struct stripe_head, lru);
1549 list_del_init(l);
1550 clear_bit(STRIPE_DELAYED, &sh->state);
1551 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1552 atomic_inc(&conf->preread_active_stripes);
1553 list_add_tail(&sh->lru, &conf->handle_list);
1554 }
1555 }
1556}
1557
Arjan van de Ven858119e2006-01-14 13:20:43 -08001558static void activate_bit_delay(raid5_conf_t *conf)
NeilBrown72626682005-09-09 16:23:54 -07001559{
1560 /* device_lock is held */
1561 struct list_head head;
1562 list_add(&head, &conf->bitmap_list);
1563 list_del_init(&conf->bitmap_list);
1564 while (!list_empty(&head)) {
1565 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1566 list_del_init(&sh->lru);
1567 atomic_inc(&sh->count);
1568 __release_stripe(conf, sh);
1569 }
1570}
1571
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572static void unplug_slaves(mddev_t *mddev)
1573{
1574 raid5_conf_t *conf = mddev_to_conf(mddev);
1575 int i;
1576
1577 rcu_read_lock();
1578 for (i=0; i<mddev->raid_disks; i++) {
Suzanne Woodd6065f72005-11-08 21:39:27 -08001579 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
NeilBrownb2d444d2005-11-08 21:39:31 -08001580 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1582
1583 atomic_inc(&rdev->nr_pending);
1584 rcu_read_unlock();
1585
1586 if (r_queue->unplug_fn)
1587 r_queue->unplug_fn(r_queue);
1588
1589 rdev_dec_pending(rdev, mddev);
1590 rcu_read_lock();
1591 }
1592 }
1593 rcu_read_unlock();
1594}
1595
1596static void raid5_unplug_device(request_queue_t *q)
1597{
1598 mddev_t *mddev = q->queuedata;
1599 raid5_conf_t *conf = mddev_to_conf(mddev);
1600 unsigned long flags;
1601
1602 spin_lock_irqsave(&conf->device_lock, flags);
1603
NeilBrown72626682005-09-09 16:23:54 -07001604 if (blk_remove_plug(q)) {
1605 conf->seq_flush++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 raid5_activate_delayed(conf);
NeilBrown72626682005-09-09 16:23:54 -07001607 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 md_wakeup_thread(mddev->thread);
1609
1610 spin_unlock_irqrestore(&conf->device_lock, flags);
1611
1612 unplug_slaves(mddev);
1613}
1614
static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

static inline void raid5_plug_device(raid5_conf_t *conf)
{
	spin_lock_irq(&conf->device_lock);
	blk_plug_device(conf->mddev->queue);
	spin_unlock_irq(&conf->device_lock);
}

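/*
 * Split an incoming bio into STRIPE_SECTORS-sized pieces and attach
 * each piece to the stripe that owns it.  For example, with 4k pages
 * STRIPE_SECTORS is 8, so a 64k write touches 16 stripe units, each
 * mapped by raid5_compute_sector() to its (dd_idx, pd_idx) position.
 * bi_phys_segments is borrowed as a count of stripes still holding a
 * piece of this bio; the bio is completed when it drops to zero.
 */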
static int make_request (request_queue_t *q, struct bio * bi)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	const unsigned int raid_disks = conf->raid_disks;
	const unsigned int data_disks = raid_disks - 1;
	unsigned int dd_idx, pd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);

		new_sector = raid5_compute_sector(logical_sector,
			raid_disks, data_disks, &dd_idx, &pd_idx, conf);

		PRINTK("raid5: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

	retry:
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
		if (sh) {
			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				raid5_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			raid5_plug_device(conf);
			handle_stripe(sh);
			release_stripe(sh);

		} else {
			/* cannot get stripe for read-ahead, just give up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}

	}
	spin_lock_irq(&conf->device_lock);
	if (--bi->bi_phys_segments == 0) {
		int bytes = bi->bi_size;

		if ( bio_data_dir(bi) == WRITE )
			md_write_end(mddev);
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	spin_unlock_irq(&conf->device_lock);
	return 0;
}

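/*
 * Resync/recover the stripe at sector_nr and return the number of
 * sectors dealt with.  When the bitmap shows a region is already in
 * sync, the region is reported via *skipped instead, so the caller
 * can jump ahead without touching the disks.
 */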
/* FIXME go_faster isn't used */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t x;
	unsigned long stripe;
	int chunk_offset;
	int dd_idx, pd_idx;
	sector_t first_sector;
	int raid_disks = conf->raid_disks;
	int data_disks = raid_disks-1;
	sector_t max_sector = mddev->size << 1;
	int sync_blocks;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}
	/* if there are one or more failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = (mddev->size << 1) - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	x = sector_nr;
	chunk_offset = sector_div(x, sectors_per_chunk);
	stripe = x;
	BUG_ON(x != stripe);

	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}

/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d (mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int handled;

	PRINTK("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct list_head *first;

		if (conf->seq_flush - conf->seq_write > 0) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		if (list_empty(&conf->handle_list) &&
		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
		    !blk_queue_plugged(mddev->queue) &&
		    !list_empty(&conf->delayed_list))
			raid5_activate_delayed(conf);

		if (list_empty(&conf->handle_list))
			break;

		first = conf->handle_list.next;
		sh = list_entry(first, struct stripe_head, lru);

		list_del_init(first);
		atomic_inc(&sh->count);
		if (atomic_read(&sh->count) != 1)
			BUG();
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	PRINTK("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	unplug_slaves(mddev);

	PRINTK("--- raid5d inactive\n");
}

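/*
 * sysfs attributes, exported under /sys/block/mdX/md/.  Writing to
 * stripe_cache_size resizes the stripe cache one stripe at a time,
 * e.g. (assuming an array named md0):
 *
 *	echo 512 > /sys/block/md0/md/stripe_cache_size
 *
 * doubles the default cache of NR_STRIPES (256) entries.
 */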
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	char *end;
	int new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	new = simple_strtoul(page, &end, 10);
	if (!*page || (*end && *end != '\n'))
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};

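/*
 * Array start-up: allocate the conf structure and stripe hash table,
 * record the member devices, sanity-check level/chunk/layout, start
 * the raid5d thread and populate the stripe cache.  Each cached
 * stripe pins one page per member device, so the default 256 stripes
 * on a 4-disk array cost roughly 256 * 4 * 4k = 4MB of page memory
 * (assuming 4k pages), plus the per-stripe bookkeeping counted in
 * 'memory' below.
 */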
static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	struct list_head *tmp;

	if (mddev->level != 5 && mddev->level != 4) {
		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}

	mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);

	PRINTK("raid5: run(%s) called.\n", mdname(mddev));

	ITERATE_RDEV(mddev,rdev,tmp) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= mddev->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
				" disk %d\n", bdevname(rdev->bdev,b),
				raid_disk);
			conf->working_disks++;
		}
	}

	conf->raid_disks = mddev->raid_disks;
	/*
	 * 0 for a fully functional array, 1 for a degraded array.
	 */
	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;

	/* device size must be a multiple of chunk size */
	mddev->size &= ~(mddev->chunk_size/1024 -1);
	mddev->resync_max_sectors = mddev->size << 1;

	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
			conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR
			"raid5: unsupported parity algorithm %d for %s\n",
			conf->algorithm, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > 1) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
			" (%d/%d failed)\n",
			mdname(mddev), conf->failed_disks, conf->raid_disks);
		goto abort;
	}

	if (mddev->degraded == 1 &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "raid5: starting dirty degraded array: %s"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "raid5: cannot start dirty degraded array for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	{
		mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
		if (!mddev->thread) {
			printk(KERN_ERR
				"raid5: couldn't allocate thread for %s\n",
				mdname(mddev));
			goto abort;
		}
	}
	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
			"raid5: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
			memory, mdname(mddev));

	if (mddev->degraded == 0)
		printk("raid5: raid level %d set %s active with %d out of %d"
			" devices, algorithm %d\n", conf->level, mdname(mddev),
			mddev->raid_disks-mddev->degraded, mddev->raid_disks,
			conf->algorithm);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
			" out of %d devices, algorithm %d\n", conf->level,
			mdname(mddev), mddev->raid_disks - mddev->degraded,
			mddev->raid_disks, conf->algorithm);

	print_raid5_conf(conf);

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
	 */
	{
		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
			/ PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	/* Ok, everything is just fine now */
	sysfs_create_group(&mddev->kobj, &raid5_attrs_group);

	mddev->queue->unplug_fn = raid5_unplug_device;
	mddev->queue->issue_flush_fn = raid5_issue_flush;

	mddev->array_size = mddev->size * (mddev->raid_disks - 1);
	return 0;
abort:
	if (conf) {
		print_raid5_conf(conf);
		kfree(conf->disks);
		kfree(conf->stripe_hashtbl);
		kfree(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}


static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	shrink_stripes(conf);
	kfree(conf->stripe_hashtbl);
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
	kfree(conf->disks);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

#if RAID5_DEBUG
static void print_sh (struct stripe_head *sh)
{
	int i;

	printk("sh %llu, pd_idx %d, state %ld.\n",
		(unsigned long long)sh->sector, sh->pd_idx, sh->state);
	printk("sh %llu, count %d.\n",
		(unsigned long long)sh->sector, atomic_read(&sh->count));
	printk("sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->raid_conf->raid_disks; i++) {
		printk("(cache%d: %p %ld) ",
			i, sh->dev[i].page, sh->dev[i].flags);
	}
	printk("\n");
}

static void printall (raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif

static void status (struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	int i;

	seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			    conf->disks[i].rdev &&
			    test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
#if RAID5_DEBUG
#define D(x) \
	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
	printall(conf);
#endif
}

static void print_raid5_conf (raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk("RAID5 conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
		conf->working_disks, conf->failed_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(" disk %d, o:%d, dev:%s\n",
				i, !test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}

static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_bit(In_sync, &tmp->rdev->flags)) {
			mddev->degraded--;
			conf->failed_disks--;
			conf->working_disks++;
			set_bit(In_sync, &tmp->rdev->flags);
		}
	}
	print_raid5_conf(conf);
	return 0;
}

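/*
 * Hot-remove a member: clear p->rdev, then synchronize_rcu() so that
 * any reader that picked up the old pointer under rcu_read_lock()
 * (e.g. unplug_slaves) has finished.  If nr_pending became non-zero
 * in the meantime we lost a race, so the pointer is restored and the
 * caller must retry later.
 */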
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_raid5_conf(conf);
	return err;
}

static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int found = 0;
	int disk;
	struct disk_info *p;

	if (mddev->degraded > 1)
		/* no point adding a device */
		return 0;

	/*
	 * find the disk ...
	 */
	for (disk=0; disk < mddev->raid_disks; disk++)
		if ((p=conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			found = 1;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return found;
}

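/*
 * With n member devices, one device's worth of space holds parity, so
 * array_size (in 1k blocks) is sectors * (n-1) / 2.  For example, if
 * each member of a 4-disk array grows to 200000 sectors, the array
 * becomes 200000 * 3 / 2 = 300000 blocks.
 */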
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = sectors /2;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}

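/*
 * raid4 reuses all of the raid5 machinery; the only difference is that
 * raid4 keeps parity on a fixed disk rather than rotating it, which
 * the sector-mapping code selects based on conf->level.  The two
 * personalities therefore share every method and differ only in
 * .name and .level.
 */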
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.quiesce	= raid5_quiesce,
};

static int __init raid5_init(void)
{
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");