Skip to content

Commit 55ce74d

Browse files
author
NeilBrown
committed
md/raid1: ensure device failure recorded before write request returns.
When a write to one of the legs of a RAID1 fails, the failure is recorded in the metadata of the other leg(s) so that after a restart the data on the failed drive wont be trusted even if that drive seems to be working again (maybe a cable was unplugged). Similarly when we record a bad-block in response to a write failure, we must not let the write complete until the bad-block update is safe. Currently there is no interlock between the write request completing and the metadata update. So it is possible that the write will complete, the app will confirm success in some way, and then the machine will crash before the metadata update completes. This is an extremely small hole for a racy to fit in, but it is theoretically possible and so should be closed. So: - set MD_CHANGE_PENDING when requesting a metadata update for a failed device, so we can know with certainty when it completes - queue requests that experienced an error on a new queue which is only processed after the metadata update completes - call raid_end_bio_io() on bios in that queue when the time comes. Signed-off-by: NeilBrown <[email protected]>
1 parent 18b9f67 commit 55ce74d

File tree

3 files changed

+34
-1
lines changed

3 files changed

+34
-1
lines changed

drivers/md/md.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8629,6 +8629,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
86298629
/* Make sure they get written out promptly */
86308630
sysfs_notify_dirent_safe(rdev->sysfs_state);
86318631
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
8632+
set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
86328633
md_wakeup_thread(rdev->mddev->thread);
86338634
}
86348635
return rv;

drivers/md/raid1.c

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1508,6 +1508,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
15081508
*/
15091509
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
15101510
set_bit(MD_CHANGE_DEVS, &mddev->flags);
1511+
set_bit(MD_CHANGE_PENDING, &mddev->flags);
15111512
printk(KERN_ALERT
15121513
"md/raid1:%s: Disk failure on %s, disabling device.\n"
15131514
"md/raid1:%s: Operation continuing on %d devices.\n",
@@ -2289,6 +2290,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
22892290
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
22902291
{
22912292
int m;
2293+
bool fail = false;
22922294
for (m = 0; m < conf->raid_disks * 2 ; m++)
22932295
if (r1_bio->bios[m] == IO_MADE_GOOD) {
22942296
struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -2301,6 +2303,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
23012303
* narrow down and record precise write
23022304
* errors.
23032305
*/
2306+
fail = true;
23042307
if (!narrow_write_error(r1_bio, m)) {
23052308
md_error(conf->mddev,
23062309
conf->mirrors[m].rdev);
@@ -2312,7 +2315,13 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
23122315
}
23132316
if (test_bit(R1BIO_WriteError, &r1_bio->state))
23142317
close_write(r1_bio);
2315-
raid_end_bio_io(r1_bio);
2318+
if (fail) {
2319+
spin_lock_irq(&conf->device_lock);
2320+
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2321+
spin_unlock_irq(&conf->device_lock);
2322+
md_wakeup_thread(conf->mddev->thread);
2323+
} else
2324+
raid_end_bio_io(r1_bio);
23162325
}
23172326

23182327
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2418,6 +2427,23 @@ static void raid1d(struct md_thread *thread)
24182427

24192428
md_check_recovery(mddev);
24202429

2430+
if (!list_empty_careful(&conf->bio_end_io_list) &&
2431+
!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2432+
LIST_HEAD(tmp);
2433+
spin_lock_irqsave(&conf->device_lock, flags);
2434+
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2435+
list_add(&tmp, &conf->bio_end_io_list);
2436+
list_del_init(&conf->bio_end_io_list);
2437+
}
2438+
spin_unlock_irqrestore(&conf->device_lock, flags);
2439+
while (!list_empty(&tmp)) {
2440+
r1_bio = list_first_entry(&conf->bio_end_io_list,
2441+
struct r1bio, retry_list);
2442+
list_del(&r1_bio->retry_list);
2443+
raid_end_bio_io(r1_bio);
2444+
}
2445+
}
2446+
24212447
blk_start_plug(&plug);
24222448
for (;;) {
24232449

@@ -2819,6 +2845,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
28192845
conf->raid_disks = mddev->raid_disks;
28202846
conf->mddev = mddev;
28212847
INIT_LIST_HEAD(&conf->retry_list);
2848+
INIT_LIST_HEAD(&conf->bio_end_io_list);
28222849

28232850
spin_lock_init(&conf->resync_lock);
28242851
init_waitqueue_head(&conf->wait_barrier);

drivers/md/raid1.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,11 @@ struct r1conf {
6161
* block, or anything else.
6262
*/
6363
struct list_head retry_list;
64+
/* A separate list of r1bio which just need raid_end_bio_io called.
65+
* This mustn't happen for writes which had any errors if the superblock
66+
* needs to be written.
67+
*/
68+
struct list_head bio_end_io_list;
6469

6570
/* queue pending writes to be submitted on unplug */
6671
struct bio_list pending_bio_list;

0 commit comments

Comments (0)