Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
md/raid10: Refactor raid10_make_request
Refactor raid10_make_request into separate read and write functions to clean up the code.

Shaohua: add the recovery check back to the read path.

Signed-off-by: Robert LeBlanc <robert@leblancnet.us>
Signed-off-by: Shaohua Li <shli@fb.com>
This commit is contained in:
parent 3b046a97cb
commit bb5f1ed70b

1 changed file with 140 additions and 105 deletions
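For orientation, here is a condensed sketch of the refactored entry point as it looks after this patch (paraphrased from the final hunk of the diff below; the comments are added here for explanation and are not part of the patch):

static void __make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;

	/* Allocate one r10_bio describing the whole incoming request. */
	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
	r10_bio->master_bio = bio;
	r10_bio->sectors = bio_sectors(bio);
	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;

	/* bi_phys_segments counts not-yet-completed sub-requests. */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	/* Dispatch to the new direction-specific helpers. */
	if (bio_data_dir(bio) == READ)
		raid10_read_request(mddev, bio, r10_bio);
	else
		raid10_write_request(mddev, bio, r10_bio);
}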
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	kfree(plug);
 }
 
-static void __make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+				struct r10bio *r10_bio)
 {
 	struct r10conf *conf = mddev->private;
-	struct r10bio *r10_bio;
 	struct bio *read_bio;
+	const int op = bio_op(bio);
+	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+	int sectors_handled;
+	int max_sectors;
+	sector_t sectors;
+	struct md_rdev *rdev;
+	int slot;
+
+	/*
+	 * Register the new request and wait if the reconstruction
+	 * thread has put up a bar for new requests.
+	 * Continue immediately if no resync is active currently.
+	 */
+	wait_barrier(conf);
+
+	sectors = bio_sectors(bio);
+	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    bio->bi_iter.bi_sector < conf->reshape_progress &&
+	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+		/*
+		 * IO spans the reshape position. Need to wait for reshape to
+		 * pass
+		 */
+		raid10_log(conf->mddev, "wait reshape");
+		allow_barrier(conf);
+		wait_event(conf->wait_barrier,
+			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
+			   conf->reshape_progress >= bio->bi_iter.bi_sector +
+			   sectors);
+		wait_barrier(conf);
+	}
+
+read_again:
+	rdev = read_balance(conf, r10_bio, &max_sectors);
+	if (!rdev) {
+		raid_end_bio_io(r10_bio);
+		return;
+	}
+	slot = r10_bio->read_slot;
+
+	read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+	bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+		 max_sectors);
+
+	r10_bio->devs[slot].bio = read_bio;
+	r10_bio->devs[slot].rdev = rdev;
+
+	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+		choose_data_offset(r10_bio, rdev);
+	read_bio->bi_bdev = rdev->bdev;
+	read_bio->bi_end_io = raid10_end_read_request;
+	bio_set_op_attrs(read_bio, op, do_sync);
+	if (test_bit(FailFast, &rdev->flags) &&
+	    test_bit(R10BIO_FailFast, &r10_bio->state))
+		read_bio->bi_opf |= MD_FAILFAST;
+	read_bio->bi_private = r10_bio;
+
+	if (mddev->gendisk)
+		trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+				      read_bio, disk_devt(mddev->gendisk),
+				      r10_bio->sector);
+	if (max_sectors < r10_bio->sectors) {
+		/*
+		 * Could not read all from this device, so we will need another
+		 * r10_bio.
+		 */
+		sectors_handled = (r10_bio->sector + max_sectors
+				   - bio->bi_iter.bi_sector);
+		r10_bio->sectors = max_sectors;
+		spin_lock_irq(&conf->device_lock);
+		if (bio->bi_phys_segments == 0)
+			bio->bi_phys_segments = 2;
+		else
+			bio->bi_phys_segments++;
+		spin_unlock_irq(&conf->device_lock);
+		/*
+		 * Cannot call generic_make_request directly as that will be
+		 * queued in __generic_make_request and subsequent
+		 * mempool_alloc might block waiting for it. so hand bio over
+		 * to raid10d.
+		 */
+		reschedule_retry(r10_bio);
+
+		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+		r10_bio->master_bio = bio;
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
+		r10_bio->state = 0;
+		r10_bio->mddev = mddev;
+		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+		goto read_again;
+	} else
+		generic_make_request(read_bio);
+	return;
+}
+
+static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+				 struct r10bio *r10_bio)
+{
+	struct r10conf *conf = mddev->private;
 	int i;
 	const int op = bio_op(bio);
-	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
 	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
 	struct raid10_plug_cb *plug = NULL;
+	sector_t sectors;
 	int sectors_handled;
 	int max_sectors;
-	int sectors;
 
 	md_write_start(mddev, bio);
 
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
 	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-		/* IO spans the reshape position. Need to wait for
-		 * reshape to pass
+		/*
+		 * IO spans the reshape position. Need to wait for reshape to
+		 * pass
 		 */
 		raid10_log(conf->mddev, "wait reshape");
 		allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 			   sectors);
 		wait_barrier(conf);
 	}
 
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio_data_dir(bio) == WRITE &&
 	    (mddev->reshape_backwards
 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
 		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 		conf->reshape_safe = mddev->reshape_position;
 	}
 
-	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
-	r10_bio->master_bio = bio;
-	r10_bio->sectors = sectors;
-
-	r10_bio->mddev = mddev;
-	r10_bio->sector = bio->bi_iter.bi_sector;
-	r10_bio->state = 0;
-
-	/* We might need to issue multiple reads to different
-	 * devices if there are bad blocks around, so we keep
-	 * track of the number of reads in bio->bi_phys_segments.
-	 * If this is 0, there is only one r10_bio and no locking
-	 * will be needed when the request completes.  If it is
-	 * non-zero, then it is the number of not-completed requests.
-	 */
-	bio->bi_phys_segments = 0;
-	bio_clear_flag(bio, BIO_SEG_VALID);
-
-	if (rw == READ) {
-		/*
-		 * read balancing logic:
-		 */
-		struct md_rdev *rdev;
-		int slot;
-
-read_again:
-		rdev = read_balance(conf, r10_bio, &max_sectors);
-		if (!rdev) {
-			raid_end_bio_io(r10_bio);
-			return;
-		}
-		slot = r10_bio->read_slot;
-
-		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
-			 max_sectors);
-
-		r10_bio->devs[slot].bio = read_bio;
-		r10_bio->devs[slot].rdev = rdev;
-
-		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
-			choose_data_offset(r10_bio, rdev);
-		read_bio->bi_bdev = rdev->bdev;
-		read_bio->bi_end_io = raid10_end_read_request;
-		bio_set_op_attrs(read_bio, op, do_sync);
-		if (test_bit(FailFast, &rdev->flags) &&
-		    test_bit(R10BIO_FailFast, &r10_bio->state))
-			read_bio->bi_opf |= MD_FAILFAST;
-		read_bio->bi_private = r10_bio;
-
-		if (mddev->gendisk)
-			trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-					      read_bio, disk_devt(mddev->gendisk),
-					      r10_bio->sector);
-		if (max_sectors < r10_bio->sectors) {
-			/* Could not read all from this device, so we will
-			 * need another r10_bio.
-			 */
-			sectors_handled = (r10_bio->sector + max_sectors
-					   - bio->bi_iter.bi_sector);
-			r10_bio->sectors = max_sectors;
-			spin_lock_irq(&conf->device_lock);
-			if (bio->bi_phys_segments == 0)
-				bio->bi_phys_segments = 2;
-			else
-				bio->bi_phys_segments++;
-			spin_unlock_irq(&conf->device_lock);
-			/* Cannot call generic_make_request directly
-			 * as that will be queued in __generic_make_request
-			 * and subsequent mempool_alloc might block
-			 * waiting for it.  so hand bio over to raid10d.
-			 */
-			reschedule_retry(r10_bio);
-
-			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
-			r10_bio->master_bio = bio;
-			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
-			r10_bio->state = 0;
-			r10_bio->mddev = mddev;
-			r10_bio->sector = bio->bi_iter.bi_sector +
-				sectors_handled;
-			goto read_again;
-		} else
-			generic_make_request(read_bio);
-		return;
-	}
-
-	/*
-	 * WRITE:
-	 */
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
 		raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
 			int bad_sectors;
 			int is_bad;
 
-			is_bad = is_badblock(rdev, dev_sector,
-					     max_sectors,
+			is_bad = is_badblock(rdev, dev_sector, max_sectors,
 					     &first_bad, &bad_sectors);
 			if (is_bad < 0) {
 				/* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
 			r10_bio->devs[i].bio = mbio;
 
 			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
-					   choose_data_offset(r10_bio,
-							      rdev));
+					   choose_data_offset(r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
 			bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
 			r10_bio->devs[i].repl_bio = mbio;
 
 			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
-					   choose_data_offset(
-						r10_bio, rdev));
+					   choose_data_offset(r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
 			bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
 	one_write_done(r10_bio);
 }
 
+static void __make_request(struct mddev *mddev, struct bio *bio)
+{
+	struct r10conf *conf = mddev->private;
+	struct r10bio *r10_bio;
+
+	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+	r10_bio->master_bio = bio;
+	r10_bio->sectors = bio_sectors(bio);
+
+	r10_bio->mddev = mddev;
+	r10_bio->sector = bio->bi_iter.bi_sector;
+	r10_bio->state = 0;
+
+	/*
+	 * We might need to issue multiple reads to different devices if there
+	 * are bad blocks around, so we keep track of the number of reads in
+	 * bio->bi_phys_segments. If this is 0, there is only one r10_bio and
+	 * no locking will be needed when the request completes. If it is
+	 * non-zero, then it is the number of not-completed requests.
+	 */
+	bio->bi_phys_segments = 0;
+	bio_clear_flag(bio, BIO_SEG_VALID);
+
+	if (bio_data_dir(bio) == READ)
+		raid10_read_request(mddev, bio, r10_bio);
+	else
+		raid10_write_request(mddev, bio, r10_bio);
+}
+
 static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;