commit b1d37e5b96
Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.9/drivers

Pull MD fixes from Song.

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md-cluster: fix wild pointer of unlock_all_bitmaps()
  md/raid5-cache: clear MD_SB_CHANGE_PENDING before flushing stripes
  md: fix deadlock causing by sysfs_notify
  md: improve io stats accounting
  md: raid0/linear: fix dereference before null check on pointer mddev
drivers/md/md-bitmap.c
@@ -1631,7 +1631,7 @@ void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 		s += blocks;
 	}
 	bitmap->last_end_sync = jiffies;
-	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
+	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
 }
 EXPORT_SYMBOL(md_bitmap_cond_end_sync);
drivers/md/md-cluster.c
@@ -1518,6 +1518,7 @@ static void unlock_all_bitmaps(struct mddev *mddev)
 			}
 		}
 		kfree(cinfo->other_bitmap_lockres);
+		cinfo->other_bitmap_lockres = NULL;
 	}
 }
drivers/md/md.c
@@ -463,12 +463,38 @@ void md_handle_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_handle_request);
 
+struct md_io {
+	struct mddev *mddev;
+	bio_end_io_t *orig_bi_end_io;
+	void *orig_bi_private;
+	unsigned long start_time;
+};
+
+static void md_end_io(struct bio *bio)
+{
+	struct md_io *md_io = bio->bi_private;
+	struct mddev *mddev = md_io->mddev;
+
+	disk_end_io_acct(mddev->gendisk, bio_op(bio), md_io->start_time);
+
+	bio->bi_end_io = md_io->orig_bi_end_io;
+	bio->bi_private = md_io->orig_bi_private;
+
+	mempool_free(md_io, &mddev->md_io_pool);
+
+	if (bio->bi_end_io)
+		bio->bi_end_io(bio);
+}
+
 static blk_qc_t md_submit_bio(struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
-	const int sgrp = op_stat_group(bio_op(bio));
 	struct mddev *mddev = bio->bi_disk->private_data;
-	unsigned int sectors;
+
+	if (mddev == NULL || mddev->pers == NULL) {
+		bio_io_error(bio);
+		return BLK_QC_T_NONE;
+	}
 
 	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
 		bio_io_error(bio);
@@ -477,10 +503,6 @@ static blk_qc_t md_submit_bio(struct bio *bio)
 
 	blk_queue_split(&bio);
 
-	if (mddev == NULL || mddev->pers == NULL) {
-		bio_io_error(bio);
-		return BLK_QC_T_NONE;
-	}
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
 		if (bio_sectors(bio) != 0)
 			bio->bi_status = BLK_STS_IOERR;
@@ -488,21 +510,27 @@ static blk_qc_t md_submit_bio(struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	/*
-	 * save the sectors now since our bio can
-	 * go away inside make_request
-	 */
-	sectors = bio_sectors(bio);
+	if (bio->bi_end_io != md_end_io) {
+		struct md_io *md_io;
+
+		md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
+		md_io->mddev = mddev;
+		md_io->orig_bi_end_io = bio->bi_end_io;
+		md_io->orig_bi_private = bio->bi_private;
+
+		bio->bi_end_io = md_end_io;
+		bio->bi_private = md_io;
+
+		md_io->start_time = disk_start_io_acct(mddev->gendisk,
+						       bio_sectors(bio),
+						       bio_op(bio));
+	}
+
 	/* bio could be mergeable after passing to underlayer */
 	bio->bi_opf &= ~REQ_NOMERGE;
 
 	md_handle_request(mddev, bio);
 
-	part_stat_lock();
-	part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
-	part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
-	part_stat_unlock();
-
 	return BLK_QC_T_NONE;
 }
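The two hunks above carry the "md: improve io stats accounting" change: instead of bumping part_stat counters by hand, md now wraps each incoming bio's completion so that disk_start_io_acct()/disk_end_io_acct() can account sectors, I/Os and time in flight for the md device. A rough userspace sketch of that completion-wrapping pattern follows, for orientation only; the types and names are illustrative, not the kernel API, and allocation error handling is omitted.

/*
 * Sketch: stash the submitter's completion callback and private pointer,
 * install a wrapper that accounts elapsed time, then restore and chain to
 * the original callback on completion.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct request {
	void (*end_io)(struct request *rq);
	void *private;
};

struct io_acct {
	void (*orig_end_io)(struct request *rq);
	void *orig_private;
	struct timespec start;
};

static void acct_end_io(struct request *rq)
{
	struct io_acct *acct = rq->private;
	struct timespec end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	printf("request took %ld ns\n",
	       (end.tv_sec - acct->start.tv_sec) * 1000000000L +
	       (end.tv_nsec - acct->start.tv_nsec));

	/* put the caller's completion state back, then chain to it */
	rq->end_io = acct->orig_end_io;
	rq->private = acct->orig_private;
	free(acct);

	if (rq->end_io)
		rq->end_io(rq);
}

static void submit(struct request *rq)
{
	if (rq->end_io != acct_end_io) {	/* wrap and account only once */
		struct io_acct *acct = malloc(sizeof(*acct));

		acct->orig_end_io = rq->end_io;
		acct->orig_private = rq->private;
		clock_gettime(CLOCK_MONOTONIC, &acct->start);

		rq->end_io = acct_end_io;
		rq->private = acct;
	}

	/* a real driver would queue the request; here we complete it inline */
	rq->end_io(rq);
}

static void caller_end_io(struct request *rq)
{
	printf("caller completion, private=%p\n", rq->private);
}

int main(void)
{
	struct request rq = { .end_io = caller_end_io, .private = NULL };

	submit(&rq);
	return 0;
}

The bio->bi_end_io != md_end_io test in the real hunk plays the same role as the rq->end_io != acct_end_io check here: a bio that passes through md_submit_bio() again is wrapped and accounted only once.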
@@ -2444,6 +2472,10 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
 	if (sysfs_create_link(&rdev->kobj, ko, "block"))
 		/* failure here is OK */;
 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
+	rdev->sysfs_unack_badblocks =
+		sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
+	rdev->sysfs_badblocks =
+		sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
 
 	list_add_rcu(&rdev->same_set, &mddev->disks);
 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
@@ -2477,7 +2509,11 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
 	rdev->mddev = NULL;
 	sysfs_remove_link(&rdev->kobj, "block");
 	sysfs_put(rdev->sysfs_state);
+	sysfs_put(rdev->sysfs_unack_badblocks);
+	sysfs_put(rdev->sysfs_badblocks);
 	rdev->sysfs_state = NULL;
+	rdev->sysfs_unack_badblocks = NULL;
+	rdev->sysfs_badblocks = NULL;
 	rdev->badblocks.count = 0;
 	/* We need to delay this, otherwise we can deadlock when
 	 * writing to 'remove' to "dev/state". We also need
@@ -2822,7 +2858,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
 		goto repeat;
 	wake_up(&mddev->sb_wait);
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 
 	rdev_for_each(rdev, mddev) {
 		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
@@ -4075,7 +4111,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	mddev_resume(mddev);
 	if (!mddev->thread)
 		md_update_sb(mddev, 1);
-	sysfs_notify(&mddev->kobj, NULL, "level");
+	sysfs_notify_dirent_safe(mddev->sysfs_level);
 	md_new_event(mddev);
 	rv = len;
 out_unlock:
@@ -4828,7 +4864,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 		}
 		if (err)
 			return err;
-		sysfs_notify(&mddev->kobj, NULL, "degraded");
+		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
 	} else {
 		if (cmd_match(page, "check"))
 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -5534,6 +5570,13 @@ static void md_free(struct kobject *ko)
 
 	if (mddev->sysfs_state)
 		sysfs_put(mddev->sysfs_state);
+	if (mddev->sysfs_completed)
+		sysfs_put(mddev->sysfs_completed);
+	if (mddev->sysfs_degraded)
+		sysfs_put(mddev->sysfs_degraded);
+	if (mddev->sysfs_level)
+		sysfs_put(mddev->sysfs_level);
 
 	if (mddev->gendisk)
 		del_gendisk(mddev->gendisk);
@@ -5545,6 +5588,7 @@ static void md_free(struct kobject *ko)
 
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
+	mempool_exit(&mddev->md_io_pool);
 	kfree(mddev);
 }
 
@@ -5640,6 +5684,11 @@ static int md_alloc(dev_t dev, char *name)
 		 */
 		mddev->hold_active = UNTIL_STOP;
 
+	error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
+					  sizeof(struct md_io));
+	if (error)
+		goto abort;
+
 	error = -ENOMEM;
 	mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
 	if (!mddev->queue)
@@ -5695,6 +5744,9 @@ static int md_alloc(dev_t dev, char *name)
 	if (!error && mddev->kobj.sd) {
 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
 		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
+		mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
 	}
 	mddev_put(mddev);
 	return error;
@@ -6049,7 +6101,7 @@ static int do_md_run(struct mddev *mddev)
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	sysfs_notify(&mddev->kobj, NULL, "degraded");
+	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
 out:
 	clear_bit(MD_NOT_READY, &mddev->flags);
 	return err;
@@ -8768,7 +8820,7 @@ void md_do_sync(struct md_thread *thread)
 	} else
 		mddev->curr_resync = 3; /* no longer delayed */
 	mddev->curr_resync_completed = j;
-	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+	sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	md_new_event(mddev);
 	update_time = jiffies;
 
@@ -8796,7 +8848,7 @@ void md_do_sync(struct md_thread *thread)
 				mddev->recovery_cp = j;
 			update_time = jiffies;
 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
-			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+			sysfs_notify_dirent_safe(mddev->sysfs_completed);
 		}
 
 		while (j >= mddev->resync_max &&
@@ -8903,7 +8955,7 @@ void md_do_sync(struct md_thread *thread)
 	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    mddev->curr_resync > 3) {
 		mddev->curr_resync_completed = mddev->curr_resync;
-		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	}
 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
@@ -9033,7 +9085,7 @@ static int remove_and_add_spares(struct mddev *mddev,
 	}
 
 	if (removed && mddev->kobj.sd)
-		sysfs_notify(&mddev->kobj, NULL, "degraded");
+		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
 
 	if (this && removed)
 		goto no_add;
@@ -9316,8 +9368,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 		/* success...*/
 		/* activate any spares */
 		if (mddev->pers->spare_active(mddev)) {
-			sysfs_notify(&mddev->kobj, NULL,
-				     "degraded");
+			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		}
 	}
@@ -9407,8 +9458,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 	if (rv == 0) {
 		/* Make sure they get written out promptly */
 		if (test_bit(ExternalBbl, &rdev->flags))
-			sysfs_notify(&rdev->kobj, NULL,
-				     "unacknowledged_bad_blocks");
+			sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
 		set_mask_bits(&mddev->sb_flags, 0,
 			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
@@ -9429,7 +9479,7 @@ int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 		s += rdev->data_offset;
 	rv = badblocks_clear(&rdev->badblocks, s, sectors);
 	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
-		sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
 	return rv;
 }
 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@ -9659,7 +9709,7 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev->recovery_offset == MaxSector &&
 	    !test_bit(In_sync, &rdev->flags) &&
 	    mddev->pers->spare_active(mddev))
-		sysfs_notify(&mddev->kobj, NULL, "degraded");
+		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
 
 	put_page(swapout);
 	return 0;
drivers/md/md.h
@@ -126,7 +126,10 @@ struct md_rdev {
 
 	struct kernfs_node *sysfs_state;	/* handle for 'state'
 						 * sysfs entry */
-
+	/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
+	struct kernfs_node *sysfs_unack_badblocks;
+	/* handle for 'bad_blocks' sysfs dentry */
+	struct kernfs_node *sysfs_badblocks;
 	struct badblocks badblocks;
 
 	struct {
@@ -420,6 +423,9 @@ struct mddev {
 						 * file in sysfs.
 						 */
 	struct kernfs_node *sysfs_action;	/* handle for 'sync_action' */
+	struct kernfs_node *sysfs_completed;	/*handle for 'sync_completed' */
+	struct kernfs_node *sysfs_degraded;	/*handle for 'degraded' */
+	struct kernfs_node *sysfs_level;	/*handle for 'level' */
 
 	struct work_struct del_work;	/* used for delayed sysfs removal */
 
@@ -481,6 +487,7 @@ struct mddev {
 	struct bio_set sync_set;	/* for sync operations like
 					 * metadata and bitmap writes
 					 */
+	mempool_t md_io_pool;
 
 	/* Generic flush handling.
 	 * The last to finish preflush schedules a worker to submit
drivers/md/raid10.c
@@ -4454,7 +4454,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 			sector_nr = conf->reshape_progress;
 		if (sector_nr) {
 			mddev->curr_resync_completed = sector_nr;
-			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+			sysfs_notify_dirent_safe(mddev->sysfs_completed);
 			*skipped = 1;
 			return sector_nr;
 		}
drivers/md/raid5-cache.c
@@ -2430,10 +2430,15 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
 	struct mddev *mddev = log->rdev->mddev;
 	struct r5conf *conf = mddev->private;
 	struct stripe_head *sh, *next;
+	bool cleared_pending = false;
 
 	if (ctx->data_only_stripes == 0)
 		return;
 
+	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
+		cleared_pending = true;
+		clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+	}
 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
 
 	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
@@ -2448,6 +2453,8 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
 			   atomic_read(&conf->active_stripes) == 0);
 
 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+	if (cleared_pending)
+		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 }
 
 static int r5l_recovery_log(struct r5l_log *log)
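The raid5-cache hunks above implement "md/raid5-cache: clear MD_SB_CHANGE_PENDING before flushing stripes": if the superblock-change-pending bit is already set when recovery flushes the data-only stripes, it is cleared for the duration of the flush so stripe handling is not left waiting on a superblock update that cannot be issued yet, and it is restored afterwards only if it had been set on entry. A minimal userspace sketch of that save/clear/restore pattern, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

#define SB_CHANGE_PENDING	(1UL << 0)

static unsigned long sb_flags = SB_CHANGE_PENDING;

static void flush_cached_stripes(void)
{
	/* stand-in for writing the cached stripes back to the array */
	printf("flushing, sb_flags=%#lx\n", sb_flags);
}

int main(void)
{
	bool cleared_pending = false;

	if (sb_flags & SB_CHANGE_PENDING) {	/* test_bit() in the kernel */
		cleared_pending = true;
		sb_flags &= ~SB_CHANGE_PENDING;	/* clear_bit() */
	}

	flush_cached_stripes();

	if (cleared_pending)
		sb_flags |= SB_CHANGE_PENDING;	/* set_bit() restores the state */

	printf("done, sb_flags=%#lx\n", sb_flags);
	return 0;
}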
drivers/md/raid5.c
@@ -5799,7 +5799,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 		sector_div(sector_nr, new_data_disks);
 		if (sector_nr) {
 			mddev->curr_resync_completed = sector_nr;
-			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+			sysfs_notify_dirent_safe(mddev->sysfs_completed);
 			*skipped = 1;
 			retn = sector_nr;
 			goto finish;
@@ -5913,7 +5913,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 		conf->reshape_safe = mddev->reshape_position;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
-		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	}
 
 	INIT_LIST_HEAD(&stripes);
@@ -6020,7 +6020,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 		conf->reshape_safe = mddev->reshape_position;
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
-		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	}
 ret:
 	return retn;