Revert "block: split .sysfs_lock into two locks"

This reverts commit fa137b50f3.

It breaks the ABI and is not needed on Android devices.

Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia9b92ae9b03e3e12b13584373448160c767bf4cb
This commit is contained in:
Greg Kroah-Hartman 2021-03-04 16:13:09 +01:00
parent b0a6dfc3a2
commit 6fbaea28a7
6 changed files with 34 additions and 87 deletions

View File

@ -1061,7 +1061,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
mutex_init(&q->blk_trace_mutex); mutex_init(&q->blk_trace_mutex);
#endif #endif
mutex_init(&q->sysfs_lock); mutex_init(&q->sysfs_lock);
mutex_init(&q->sysfs_dir_lock);
spin_lock_init(&q->__queue_lock); spin_lock_init(&q->__queue_lock);
if (!q->mq_ops) if (!q->mq_ops)

View File

@ -264,7 +264,7 @@ void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
int i; int i;
lockdep_assert_held(&q->sysfs_dir_lock); lockdep_assert_held(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx); blk_mq_unregister_hctx(hctx);
@ -312,7 +312,7 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
int ret, i; int ret, i;
WARN_ON_ONCE(!q->kobj.parent); WARN_ON_ONCE(!q->kobj.parent);
lockdep_assert_held(&q->sysfs_dir_lock); lockdep_assert_held(&q->sysfs_lock);
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0) if (ret < 0)
@ -358,7 +358,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
int i; int i;
mutex_lock(&q->sysfs_dir_lock); mutex_lock(&q->sysfs_lock);
if (!q->mq_sysfs_init_done) if (!q->mq_sysfs_init_done)
goto unlock; goto unlock;
@ -366,7 +366,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
blk_mq_unregister_hctx(hctx); blk_mq_unregister_hctx(hctx);
unlock: unlock:
mutex_unlock(&q->sysfs_dir_lock); mutex_unlock(&q->sysfs_lock);
} }
int blk_mq_sysfs_register(struct request_queue *q) int blk_mq_sysfs_register(struct request_queue *q)
@ -374,7 +374,7 @@ int blk_mq_sysfs_register(struct request_queue *q)
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
int i, ret = 0; int i, ret = 0;
mutex_lock(&q->sysfs_dir_lock); mutex_lock(&q->sysfs_lock);
if (!q->mq_sysfs_init_done) if (!q->mq_sysfs_init_done)
goto unlock; goto unlock;
@ -385,7 +385,7 @@ int blk_mq_sysfs_register(struct request_queue *q)
} }
unlock: unlock:
mutex_unlock(&q->sysfs_dir_lock); mutex_unlock(&q->sysfs_lock);
return ret; return ret;
} }

View File

@ -892,7 +892,6 @@ int blk_register_queue(struct gendisk *disk)
int ret; int ret;
struct device *dev = disk_to_dev(disk); struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue; struct request_queue *q = disk->queue;
bool has_elevator = false;
if (WARN_ON(!q)) if (WARN_ON(!q))
return -ENXIO; return -ENXIO;
@ -900,6 +899,7 @@ int blk_register_queue(struct gendisk *disk)
WARN_ONCE(blk_queue_registered(q), WARN_ONCE(blk_queue_registered(q),
"%s is registering an already registered queue\n", "%s is registering an already registered queue\n",
kobject_name(&dev->kobj)); kobject_name(&dev->kobj));
queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
/* /*
* SCSI probing may synchronously create and destroy a lot of * SCSI probing may synchronously create and destroy a lot of
@ -920,7 +920,8 @@ int blk_register_queue(struct gendisk *disk)
if (ret) if (ret)
return ret; return ret;
mutex_lock(&q->sysfs_dir_lock); /* Prevent changes through sysfs until registration is completed. */
mutex_lock(&q->sysfs_lock);
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0) { if (ret < 0) {
@ -933,36 +934,26 @@ int blk_register_queue(struct gendisk *disk)
blk_mq_debugfs_register(q); blk_mq_debugfs_register(q);
} }
/* kobject_uevent(&q->kobj, KOBJ_ADD);
* The flag of QUEUE_FLAG_REGISTERED isn't set yet, so elevator
* switch won't happen at all. wbt_enable_default(q);
*/
blk_throtl_register_queue(q);
if (q->request_fn || (q->mq_ops && q->elevator)) { if (q->request_fn || (q->mq_ops && q->elevator)) {
ret = elv_register_queue(q, false); ret = elv_register_queue(q);
if (ret) { if (ret) {
mutex_unlock(&q->sysfs_dir_lock); mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj); kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev); blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj); kobject_put(&dev->kobj);
return ret; return ret;
} }
has_elevator = true;
} }
mutex_lock(&q->sysfs_lock);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
wbt_enable_default(q);
blk_throtl_register_queue(q);
/* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&q->kobj, KOBJ_ADD);
if (has_elevator)
kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
mutex_unlock(&q->sysfs_lock);
ret = 0; ret = 0;
unlock: unlock:
mutex_unlock(&q->sysfs_dir_lock); mutex_unlock(&q->sysfs_lock);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(blk_register_queue); EXPORT_SYMBOL_GPL(blk_register_queue);
@ -977,7 +968,6 @@ EXPORT_SYMBOL_GPL(blk_register_queue);
void blk_unregister_queue(struct gendisk *disk) void blk_unregister_queue(struct gendisk *disk)
{ {
struct request_queue *q = disk->queue; struct request_queue *q = disk->queue;
bool has_elevator;
if (WARN_ON(!q)) if (WARN_ON(!q))
return; return;
@ -992,27 +982,25 @@ void blk_unregister_queue(struct gendisk *disk)
* concurrent elv_iosched_store() calls. * concurrent elv_iosched_store() calls.
*/ */
mutex_lock(&q->sysfs_lock); mutex_lock(&q->sysfs_lock);
blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
has_elevator = !!q->elevator;
mutex_unlock(&q->sysfs_lock);
mutex_lock(&q->sysfs_dir_lock); blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
/* /*
* Remove the sysfs attributes before unregistering the queue data * Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs. * structures that can be modified through sysfs.
*/ */
if (q->mq_ops) if (q->mq_ops)
blk_mq_unregister_dev(disk_to_dev(disk), q); blk_mq_unregister_dev(disk_to_dev(disk), q);
mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj); kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk)); blk_trace_remove_sysfs(disk_to_dev(disk));
mutex_lock(&q->sysfs_lock); mutex_lock(&q->sysfs_lock);
if (q->request_fn || has_elevator) if (q->request_fn || (q->mq_ops && q->elevator))
elv_unregister_queue(q); elv_unregister_queue(q);
mutex_unlock(&q->sysfs_lock); mutex_unlock(&q->sysfs_lock);
mutex_unlock(&q->sysfs_dir_lock);
kobject_put(&disk_to_dev(disk)->kobj); kobject_put(&disk_to_dev(disk)->kobj);
} }

View File

@ -244,7 +244,7 @@ int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q, int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e); struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *); void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent); int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q); void elv_unregister_queue(struct request_queue *q);
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

View File

@ -833,16 +833,13 @@ static struct kobj_type elv_ktype = {
.release = elevator_release, .release = elevator_release,
}; };
/* int elv_register_queue(struct request_queue *q)
* elv_register_queue is called from either blk_register_queue or
* elevator_switch, elevator switch is prevented from being happen
* in the two paths, so it is safe to not hold q->sysfs_lock.
*/
int elv_register_queue(struct request_queue *q, bool uevent)
{ {
struct elevator_queue *e = q->elevator; struct elevator_queue *e = q->elevator;
int error; int error;
lockdep_assert_held(&q->sysfs_lock);
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
if (!error) { if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs; struct elv_fs_entry *attr = e->type->elevator_attrs;
@ -853,36 +850,26 @@ int elv_register_queue(struct request_queue *q, bool uevent)
attr++; attr++;
} }
} }
if (uevent) kobject_uevent(&e->kobj, KOBJ_ADD);
kobject_uevent(&e->kobj, KOBJ_ADD);
mutex_lock(&q->sysfs_lock);
e->registered = 1; e->registered = 1;
if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn) if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
e->type->ops.sq.elevator_registered_fn(q); e->type->ops.sq.elevator_registered_fn(q);
mutex_unlock(&q->sysfs_lock);
} }
return error; return error;
} }
/*
* elv_unregister_queue is called from either blk_unregister_queue or
* elevator_switch, elevator switch is prevented from being happen
* in the two paths, so it is safe to not hold q->sysfs_lock.
*/
void elv_unregister_queue(struct request_queue *q) void elv_unregister_queue(struct request_queue *q)
{ {
lockdep_assert_held(&q->sysfs_lock);
if (q) { if (q) {
struct elevator_queue *e = q->elevator; struct elevator_queue *e = q->elevator;
kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj); kobject_del(&e->kobj);
mutex_lock(&q->sysfs_lock);
e->registered = 0; e->registered = 0;
/* Re-enable throttling in case elevator disabled it */ /* Re-enable throttling in case elevator disabled it */
wbt_enable_default(q); wbt_enable_default(q);
mutex_unlock(&q->sysfs_lock);
} }
} }
@ -953,32 +940,10 @@ int elevator_switch_mq(struct request_queue *q,
lockdep_assert_held(&q->sysfs_lock); lockdep_assert_held(&q->sysfs_lock);
if (q->elevator) { if (q->elevator) {
if (q->elevator->registered) { if (q->elevator->registered)
mutex_unlock(&q->sysfs_lock);
/*
* Concurrent elevator switch can't happen becasue
* sysfs write is always exclusively on same file.
*
* Also the elevator queue won't be freed after
* sysfs_lock is released becasue kobject_del() in
* blk_unregister_queue() waits for completion of
* .store & .show on its attributes.
*/
elv_unregister_queue(q); elv_unregister_queue(q);
mutex_lock(&q->sysfs_lock);
}
ioc_clear_queue(q); ioc_clear_queue(q);
elevator_exit(q, q->elevator); elevator_exit(q, q->elevator);
/*
* sysfs_lock may be dropped, so re-check if queue is
* unregistered. If yes, don't switch to new elevator
* any more
*/
if (!blk_queue_registered(q))
return 0;
} }
ret = blk_mq_init_sched(q, new_e); ret = blk_mq_init_sched(q, new_e);
@ -986,11 +951,7 @@ int elevator_switch_mq(struct request_queue *q,
goto out; goto out;
if (new_e) { if (new_e) {
mutex_unlock(&q->sysfs_lock); ret = elv_register_queue(q);
ret = elv_register_queue(q, true);
mutex_lock(&q->sysfs_lock);
if (ret) { if (ret) {
elevator_exit(q, q->elevator); elevator_exit(q, q->elevator);
goto out; goto out;
@ -1086,7 +1047,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
if (err) if (err)
goto fail_init; goto fail_init;
err = elv_register_queue(q, true); err = elv_register_queue(q);
if (err) if (err)
goto fail_register; goto fail_register;
@ -1106,7 +1067,7 @@ fail_init:
/* switch failed, restore and re-register old elevator */ /* switch failed, restore and re-register old elevator */
if (old) { if (old) {
q->elevator = old; q->elevator = old;
elv_register_queue(q, true); elv_register_queue(q);
blk_queue_bypass_end(q); blk_queue_bypass_end(q);
} }

View File

@ -642,7 +642,6 @@ struct request_queue {
struct delayed_work requeue_work; struct delayed_work requeue_work;
struct mutex sysfs_lock; struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
int bypass_depth; int bypass_depth;
atomic_t mq_freeze_depth; atomic_t mq_freeze_depth;