Revert "dm: fix deadlock when swapping to encrypted device"

This reverts commit 5c5bb51465.

It breaks the ABI and is not needed for any Android devices.

Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6f14268eae3250b096ed9e12c5d66bec8935dc11
commit bd792f5690
parent 6455a150fa
Greg Kroah-Hartman, 2021-03-04 15:57:18 +01:00
4 changed files with 0 additions and 70 deletions
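
Why the revert matters for the ABI: inserting new members into struct mapped_device and struct dm_target changes the size of those structures and the offset of every member declared after the insertion point, which is what Android's KMI/ABI checks flag. The minimal user-space sketch below only illustrates that layout effect; md_old and md_new are hypothetical stand-ins, not the real kernel structures.

/* Illustrative sketch only: hypothetical stand-in structs, not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct md_old {				/* layout existing modules were built against */
	long flush_bio;			/* stand-in for the members before the insertion */
	long stats;
};

struct md_new {				/* layout after the patch being reverted */
	long flush_bio;
	int swap_bios;			/* newly inserted members shift the size and */
	long swap_bios_semaphore;	/* the offset of every member that follows   */
	long swap_bios_lock;
	long stats;
};

int main(void)
{
	printf("old: sizeof=%zu offsetof(stats)=%zu\n",
	       sizeof(struct md_old), offsetof(struct md_old, stats));
	printf("new: sizeof=%zu offsetof(stats)=%zu\n",
	       sizeof(struct md_new), offsetof(struct md_new, stats));
	return 0;
}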

drivers/md/dm-core.h

@@ -110,10 +110,6 @@ struct mapped_device {
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
 
-	int swap_bios;
-	struct semaphore swap_bios_semaphore;
-	struct mutex swap_bios_lock;
-
 	struct dm_stats stats;
 
 	struct kthread_worker kworker;

drivers/md/dm-crypt.c

@@ -2861,7 +2861,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	wake_up_process(cc->write_thread);
 
 	ti->num_flush_bios = 1;
-	ti->limit_swap_bios = true;
 
 	return 0;

drivers/md/dm.c

@@ -148,16 +148,6 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
 #define DM_NUMA_NODE NUMA_NO_NODE
 static int dm_numa_node = DM_NUMA_NODE;
 
-#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
-static int swap_bios = DEFAULT_SWAP_BIOS;
-static int get_swap_bios(void)
-{
-	int latch = READ_ONCE(swap_bios);
-	if (unlikely(latch <= 0))
-		latch = DEFAULT_SWAP_BIOS;
-	return latch;
-}
-
 /*
  * For mempools pre-allocation at the table loading time.
  */
@@ -947,11 +937,6 @@ void disable_write_zeroes(struct mapped_device *md)
 	limits->max_write_zeroes_sectors = 0;
 }
 
-static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
-{
-	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
-}
-
 static void clone_endio(struct bio *bio)
 {
 	blk_status_t error = bio->bi_status;
@@ -989,11 +974,6 @@ static void clone_endio(struct bio *bio)
 		}
 	}
 
-	if (unlikely(swap_bios_limit(tio->ti, bio))) {
-		struct mapped_device *md = io->md;
-		up(&md->swap_bios_semaphore);
-	}
-
 	free_tio(tio);
 	dec_pending(io, error);
 }
@@ -1272,22 +1252,6 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
 }
 EXPORT_SYMBOL_GPL(dm_remap_zone_report);
 
-static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
-{
-	mutex_lock(&md->swap_bios_lock);
-	while (latch < md->swap_bios) {
-		cond_resched();
-		down(&md->swap_bios_semaphore);
-		md->swap_bios--;
-	}
-	while (latch > md->swap_bios) {
-		cond_resched();
-		up(&md->swap_bios_semaphore);
-		md->swap_bios++;
-	}
-	mutex_unlock(&md->swap_bios_lock);
-}
-
 static blk_qc_t __map_bio(struct dm_target_io *tio)
 {
 	int r;
@@ -1308,14 +1272,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
 	atomic_inc(&io->io_count);
 	sector = clone->bi_iter.bi_sector;
 
-	if (unlikely(swap_bios_limit(ti, clone))) {
-		struct mapped_device *md = io->md;
-		int latch = get_swap_bios();
-		if (unlikely(latch != md->swap_bios))
-			__set_swap_bios_limit(md, latch);
-		down(&md->swap_bios_semaphore);
-	}
-
 	r = ti->type->map(ti, clone);
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
@@ -1330,18 +1286,10 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
 		ret = generic_make_request(clone);
 		break;
 	case DM_MAPIO_KILL:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
-			up(&md->swap_bios_semaphore);
-		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_IOERR);
 		break;
 	case DM_MAPIO_REQUEUE:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
-			up(&md->swap_bios_semaphore);
-		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_DM_REQUEUE);
 		break;
@@ -1918,7 +1866,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	mutex_destroy(&md->suspend_lock);
 	mutex_destroy(&md->type_lock);
 	mutex_destroy(&md->table_devices_lock);
-	mutex_destroy(&md->swap_bios_lock);
 
 	dm_mq_cleanup_mapped_device(md);
 }
@@ -1993,10 +1940,6 @@ static struct mapped_device *alloc_dev(int minor)
 	init_completion(&md->kobj_holder.completion);
 	md->kworker_task = NULL;
 
-	md->swap_bios = get_swap_bios();
-	sema_init(&md->swap_bios_semaphore, md->swap_bios);
-	mutex_init(&md->swap_bios_lock);
-
 	md->disk->major = _major;
 	md->disk->first_minor = minor;
 	md->disk->fops = &dm_blk_dops;
@@ -3459,9 +3402,6 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
 
-module_param(swap_bios, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
-
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");

include/linux/device-mapper.h

@@ -316,11 +316,6 @@ struct dm_target {
 	 */
	bool split_discard_bios:1;
 
-	/*
-	 * Set if we need to limit the number of in-flight bios when swapping.
-	 */
-	bool limit_swap_bios:1;
-
 	/*
 	 * Set if inline crypto capabilities from this target's underlying
 	 * device(s) can be exposed via the device-mapper device.