iommu/ipmmu-vmsa: Fix allocation in atomic context

When attaching a device to an IOMMU group with
CONFIG_DEBUG_ATOMIC_SLEEP=y:

    BUG: sleeping function called from invalid context at mm/slab.h:421
    in_atomic(): 1, irqs_disabled(): 128, pid: 61, name: kworker/1:1
    ...
    Call trace:
     ...
     arm_lpae_alloc_pgtable+0x114/0x184
     arm_64_lpae_alloc_pgtable_s1+0x2c/0x128
     arm_32_lpae_alloc_pgtable_s1+0x40/0x6c
     alloc_io_pgtable_ops+0x60/0x88
     ipmmu_attach_device+0x140/0x334

ipmmu_attach_device() calls arm_lpae_alloc_pgtable() while holding a
spinlock, and arm_lpae_alloc_pgtable() allocates memory using
GFP_KERNEL, which may sleep.  Originally, the ipmmu-vmsa driver had its
own custom page table allocation implementation that used GFP_ATOMIC,
so performing the allocation under the spinlock was fine.
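
As a minimal sketch of the offending pattern (with made-up names,
example_lock and broken_alloc_in_atomic, not the driver code itself): a
GFP_KERNEL allocation may sleep, which is invalid while a spinlock is
held with interrupts disabled, and CONFIG_DEBUG_ATOMIC_SLEEP reports
exactly that:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Sleeping allocation inside a spinlock-protected section: invalid. */
    static void *broken_alloc_in_atomic(void)
    {
            unsigned long flags;
            void *p;

            spin_lock_irqsave(&example_lock, flags); /* atomic context from here */
            p = kmalloc(128, GFP_KERNEL);            /* may sleep -> the BUG above */
            spin_unlock_irqrestore(&example_lock, flags);

            return p;
    }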

Fix this by replacing the spinlock with a mutex, as the arm-smmu driver
does.
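
As a hedged sketch of the resulting pattern (again with hypothetical
names, example_mutex and fixed_alloc_under_mutex): a mutex may be held
across a sleeping GFP_KERNEL allocation, because mutex_lock() itself
only runs in process context and may sleep:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(example_mutex);

    /* Holding a mutex allows the GFP_KERNEL allocation to sleep safely. */
    static void *fixed_alloc_under_mutex(void)
    {
            void *p;

            mutex_lock(&example_mutex);   /* process context, sleeping allowed */
            p = kmalloc(128, GFP_KERNEL); /* no DEBUG_ATOMIC_SLEEP splat */
            mutex_unlock(&example_mutex);

            return p;
    }

This works only because the attach path is allowed to sleep, which is
also why the arm-smmu driver protects the same path with a mutex.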

Fixes: f20ed39f53 ("iommu/ipmmu-vmsa: Use the ARM LPAE page table allocator")
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Geert Uytterhoeven
Date:      2018-07-20 18:16:59 +02:00
Committer: Joerg Roedel
Parent:    2ae8695570
Commit:    46583e8c48


@@ -74,7 +74,7 @@ struct ipmmu_vmsa_domain {
 	struct io_pgtable_ops *iop;
 
 	unsigned int context_id;
-	spinlock_t lock;			/* Protects mappings */
+	struct mutex mutex;			/* Protects mappings */
 };
 
 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
@@ -600,7 +600,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
 	if (!domain)
 		return NULL;
 
-	spin_lock_init(&domain->lock);
+	mutex_init(&domain->mutex);
 
 	return &domain->io_domain;
 }
@@ -646,7 +646,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
-	unsigned long flags;
 	unsigned int i;
 	int ret = 0;
 
@@ -655,7 +654,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&domain->lock, flags);
+	mutex_lock(&domain->mutex);
 
 	if (!domain->mmu) {
 		/* The domain hasn't been used yet, initialize it. */
@@ -679,7 +678,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 	} else
 		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
 
-	spin_unlock_irqrestore(&domain->lock, flags);
+	mutex_unlock(&domain->mutex);
 
 	if (ret < 0)
 		return ret;