iommu/amd: Fix race in increase_address_space()
author     Joerg Roedel <jroedel@suse.de>
           Fri, 6 Sep 2019 08:39:54 +0000 (10:39 +0200)
committer  Joerg Roedel <jroedel@suse.de>
           Fri, 6 Sep 2019 08:55:51 +0000 (10:55 +0200)
After the conversion to lock-less dma-api calls, the
increase_address_space() function can be called without any
locking. Multiple CPUs could then race to increase the address
space, leading to invalid domain->mode settings and invalid
page-tables. This has been observed in the wild under high IO
load and memory pressure.
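
To make the interleaving concrete, here is a minimal annotated sketch of
the pre-patch function, reconstructed from the removed lines in the diff
below and its surrounding context (not the verbatim source):

	static bool increase_address_space(struct protection_domain *domain,
					   gfp_t gfp)
	{
		u64 *pte;

		/* Two CPUs can both read the old mode and pass this check. */
		if (domain->mode == PAGE_MODE_6_LEVEL)
			return false;

		/* Both then allocate their own new top-level table. */
		pte = (void *)get_zeroed_page(gfp);
		if (!pte)
			return false;

		*pte = PM_LEVEL_PDE(domain->mode,
				    iommu_virt_to_phys(domain->pt_root));

		/* Unsynchronized read-modify-write: the second CPU
		 * overwrites pt_root and increments mode a second time,
		 * leaving domain->mode out of sync with the actual
		 * page-table depth. */
		domain->pt_root  = pte;
		domain->mode    += 1;
		domain->updated  = true;

		return true;
	}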

Fix the race by taking the domain lock around the operation. The
function is called infrequently, so this does not reintroduce a
performance regression in the dma-api path.

Reported-by: Qian Cai <cai@lca.pw>
Fixes: e7c3ba0036b9 ("iommu/amd: Make use of the generic IOVA allocator")
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd_iommu.c

index f853b96ee547e486158f54fe30977483f25832e4..61de81965c44ed95b52ef857b4f31a4cb8519a28 100644
@@ -1435,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
 {
+       unsigned long flags;
        u64 *pte;
 
-       if (domain->mode == PAGE_MODE_6_LEVEL)
+       spin_lock_irqsave(&domain->lock, flags);
+
+       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                /* address space already 64 bit large */
-               return false;
+               goto out;
 
        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
-               return false;
+               goto out;
 
        *pte             = PM_LEVEL_PDE(domain->mode,
                                        iommu_virt_to_phys(domain->pt_root));
@@ -1454,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
        domain->mode    += 1;
        domain->updated  = true;
 
-       return true;
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
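
For context, the only caller is alloc_pte(), where the return value was
already ignored; that is why the bool-to-void conversion is safe. An
abridged sketch of the call site as it stands around this commit (a
hedged reconstruction, not part of this diff):

	/* In alloc_pte(): grow the page-table until 'address' fits
	 * within the levels covered by domain->mode. */
	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);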