PCI/MSI: Use msi_add_msi_desc()
author Thomas Gleixner <tglx@linutronix.de>
Mon, 6 Dec 2021 22:51:15 +0000 (23:51 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 16 Dec 2021 21:22:17 +0000 (22:22 +0100)
Simplify the allocation of MSI descriptors by using msi_add_msi_desc(), which
moves the storage handling to core code and prepares for dynamic extension of
the MSI-X vector space.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20211206210748.035348646@linutronix.de
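
In short, both hunks below apply the same conversion: instead of heap-allocating
a descriptor with alloc_msi_entry() and linking it onto the device list by hand,
the code now fills a stack-local struct msi_desc template and hands it to
msi_add_msi_desc(), which copies it into storage managed by the MSI core. A
condensed sketch of the two patterns, with the fields taken from the MSI hunk
below and error handling trimmed (not a complete, compilable excerpt):

    /* Old: heap-allocate a descriptor and queue it on the device list manually */
    entry = alloc_msi_entry(&dev->dev, nvec, masks);
    if (!entry)
            return NULL;
    entry->pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
    list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));

    /* New: fill a stack-local template and let the MSI core copy and store it */
    struct msi_desc desc;

    memset(&desc, 0, sizeof(desc));
    desc.nvec_used            = nvec;
    desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
    desc.affinity             = masks;
    return msi_add_msi_desc(&dev->dev, &desc);  /* 0 on success, negative errno on failure */

Because msi_add_msi_desc() copies the template, the same stack variable can be
reused across loop iterations, which is what the MSI-X hunk below does for each
vector.
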
drivers/pci/msi/msi.c

index c9a03f968013165f208c71e14c83787bdc321973..4ee47ee955559e3aca8e79c23b1779c3749b036a 100644
@@ -376,40 +376,41 @@ static int pci_setup_msi_context(struct pci_dev *dev)
        return ret;
 }
 
-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+                             struct irq_affinity_desc *masks)
 {
-       struct msi_desc *entry;
+       struct msi_desc desc;
        u16 control;
 
        /* MSI Entry Initialization */
-       entry = alloc_msi_entry(&dev->dev, nvec, masks);
-       if (!entry)
-               return NULL;
+       memset(&desc, 0, sizeof(desc));
 
        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
        /* Lies, damned lies, and MSIs */
        if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
                control |= PCI_MSI_FLAGS_MASKBIT;
+       /* Respect XEN's mask disabling */
+       if (pci_msi_ignore_mask)
+               control &= ~PCI_MSI_FLAGS_MASKBIT;
 
-       entry->pci.msi_attrib.is_64     = !!(control & PCI_MSI_FLAGS_64BIT);
-       entry->pci.msi_attrib.can_mask  = !pci_msi_ignore_mask &&
-                                         !!(control & PCI_MSI_FLAGS_MASKBIT);
-       entry->pci.msi_attrib.default_irq = dev->irq;
-       entry->pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
-       entry->pci.msi_attrib.multiple  = ilog2(__roundup_pow_of_two(nvec));
+       desc.nvec_used                  = nvec;
+       desc.pci.msi_attrib.is_64       = !!(control & PCI_MSI_FLAGS_64BIT);
+       desc.pci.msi_attrib.can_mask    = !!(control & PCI_MSI_FLAGS_MASKBIT);
+       desc.pci.msi_attrib.default_irq = dev->irq;
+       desc.pci.msi_attrib.multi_cap   = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+       desc.pci.msi_attrib.multiple    = ilog2(__roundup_pow_of_two(nvec));
+       desc.affinity                   = masks;
 
        if (control & PCI_MSI_FLAGS_64BIT)
-               entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+               desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
        else
-               entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+               desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 
        /* Save the initial mask status */
-       if (entry->pci.msi_attrib.can_mask)
-               pci_read_config_dword(dev, entry->pci.mask_pos, &entry->pci.msi_mask);
-
+       if (desc.pci.msi_attrib.can_mask)
+               pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);
 
-       return entry;
+       return msi_add_msi_desc(&dev->dev, &desc);
 }
 
 static int msi_verify_entries(struct pci_dev *dev)
@@ -459,17 +460,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
                masks = irq_create_affinity_masks(nvec, affd);
 
        msi_lock_descs(&dev->dev);
-       entry = msi_setup_entry(dev, nvec, masks);
-       if (!entry) {
-               ret = -ENOMEM;
+       ret = msi_setup_msi_desc(dev, nvec, masks);
+       if (ret)
                goto fail;
-       }
 
        /* All MSIs are unmasked by default; mask them all */
+       entry = first_pci_msi_entry(dev);
        pci_msi_mask(entry, msi_multi_mask(entry));
 
-       list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
        /* Configure MSI capability structure */
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
        if (ret)
@@ -519,48 +517,40 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
        return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
 }
 
-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
-                             struct msix_entry *entries, int nvec,
-                             struct irq_affinity_desc *masks)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+                               struct msix_entry *entries, int nvec,
+                               struct irq_affinity_desc *masks)
 {
-       int i, vec_count = pci_msix_vec_count(dev);
+       int ret = 0, i, vec_count = pci_msix_vec_count(dev);
        struct irq_affinity_desc *curmsk;
-       struct msi_desc *entry;
+       struct msi_desc desc;
        void __iomem *addr;
 
-       for (i = 0, curmsk = masks; i < nvec; i++) {
-               entry = alloc_msi_entry(&dev->dev, 1, curmsk);
-               if (!entry) {
-                       /* No enough memory. Don't try again */
-                       return -ENOMEM;
-               }
-
-               entry->pci.msi_attrib.is_msix   = 1;
-               entry->pci.msi_attrib.is_64     = 1;
+       memset(&desc, 0, sizeof(desc));
 
-               if (entries)
-                       entry->msi_index = entries[i].entry;
-               else
-                       entry->msi_index = i;
+       desc.nvec_used                  = 1;
+       desc.pci.msi_attrib.is_msix     = 1;
+       desc.pci.msi_attrib.is_64       = 1;
+       desc.pci.msi_attrib.default_irq = dev->irq;
+       desc.pci.mask_base              = base;
 
-               entry->pci.msi_attrib.is_virtual = entry->msi_index >= vec_count;
+       for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+               desc.msi_index = entries ? entries[i].entry : i;
+               desc.affinity = masks ? curmsk : NULL;
+               desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+               desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+                                              !desc.pci.msi_attrib.is_virtual;
 
-               entry->pci.msi_attrib.can_mask  = !pci_msi_ignore_mask &&
-                                                 !entry->pci.msi_attrib.is_virtual;
-
-               entry->pci.msi_attrib.default_irq       = dev->irq;
-               entry->pci.mask_base                    = base;
-
-               if (entry->pci.msi_attrib.can_mask) {
-                       addr = pci_msix_desc_addr(entry);
-                       entry->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+               if (!desc.pci.msi_attrib.can_mask) {
+                       addr = pci_msix_desc_addr(&desc);
+                       desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
                }
 
-               list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-               if (masks)
-                       curmsk++;
+               ret = msi_add_msi_desc(&dev->dev, &desc);
+               if (ret)
+                       break;
        }
-       return 0;
+       return ret;
 }
 
 static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
@@ -598,7 +588,7 @@ static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
                masks = irq_create_affinity_masks(nvec, affd);
 
        msi_lock_descs(&dev->dev);
-       ret = msix_setup_entries(dev, base, entries, nvec, masks);
+       ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
        if (ret)
                goto out_free;