5.10-stable review patch. If anyone has any objections, please let me know.
------------------
From: Vasant Hegde <vasant.hegde@amd.com>
[ Upstream commit 404ec4e4c169fb64da6b2a38b471c13ac0897c76 ]
Newer AMD systems can support multiple PCI segments, where each segment contains one or more IOMMU instances. However, an IOMMU instance can only support a single PCI segment.
The current code assumes that the system contains only one PCI segment (segment 0) and creates global data structures such as the device table, rlookup table, etc.

Introduce a per-PCI-segment data structure that holds the segment-specific data structures. These will eventually replace the global data structures.

Also update the `amd_iommu->pci_seg` variable to point to the PCI segment structure instead of the PCI segment ID.
Co-developed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220706113825.25582-3-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Stable-dep-of: a295ec52c862 ("iommu/amd: Fix sysfs leak in iommu init")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/iommu/amd/amd_iommu_types.h | 24 ++++++++++++++-
 drivers/iommu/amd/init.c            | 46 ++++++++++++++++++++++++++++-
 2 files changed, 68 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 4a8791e037b84..c4b1a652c2c7f 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -435,6 +435,11 @@ extern bool amd_iommu_irq_remap;
 /* kmem_cache to get tables with 128 byte alignement */
 extern struct kmem_cache *amd_iommu_irq_cache;
 
+/* Make iterating over all pci segment easier */
+#define for_each_pci_segment(pci_seg) \
+	list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+	list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
 /*
  * Make iterating over all IOMMUs easier
  */
@@ -494,6 +499,17 @@ struct domain_pgtable {
 	u64 *root;
 };
 
+/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+	/* List with all PCI segments in the system */
+	struct list_head list;
+
+	/* PCI segment number */
+	u16 id;
+};
+
 /*
  * Structure where we save information about one hardware AMD IOMMU in the
  * system.
@@ -545,7 +561,7 @@ struct amd_iommu {
 	u16 cap_ptr;
 
 	/* pci domain of this IOMMU */
-	u16 pci_seg;
+	struct amd_iommu_pci_seg *pci_seg;
 
 	/* start of exclusion range of that IOMMU */
 	u64 exclusion_start;
@@ -676,6 +692,12 @@ extern struct list_head ioapic_map;
 extern struct list_head hpet_map;
 extern struct list_head acpihid_map;
 
+/*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
 /*
  * List with all IOMMUs in the system. This list is not locked because it is
  * only written and read at driver initialization or suspend time
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 91cc3a5643caf..22d28dbe092ee 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -165,6 +165,7 @@ LIST_HEAD(amd_iommu_unity_map);	/* a list of required unity
 					   mappings we find in ACPI */
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 
+LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 					   system */
 
@@ -1456,6 +1457,43 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	return 0;
 }
 
+/* Allocate PCI segment data structure */
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id)
+{
+	struct amd_iommu_pci_seg *pci_seg;
+
+	pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
+	if (pci_seg == NULL)
+		return NULL;
+
+	pci_seg->id = id;
+	list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
+
+	return pci_seg;
+}
+
+static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id)
+{
+	struct amd_iommu_pci_seg *pci_seg;
+
+	for_each_pci_segment(pci_seg) {
+		if (pci_seg->id == id)
+			return pci_seg;
+	}
+
+	return alloc_pci_segment(id);
+}
+
+static void __init free_pci_segments(void)
+{
+	struct amd_iommu_pci_seg *pci_seg, *next;
+
+	for_each_pci_segment_safe(pci_seg, next) {
+		list_del(&pci_seg->list);
+		kfree(pci_seg);
+	}
+}
+
 static void __init free_iommu_one(struct amd_iommu *iommu)
 {
 	free_cwwb_sem(iommu);
@@ -1542,8 +1580,14 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
  */
 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 {
+	struct amd_iommu_pci_seg *pci_seg;
 	int ret;
 
+	pci_seg = get_pci_segment(h->pci_seg);
+	if (pci_seg == NULL)
+		return -ENOMEM;
+	iommu->pci_seg = pci_seg;
+
 	raw_spin_lock_init(&iommu->lock);
 	iommu->cmd_sem_val = 0;
 
@@ -1564,7 +1608,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	 */
 	iommu->devid = h->devid;
 	iommu->cap_ptr = h->cap_ptr;
-	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
 
 	switch (h->type) {
@@ -2511,6 +2554,7 @@ static void __init free_iommu_resources(void)
 	amd_iommu_dev_table = NULL;
 
 	free_iommu_all();
+	free_pci_segments();
 }
 
 /* SB IOAPIC is always on this device in AMD systems */
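
For review convenience, below is a minimal standalone sketch of the get-or-allocate
pattern that the patch's get_pci_segment()/free_pci_segments() implement: look a
segment up by ID on a global list, allocate and link it on first use so that all
IOMMUs on the same segment share one structure, and free everything at teardown.
It is illustrative only and builds in userspace: a plain singly-linked list and
calloc()/free() stand in for the kernel's list_head helpers and kzalloc()/kfree(),
and the names (struct pci_seg, pci_seg_list) are hypothetical stand-ins for the
patch's struct amd_iommu_pci_seg and amd_iommu_pci_seg_list.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pci_seg {
	struct pci_seg *next;	/* next segment on the global list */
	uint16_t id;		/* PCI segment number */
};

static struct pci_seg *pci_seg_list;	/* all known PCI segments */

/* Return the segment with the given ID, allocating it on first use. */
static struct pci_seg *get_pci_segment(uint16_t id)
{
	struct pci_seg *s;

	for (s = pci_seg_list; s; s = s->next)
		if (s->id == id)
			return s;

	s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;

	s->id = id;
	s->next = pci_seg_list;
	pci_seg_list = s;
	return s;
}

/* Free every segment, mirroring the cleanup added to free_iommu_resources(). */
static void free_pci_segments(void)
{
	struct pci_seg *s, *next;

	for (s = pci_seg_list; s; s = next) {
		next = s->next;
		free(s);
	}
	pci_seg_list = NULL;
}

int main(void)
{
	/* Two lookups of segment 0 return the same structure; segment 1 gets its own. */
	printf("seg 0:       %p\n", (void *)get_pci_segment(0));
	printf("seg 0 again: %p\n", (void *)get_pci_segment(0));
	printf("seg 1:       %p\n", (void *)get_pci_segment(1));
	free_pci_segments();
	return 0;
}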