iommu/amd: Update (un)init_device_table_dma()

Pass struct amd_iommu_pci_seg as a function parameter, since these
functions need to access the per-PCI-segment device table.

Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20220706113825.25582-26-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
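
The hunks below rely on the for_each_pci_segment() iterator introduced
earlier in this series. A minimal sketch of its likely shape, assuming
the amd_iommu_pci_seg_list list head from the same series:

	/* Sketch of the per-segment iterator (amd_iommu_types.h, assumed). */
	extern struct list_head amd_iommu_pci_seg_list;

	/* Walk every struct amd_iommu_pci_seg registered by the driver. */
	#define for_each_pci_segment(pci_seg) \
		list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)

Each struct amd_iommu_pci_seg carries its own dev_table pointer, which
is why the functions below take the segment as a parameter.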
commit 1ab5a15334 (parent c7d311247b)
Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Date: 2022-07-06 17:08:15 +05:30
Committed by: Joerg Roedel <jroedel@suse.de>

@@ -238,7 +238,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
 
 static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
-static void init_device_table_dma(void);
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
 
 static bool amd_iommu_pre_enabled = true;
 
@@ -2119,6 +2119,7 @@ static void print_iommu_info(void)
 static int __init amd_iommu_init_pci(void)
 {
 	struct amd_iommu *iommu;
+	struct amd_iommu_pci_seg *pci_seg;
 	int ret;
 
 	for_each_iommu(iommu) {
@@ -2149,7 +2150,8 @@ static int __init amd_iommu_init_pci(void)
 			goto out;
 	}
 
-	init_device_table_dma();
+	for_each_pci_segment(pci_seg)
+		init_device_table_dma(pci_seg);
 
 	for_each_iommu(iommu)
 		iommu_flush_all_caches(iommu);
@@ -2511,9 +2513,13 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 /*
  * Init the device table to not allow DMA access for devices
  */
-static void init_device_table_dma(void)
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
 {
 	u32 devid;
+	struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+	if (dev_table == NULL)
+		return;
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
@@ -2521,13 +2527,17 @@ static void init_device_table_dma(void)
 	}
 }
 
-static void __init uninit_device_table_dma(void)
+static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
 {
 	u32 devid;
+	struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+	if (dev_table == NULL)
+		return;
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
-		amd_iommu_dev_table[devid].data[0] = 0ULL;
-		amd_iommu_dev_table[devid].data[1] = 0ULL;
+		dev_table[devid].data[0] = 0ULL;
+		dev_table[devid].data[1] = 0ULL;
 	}
 }
 
@@ -3120,8 +3130,11 @@ static int __init state_next(void)
 			free_iommu_resources();
 		} else {
 			struct amd_iommu *iommu;
+			struct amd_iommu_pci_seg *pci_seg;
+
+			for_each_pci_segment(pci_seg)
+				uninit_device_table_dma(pci_seg);
 
-			uninit_device_table_dma();
 			for_each_iommu(iommu)
 				iommu_flush_all_caches(iommu);
 		}
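
Assembled from the hunks above, the teardown helper now reads as
follows; the NULL check presumably covers segments whose device table
was never allocated:

	static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
	{
		u32 devid;
		struct dev_table_entry *dev_table = pci_seg->dev_table;

		if (dev_table == NULL)
			return;

		/* Clear both data words of every DTE in this segment's table. */
		for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
			dev_table[devid].data[0] = 0ULL;
			dev_table[devid].data[1] = 0ULL;
		}
	}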