Documentation/ABI/testing/sysfs-kernel-iommu_groups  +12 −0

@@ -12,3 +12,15 @@
 		file if the IOMMU driver has chosen to register a more
 		common name for the group.
 Users:
+
+What:		/sys/kernel/iommu_groups/reserved_regions
+Date:		January 2017
+KernelVersion:	v4.11
+Contact:	Eric Auger <eric.auger@redhat.com>
+Description:	/sys/kernel/iommu_groups/reserved_regions lists IOVA
+		regions that are reserved. Not all reserved regions are
+		necessarily listed. This is typically used to report
+		direct-mapped, MSI and non-mappable regions. Each
+		region is described on a single line: the first field
+		is the base IOVA, the second is the end IOVA and the
+		third field describes the type of the region.
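With this entry in place, each group directory gains a reserved_regions file. Purely for illustration (the group number, addresses and exact field formatting here are hypothetical, not taken from the patch; the address range mirrors the SMMU MSI window defined later in this series), reading it might produce something like:

	$ cat /sys/kernel/iommu_groups/0/reserved_regions
	0x0000000008000000 0x00000000080fffff msi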
drivers/iommu/amd_iommu.c  +35 −19

@@ -3161,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 	return false;
 }
 
-static void amd_iommu_get_dm_regions(struct device *dev,
-				     struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+				       struct list_head *head)
 {
+	struct iommu_resv_region *region;
 	struct unity_map_entry *entry;
 	int devid;
 
@@ -3172,41 +3173,56 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-		struct iommu_dm_region *region;
+		size_t length;
+		int prot = 0;
 
 		if (devid < entry->devid_start || devid > entry->devid_end)
 			continue;
 
-		region = kzalloc(sizeof(*region), GFP_KERNEL);
+		length = entry->address_end - entry->address_start;
+		if (entry->prot & IOMMU_PROT_IR)
+			prot |= IOMMU_READ;
+		if (entry->prot & IOMMU_PROT_IW)
+			prot |= IOMMU_WRITE;
+
+		region = iommu_alloc_resv_region(entry->address_start,
+						 length, prot,
+						 IOMMU_RESV_DIRECT);
 		if (!region) {
 			pr_err("Out of memory allocating dm-regions for %s\n",
 			       dev_name(dev));
 			return;
 		}
-
-		region->start = entry->address_start;
-		region->length = entry->address_end - entry->address_start;
-		if (entry->prot & IOMMU_PROT_IR)
-			region->prot |= IOMMU_READ;
-		if (entry->prot & IOMMU_PROT_IW)
-			region->prot |= IOMMU_WRITE;
-
 		list_add_tail(&region->list, head);
 	}
+
+	region = iommu_alloc_resv_region(MSI_RANGE_START,
+					 MSI_RANGE_END - MSI_RANGE_START + 1,
+					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
+
+	region = iommu_alloc_resv_region(HT_RANGE_START,
+					 HT_RANGE_END - HT_RANGE_START + 1,
+					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_dm_regions(struct device *dev,
-				     struct list_head *head)
+static void amd_iommu_put_resv_regions(struct device *dev,
+				       struct list_head *head)
 {
-	struct iommu_dm_region *entry, *next;
+	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list)
 		kfree(entry);
 }
 
-static void amd_iommu_apply_dm_region(struct device *dev,
-				      struct iommu_domain *domain,
-				      struct iommu_dm_region *region)
+static void amd_iommu_apply_resv_region(struct device *dev,
+					struct iommu_domain *domain,
+					struct iommu_resv_region *region)
 {
 	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
 	unsigned long start, end;
@@ -3230,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
 	.device_group = amd_iommu_device_group,
-	.get_dm_regions = amd_iommu_get_dm_regions,
-	.put_dm_regions = amd_iommu_put_dm_regions,
-	.apply_dm_region = amd_iommu_apply_dm_region,
+	.get_resv_regions = amd_iommu_get_resv_regions,
+	.put_resv_regions = amd_iommu_put_resv_regions,
+	.apply_resv_region = amd_iommu_apply_resv_region,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };

drivers/iommu/arm-smmu-v3.c  +28 −2

@@ -412,6 +412,9 @@
 /* High-level queue structures */
 #define ARM_SMMU_POLL_TIMEOUT_US	100
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -1372,8 +1375,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1883,6 +1884,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1898,6 +1922,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
drivers/iommu/arm-smmu.c  +28 −2

@@ -281,6 +281,9 @@ enum arm_smmu_s2cr_privcfg {
 
 #define FSYNR0_WNR			(1 << 4)
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static int force_stage;
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
@@ -1371,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 		 * requests.
 		 */
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1549,6 +1550,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1564,6 +1588,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
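Taken together, the three drivers above now publish their reserved ranges through a single interface. As an illustrative sketch only (this debug helper is hypothetical, not part of the series), a consumer could walk a device's reserved regions with the core iommu_get_resv_regions()/iommu_put_resv_regions() API that these callbacks feed:

	/* Hypothetical helper; assumes <linux/iommu.h> and a device behind an IOMMU. */
	static void dump_resv_regions(struct device *dev)
	{
		struct iommu_resv_region *region;
		LIST_HEAD(resv_regions);

		iommu_get_resv_regions(dev, &resv_regions);
		list_for_each_entry(region, &resv_regions, list) {
			phys_addr_t end = region->start + region->length - 1;

			/* type is IOMMU_RESV_DIRECT, IOMMU_RESV_RESERVED or IOMMU_RESV_MSI */
			dev_info(dev, "reserved region %pa..%pa, type %d\n",
				 &region->start, &end, region->type);
		}
		iommu_put_resv_regions(dev, &resv_regions);
	}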
drivers/iommu/dma-iommu.c  +96 −23

@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
 	phys_addr_t		phys;
 };
 
+enum iommu_dma_cookie_type {
+	IOMMU_DMA_IOVA_COOKIE,
+	IOMMU_DMA_MSI_COOKIE,
+};
+
 struct iommu_dma_cookie {
-	struct iova_domain	iovad;
-	struct list_head	msi_page_list;
-	spinlock_t		msi_lock;
+	enum iommu_dma_cookie_type	type;
+	union {
+		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+		struct iova_domain	iovad;
+		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+		dma_addr_t		msi_iova;
+	};
+	struct list_head		msi_page_list;
+	spinlock_t			msi_lock;
 };
 
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return cookie->iovad.granule;
+	return PAGE_SIZE;
+}
+
 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
 {
-	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return &cookie->iovad;
+	return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+	struct iommu_dma_cookie *cookie;
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (cookie) {
+		spin_lock_init(&cookie->msi_lock);
+		INIT_LIST_HEAD(&cookie->msi_page_list);
+		cookie->type = type;
+	}
+	return cookie;
 }
 
 int iommu_dma_init(void)
@@ -61,26 +96,54 @@ int iommu_dma_init(void)
  * callback when domain->type == IOMMU_DOMAIN_DMA.
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+	if (domain->iova_cookie)
+		return -EEXIST;
+
+	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+	if (!domain->iova_cookie)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
 	struct iommu_dma_cookie *cookie;
 
+	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+		return -EINVAL;
+
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 	if (!cookie)
 		return -ENOMEM;
 
-	spin_lock_init(&cookie->msi_lock);
-	INIT_LIST_HEAD(&cookie->msi_page_list);
+	cookie->msi_iova = base;
 	domain->iova_cookie = cookie;
 	return 0;
 }
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
 
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ *          iommu_get_msi_cookie()
  *
  * IOMMU drivers should normally call this from their domain_free callback.
  */
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!cookie)
 		return;
 
-	if (cookie->iovad.granule)
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 		put_iova_domain(&cookie->iovad);
 
 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		u64 size, struct device *dev)
 {
-	struct iova_domain *iovad = cookie_iovad(domain);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
 
-	if (!iovad)
-		return -ENODEV;
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return -EINVAL;
 
 	/* Use the smallest supported page size for IOVA granularity */
 	order = __ffs(domain->pgsize_bitmap);
@@ -662,11 +726,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi_page;
-	struct iova_domain *iovad = &cookie->iovad;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+	size_t size = cookie_msi_granule(cookie);
 
-	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	msi_addr &= ~(phys_addr_t)(size - 1);
 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
@@ -675,13 +740,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
-	if (!iova)
-		goto out_free_page;
-
 	msi_page->phys = msi_addr;
-	msi_page->iova = iova_dma_addr(iovad, iova);
-	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+	if (iovad) {
+		iova = __alloc_iova(domain, size, dma_get_mask(dev));
+		if (!iova)
+			goto out_free_page;
+		msi_page->iova = iova_dma_addr(iovad, iova);
+	} else {
+		msi_page->iova = cookie->msi_iova;
+		cookie->msi_iova += size;
+	}
+
+	if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
 		goto out_free_iova;
 
 	INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +759,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	__free_iova(iovad, iova);
+	if (iovad)
+		__free_iova(iovad, iova);
+	else
+		cookie->msi_iova -= size;
 out_free_page:
 	kfree(msi_page);
 	return NULL;
@@ -730,7 +803,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
 		msg->data = ~0U;
 	} else {
 		msg->address_hi = upper_32_bits(msi_page->iova);
-		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo &= cookie_msi_granule(cookie) - 1;
 		msg->address_lo += lower_32_bits(msi_page->iova);
 	}
 }
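As the kernel-doc for iommu_get_msi_cookie() explains, the MSI-only cookie targets users that run an unmanaged domain and allocate IOVAs themselves. A minimal sketch of such a caller (the function is hypothetical; the base address mirrors the SMMU drivers' MSI_IOVA_BASE above, and the cookie is released via iommu_put_dma_cookie() from the driver's domain_free path):

	/* Hypothetical setup path; assumes <linux/dma-iommu.h> and <linux/iommu.h>. */
	static struct iommu_domain *alloc_domain_with_msi_cookie(struct bus_type *bus)
	{
		struct iommu_domain *domain;

		domain = iommu_domain_alloc(bus);	/* IOMMU_DOMAIN_UNMANAGED */
		if (!domain)
			return NULL;

		/* Reserve IOVA [0x8000000, 0x80fffff] for MSI doorbell mappings */
		if (iommu_get_msi_cookie(domain, 0x8000000)) {
			iommu_domain_free(domain);
			return NULL;
		}
		return domain;
	}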