drivers/amba/bus.c +36 −1

```diff
@@ -20,6 +20,10 @@
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
 
 #define to_amba_driver(d)	container_of(d, struct amba_driver, drv)
@@ -273,6 +277,36 @@ static void amba_shutdown(struct device *dev)
 	drv->shutdown(to_amba_device(dev));
 }
 
+static int amba_dma_configure(struct device *dev)
+{
+	struct amba_driver *drv = to_amba_driver(dev->driver);
+	enum dev_dma_attr attr;
+	int ret = 0;
+
+	if (dev->of_node) {
+		ret = of_dma_configure(dev, dev->of_node, true);
+	} else if (has_acpi_companion(dev)) {
+		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+		ret = acpi_dma_configure(dev, attr);
+	}
+
+	if (!ret && !drv->driver_managed_dma) {
+		ret = iommu_device_use_default_domain(dev);
+		if (ret)
+			arch_teardown_dma_ops(dev);
+	}
+
+	return ret;
+}
+
+static void amba_dma_cleanup(struct device *dev)
+{
+	struct amba_driver *drv = to_amba_driver(dev->driver);
+
+	if (!drv->driver_managed_dma)
+		iommu_device_unuse_default_domain(dev);
+}
+
 #ifdef CONFIG_PM
 /*
  * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
@@ -341,7 +375,8 @@ struct bus_type amba_bustype = {
 	.probe		= amba_probe,
 	.remove		= amba_remove,
 	.shutdown	= amba_shutdown,
-	.dma_configure	= platform_dma_configure,
+	.dma_configure	= amba_dma_configure,
+	.dma_cleanup	= amba_dma_cleanup,
 	.pm		= &amba_pm,
 };
 EXPORT_SYMBOL_GPL(amba_bustype);
```

drivers/base/dd.c +5 −0

```diff
@@ -671,6 +671,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+	if (dev->bus && dev->bus->dma_cleanup)
+		dev->bus->dma_cleanup(dev);
 pinctrl_bind_failed:
 	device_links_no_driver(dev);
 	device_unbind_cleanup(dev);
@@ -1199,6 +1201,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 
 		device_remove(dev);
 
+		if (dev->bus && dev->bus->dma_cleanup)
+			dev->bus->dma_cleanup(dev);
+
 		device_links_driver_cleanup(dev);
 		device_unbind_cleanup(dev);
```

drivers/base/platform.c +19 −2

```diff
@@ -30,6 +30,8 @@
 #include <linux/property.h>
 #include <linux/kmemleak.h>
 #include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -1454,9 +1456,9 @@ static void platform_shutdown(struct device *_dev)
 	drv->shutdown(dev);
 }
 
-int platform_dma_configure(struct device *dev)
+static int platform_dma_configure(struct device *dev)
 {
+	struct platform_driver *drv = to_platform_driver(dev->driver);
 	enum dev_dma_attr attr;
 	int ret = 0;
 
@@ -1467,9 +1469,23 @@ int platform_dma_configure(struct device *dev)
 		ret = acpi_dma_configure(dev, attr);
 	}
 
+	if (!ret && !drv->driver_managed_dma) {
+		ret = iommu_device_use_default_domain(dev);
+		if (ret)
+			arch_teardown_dma_ops(dev);
+	}
+
 	return ret;
 }
 
+static void platform_dma_cleanup(struct device *dev)
+{
+	struct platform_driver *drv = to_platform_driver(dev->driver);
+
+	if (!drv->driver_managed_dma)
+		iommu_device_unuse_default_domain(dev);
+}
+
 static const struct dev_pm_ops platform_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
 			   pm_generic_runtime_resume, NULL)
 	USE_PLATFORM_PM_SLEEP_OPS
@@ -1484,6 +1500,7 @@ struct bus_type platform_bus_type = {
 	.remove		= platform_remove,
 	.shutdown	= platform_shutdown,
 	.dma_configure	= platform_dma_configure,
+	.dma_cleanup	= platform_dma_cleanup,
 	.pm		= &platform_dev_pm_ops,
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
```
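The `driver_managed_dma` flag introduced by this series is what the bus implementations above key off. A kernel-DMA driver needs no change, since the flag defaults to false; a driver that attaches its own IOMMU domain opts out by setting it. A minimal sketch of the opt-out, assuming a hypothetical `foo` platform driver (probe/remove bodies elided):

```c
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Would attach its own IOMMU domain here instead of using
	 * the group's default (DMA API) domain. */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	/*
	 * Opt out of core-managed DMA: platform_dma_configure() then
	 * skips iommu_device_use_default_domain(), so binding does not
	 * pin the group to its default domain.
	 */
	.driver_managed_dma = true,
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");
```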
drivers/bus/fsl-mc/fsl-mc-bus.c +22 −2

```diff
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/acpi.h>
 #include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
 
 #include "fsl-mc-private.h"
@@ -140,15 +141,33 @@ static int fsl_mc_dma_configure(struct device *dev)
 {
 	struct device *dma_dev = dev;
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+	struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
 	u32 input_id = mc_dev->icid;
+	int ret;
 
 	while (dev_is_fsl_mc(dma_dev))
 		dma_dev = dma_dev->parent;
 
 	if (dev_of_node(dma_dev))
-		return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+		ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+	else
+		ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+
+	if (!ret && !mc_drv->driver_managed_dma) {
+		ret = iommu_device_use_default_domain(dev);
+		if (ret)
+			arch_teardown_dma_ops(dev);
+	}
+
+	return ret;
+}
+
+static void fsl_mc_dma_cleanup(struct device *dev)
+{
+	struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
 
-	return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+	if (!mc_drv->driver_managed_dma)
+		iommu_device_unuse_default_domain(dev);
 }
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -312,6 +331,7 @@ struct bus_type fsl_mc_bus_type = {
 	.match = fsl_mc_bus_match,
 	.uevent = fsl_mc_bus_uevent,
 	.dma_configure  = fsl_mc_dma_configure,
+	.dma_cleanup = fsl_mc_dma_cleanup,
 	.dev_groups = fsl_mc_dev_groups,
 	.bus_groups = fsl_mc_bus_groups,
 };
```
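Together with the dd.c hooks, the contract for any bus adopting this pattern is: `dma_configure()` takes the group's "kernel DMA" reference before the driver's probe() runs, and `dma_cleanup()` drops it on probe failure or unbind. A minimal sketch of that contract, assuming a hypothetical `foo` bus (firmware-specific setup elided); it simply mirrors the platform/amba/fsl-mc implementations above:

```c
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>

/* Hypothetical bus-specific driver type carrying the opt-out flag. */
struct foo_driver {
	struct device_driver driver;
	bool driver_managed_dma;
};

#define to_foo_driver(d) container_of(d, struct foo_driver, driver)

/* Called from really_probe() before the driver's probe(). */
static int foo_dma_configure(struct device *dev)
{
	struct foo_driver *drv = to_foo_driver(dev->driver);
	int ret = 0;

	/* ... firmware-specific DMA setup (OF/ACPI) goes here ... */

	if (!ret && !drv->driver_managed_dma) {
		ret = iommu_device_use_default_domain(dev);
		if (ret)
			arch_teardown_dma_ops(dev);
	}
	return ret;
}

/* Called on probe failure and after unbind, per the dd.c hunks. */
static void foo_dma_cleanup(struct device *dev)
{
	struct foo_driver *drv = to_foo_driver(dev->driver);

	if (!drv->driver_managed_dma)
		iommu_device_unuse_default_domain(dev);
}

static struct bus_type foo_bus_type = {
	.name		= "foo",
	.dma_configure	= foo_dma_configure,
	.dma_cleanup	= foo_dma_cleanup,
};
```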
drivers/iommu/iommu.c +229 −102

```diff
@@ -18,7 +18,6 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 #include <linux/idr.h>
-#include <linux/notifier.h>
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
@@ -40,14 +39,16 @@ struct iommu_group {
 	struct kobject *devices_kobj;
 	struct list_head devices;
 	struct mutex mutex;
-	struct blocking_notifier_head notifier;
 	void *iommu_data;
 	void (*iommu_data_release)(void *iommu_data);
 	char *name;
 	int id;
 	struct iommu_domain *default_domain;
+	struct iommu_domain *blocking_domain;
 	struct iommu_domain *domain;
 	struct list_head entry;
+	unsigned int owner_cnt;
+	void *owner;
 };
 
 struct group_device {
@@ -82,8 +83,8 @@ static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
 				struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
-				 struct iommu_group *group);
+static int __iommu_group_set_domain(struct iommu_group *group,
+				    struct iommu_domain *new_domain);
 static int iommu_create_device_direct_mappings(struct iommu_group *group,
 					       struct device *dev);
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -294,7 +295,11 @@ int iommu_probe_device(struct device *dev)
 	mutex_lock(&group->mutex);
 	iommu_alloc_default_domain(group, dev);
 
-	if (group->default_domain) {
+	/*
+	 * If device joined an existing group which has been claimed, don't
+	 * attach the default domain.
+	 */
+	if (group->default_domain && !group->owner) {
 		ret = __iommu_attach_device(group->default_domain, dev);
 		if (ret) {
 			mutex_unlock(&group->mutex);
@@ -599,6 +604,8 @@ static void iommu_group_release(struct kobject *kobj)
 
 	if (group->default_domain)
 		iommu_domain_free(group->default_domain);
+	if (group->blocking_domain)
+		iommu_domain_free(group->blocking_domain);
 
 	kfree(group->name);
 	kfree(group);
@@ -633,7 +640,6 @@ struct iommu_group *iommu_group_alloc(void)
 	mutex_init(&group->mutex);
 	INIT_LIST_HEAD(&group->devices);
 	INIT_LIST_HEAD(&group->entry);
-	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
 
 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
 	if (ret < 0) {
@@ -906,10 +912,6 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 	if (ret)
 		goto err_put_group;
 
-	/* Notify any listeners about change to group. */
-	blocking_notifier_call_chain(&group->notifier,
-				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
-
 	trace_add_device_to_group(group->id, dev);
 
 	dev_info(dev, "Adding to iommu group %d\n", group->id);
@@ -951,10 +953,6 @@ void iommu_group_remove_device(struct device *dev)
 
 	dev_info(dev, "Removing from iommu group %d\n", group->id);
 
-	/* Pre-notify listeners that a device is being removed. */
-	blocking_notifier_call_chain(&group->notifier,
-				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
-
 	mutex_lock(&group->mutex);
 	list_for_each_entry(tmp_device, &group->devices, list) {
 		if (tmp_device->dev == dev) {
@@ -1076,36 +1074,6 @@ void iommu_group_put(struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_group_put);
 
-/**
- * iommu_group_register_notifier - Register a notifier for group changes
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * This function allows iommu group users to track changes in a group.
- * See include/linux/iommu.h for actions sent via this notifier. Caller
- * should hold a reference to the group throughout notifier registration.
- */
-int iommu_group_register_notifier(struct iommu_group *group,
-				  struct notifier_block *nb)
-{
-	return blocking_notifier_chain_register(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
-
-/**
- * iommu_group_unregister_notifier - Unregister a notifier
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * Unregister a previously registered group notifier block.
- */
-int iommu_group_unregister_notifier(struct iommu_group *group,
-				    struct notifier_block *nb)
-{
-	return blocking_notifier_chain_unregister(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
-
 /**
  * iommu_register_device_fault_handler() - Register a device fault handler
  * @dev: the device
@@ -1651,14 +1619,8 @@ static int remove_iommu_group(struct device *dev, void *data)
 static int iommu_bus_notifier(struct notifier_block *nb,
 			      unsigned long action, void *data)
 {
-	unsigned long group_action = 0;
 	struct device *dev = data;
-	struct iommu_group *group;
 
-	/*
-	 * ADD/DEL call into iommu driver ops if provided, which may
-	 * result in ADD/DEL notifiers to group->notifier
-	 */
 	if (action == BUS_NOTIFY_ADD_DEVICE) {
 		int ret;
@@ -1669,34 +1631,6 @@ static int iommu_bus_notifier(struct notifier_block *nb,
 		return NOTIFY_OK;
 	}
 
-	/*
-	 * Remaining BUS_NOTIFYs get filtered and republished to the
-	 * group, if anyone is listening
-	 */
-	group = iommu_group_get(dev);
-	if (!group)
-		return 0;
-
-	switch (action) {
-	case BUS_NOTIFY_BIND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
-		break;
-	case BUS_NOTIFY_BOUND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
-		break;
-	case BUS_NOTIFY_UNBIND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
-		break;
-	case BUS_NOTIFY_UNBOUND_DRIVER:
-		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
-		break;
-	}
-
-	if (group_action)
-		blocking_notifier_call_chain(&group->notifier,
-					     group_action, dev);
-
-	iommu_group_put(group);
 	return 0;
 }
@@ -1983,6 +1917,24 @@ void iommu_domain_free(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
+/*
+ * Put the group's domain back to the appropriate core-owned domain - either the
+ * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
+ */
+static void __iommu_group_set_core_domain(struct iommu_group *group)
+{
+	struct iommu_domain *new_domain;
+	int ret;
+
+	if (group->owner)
+		new_domain = group->blocking_domain;
+	else
+		new_domain = group->default_domain;
+
+	ret = __iommu_group_set_domain(group, new_domain);
+	WARN(ret, "iommu driver failed to attach the default/blocking domain");
+}
+
 static int __iommu_attach_device(struct iommu_domain *domain,
 				 struct device *dev)
 {
```
```diff
@@ -2039,9 +1991,6 @@ static void __iommu_detach_device(struct iommu_domain *domain,
 	if (iommu_is_attach_deferred(dev))
 		return;
 
-	if (unlikely(domain->ops->detach_dev == NULL))
-		return;
-
 	domain->ops->detach_dev(domain, dev);
 	trace_detach_device_from_domain(dev);
 }
@@ -2055,12 +2004,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 		return;
 
 	mutex_lock(&group->mutex);
-	if (iommu_group_device_count(group) != 1) {
-		WARN_ON(1);
+	if (WARN_ON(domain != group->domain) ||
+	    WARN_ON(iommu_group_device_count(group) != 1))
 		goto out_unlock;
-	}
 
-	__iommu_detach_group(domain, group);
+	__iommu_group_set_core_domain(group);
 
 out_unlock:
 	mutex_unlock(&group->mutex);
@@ -2116,7 +2063,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
 {
 	int ret;
 
-	if (group->default_domain && group->domain != group->default_domain)
+	if (group->domain && group->domain != group->default_domain &&
+	    group->domain != group->blocking_domain)
 		return -EBUSY;
 
 	ret = __iommu_group_for_each_dev(group, domain,
@@ -2148,34 +2096,49 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
 	return 0;
 }
 
-static void __iommu_detach_group(struct iommu_domain *domain,
-				 struct iommu_group *group)
+static int __iommu_group_set_domain(struct iommu_group *group,
+				    struct iommu_domain *new_domain)
 {
 	int ret;
 
-	if (!group->default_domain) {
-		__iommu_group_for_each_dev(group, domain,
+	if (group->domain == new_domain)
+		return 0;
+
+	/*
+	 * New drivers should support default domains and so the detach_dev() op
+	 * will never be called. Otherwise the NULL domain represents some
+	 * platform specific behavior.
+	 */
+	if (!new_domain) {
+		if (WARN_ON(!group->domain->ops->detach_dev))
+			return -EINVAL;
+		__iommu_group_for_each_dev(group, group->domain,
 					   iommu_group_do_detach_device);
 		group->domain = NULL;
-		return;
+		return 0;
 	}
 
-	if (group->domain == group->default_domain)
-		return;
-
-	/* Detach by re-attaching to the default domain */
-	ret = __iommu_group_for_each_dev(group, group->default_domain,
+	/*
+	 * Changing the domain is done by calling attach_dev() on the new
+	 * domain. This switch does not have to be atomic and DMA can be
+	 * discarded during the transition. DMA must only be able to access
+	 * either new_domain or group->domain, never something else.
+	 *
+	 * Note that this is called in error unwind paths, attaching to a
+	 * domain that has already been attached cannot fail.
+	 */
+	ret = __iommu_group_for_each_dev(group, new_domain,
 					 iommu_group_do_attach_device);
-	if (ret != 0)
-		WARN_ON(1);
-	else
-		group->domain = group->default_domain;
+	if (ret)
+		return ret;
+	group->domain = new_domain;
+	return 0;
 }
 
 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 {
 	mutex_lock(&group->mutex);
-	__iommu_detach_group(domain, group);
+	__iommu_group_set_core_domain(group);
 	mutex_unlock(&group->mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);
```
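With `__iommu_group_set_core_domain()` in place, detaching never leaves the group domain-less: the core re-attaches the default domain, or the blocking domain once ownership has been claimed. From a caller's perspective the external attach/detach API is unchanged; a minimal sketch (the `foo_` function is hypothetical):

```c
#include <linux/iommu.h>

static int foo_run_in_private_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	struct iommu_domain *domain;
	int ret;

	if (!group)
		return -ENODEV;

	/* Allocate an unmanaged domain on the device's bus. */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		ret = -ENOMEM;
		goto out_put;
	}

	/* Fails with -EBUSY if a non-core domain is already attached. */
	ret = iommu_attach_group(domain, group);
	if (ret)
		goto out_free;

	/* ... iommu_map()/iommu_unmap() against 'domain' ... */

	/* Re-attaches the core-owned domain under the hood. */
	iommu_detach_group(domain, group);
out_free:
	iommu_domain_free(domain);
out_put:
	iommu_group_put(group);
	return ret;
}
```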
```diff
@@ -3102,3 +3065,167 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
 
 	return ret;
 }
+
+/**
+ * iommu_device_use_default_domain() - Device driver wants to handle device
+ *                                     DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver about to bind @dev wants to do DMA through the kernel
+ * DMA API. Return 0 if it is allowed, otherwise an error.
+ */
+int iommu_device_use_default_domain(struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(dev);
+	int ret = 0;
+
+	if (!group)
+		return 0;
+
+	mutex_lock(&group->mutex);
+	if (group->owner_cnt) {
+		if (group->domain != group->default_domain || group->owner) {
+			ret = -EBUSY;
+			goto unlock_out;
+		}
+	}
+
+	group->owner_cnt++;
+
+unlock_out:
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	return ret;
+}
+
+/**
+ * iommu_device_unuse_default_domain() - Device driver stops handling device
+ *                                       DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver doesn't want to do DMA through kernel DMA API anymore.
+ * It must be called after iommu_device_use_default_domain().
+ */
+void iommu_device_unuse_default_domain(struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(dev);
+
+	if (!group)
+		return;
+
+	mutex_lock(&group->mutex);
+	if (!WARN_ON(!group->owner_cnt))
+		group->owner_cnt--;
+
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+}
+
+static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
+{
+	struct group_device *dev =
+		list_first_entry(&group->devices, struct group_device, list);
+
+	if (group->blocking_domain)
+		return 0;
+
+	group->blocking_domain =
+		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+	if (!group->blocking_domain) {
+		/*
+		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
+		 * create an empty domain instead.
+		 */
+		group->blocking_domain = __iommu_domain_alloc(
+			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+		if (!group->blocking_domain)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * iommu_group_claim_dma_owner() - Set DMA ownership of a group
+ * @group: The group.
+ * @owner: Caller specified pointer. Used for exclusive ownership.
+ *
+ * This is to support backward compatibility for vfio which manages
+ * the dma ownership in iommu_group level. New invocations on this
+ * interface should be prohibited.
+ */
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	if (group->owner_cnt) {
+		ret = -EPERM;
+		goto unlock_out;
+	} else {
+		if (group->domain && group->domain != group->default_domain) {
+			ret = -EBUSY;
+			goto unlock_out;
+		}
+
+		ret = __iommu_group_alloc_blocking_domain(group);
+		if (ret)
+			goto unlock_out;
+
+		ret = __iommu_group_set_domain(group, group->blocking_domain);
+		if (ret)
+			goto unlock_out;
+		group->owner = owner;
+	}
+
+	group->owner_cnt++;
+unlock_out:
+	mutex_unlock(&group->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
+
+/**
+ * iommu_group_release_dma_owner() - Release DMA ownership of a group
+ * @group: The group.
+ *
+ * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+ */
+void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+	int ret;
+
+	mutex_lock(&group->mutex);
+	if (WARN_ON(!group->owner_cnt || !group->owner))
+		goto unlock_out;
+
+	group->owner_cnt = 0;
+	group->owner = NULL;
+	ret = __iommu_group_set_domain(group, group->default_domain);
+	WARN(ret, "iommu driver failed to attach the default domain");
+
+unlock_out:
+	mutex_unlock(&group->mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
+
+/**
+ * iommu_group_dma_owner_claimed() - Query group dma ownership status
+ * @group: The group.
+ *
+ * This provides status query on a given group. It is racy and only for
+ * non-binding status reporting.
+ */
+bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+	unsigned int user;
+
+	mutex_lock(&group->mutex);
+	user = group->owner_cnt;
+	mutex_unlock(&group->mutex);
+
+	return user;
+}
+EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
```
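For reference, a VFIO-style consumer of the new ownership interface would bracket its use of the group like the following minimal sketch (`foo_claim_and_run()` and the cookie are hypothetical):

```c
#include <linux/iommu.h>

static int foo_claim_and_run(struct iommu_group *group, void *cookie)
{
	int ret;

	/*
	 * Fails with -EPERM if owner_cnt is already elevated, i.e. a
	 * kernel-DMA driver is bound to a device in the group or
	 * another owner holds the claim.
	 */
	ret = iommu_group_claim_dma_owner(group, cookie);
	if (ret)
		return ret;

	/*
	 * The group now sits in the blocking domain: no DMA flows
	 * until the owner attaches a domain of its own, e.g. via
	 * iommu_attach_group().
	 */

	/* ... run the user-controlled workload ... */

	iommu_group_release_dma_owner(group);
	return 0;
}
```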