#endif
static bool iommu_dma_strict __read_mostly = true;
-struct iommu_callback_data {
-	const struct iommu_ops *ops;
-};
-
struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ if (dom) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ }
}
group->default_domain = dom;
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
struct notifier_block *nb;
-	struct iommu_callback_data cb = {
-		.ops = ops,
-	};
	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;
	nb->notifier_call = iommu_bus_notifier;
	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;
-	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
/* Clean up */
-	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
+	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
bus_unregister_notifier(bus, nb);
out_free:
	kfree(nb);
	return err;
}
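With struct iommu_callback_data gone, the per-device callbacks no longer receive the ops through the bus_for_each_dev() data pointer, which is why both call sites above now pass NULL. A plausible sketch, not the exact upstream body, of how such a callback can recover everything it needs from the device itself:

/* Hypothetical sketch: derive the ops from the device's bus instead of
 * a struct iommu_callback_data passed via @data (now always NULL). */
static int add_iommu_group(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret;

	if (!ops || !ops->add_device)
		return 0;

	ret = ops->add_device(dev);

	/* -ENODEV just means this device is not translated by the IOMMU;
	 * that is no reason to fail bus initialization. */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}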
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Per device IOMMU features.
+ */
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_has_feat)
+		return ops->dev_has_feat(dev, feat);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_enable_feat)
+		return ops->dev_enable_feat(dev, feat);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
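A hedged usage sketch for the two calls above: a driver probe path first asks whether the hardware supports the aux-domain feature, then opts in. IOMMU_DEV_FEAT_AUX is the feature bit introduced by this series; the my_* names are hypothetical.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical driver helper: opt in to aux-domain support at probe. */
static int my_probe_enable_aux(struct device *dev)
{
	int ret;

	/* Query the capability first; false if the op is missing. */
	if (!iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	/* Enable it; -ENODEV if the bus provides no dev_enable_feat. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		dev_err(dev, "cannot enable aux-domain support: %d\n", ret);

	return ret;
}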
+
+/*
+ * Device drivers should do the necessary cleanups before calling this.
+ * For example, before disabling the aux-domain feature, the device driver
+ * should detach all aux-domains. Otherwise, this will return -EBUSY.
+ */
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_disable_feat)
+		return ops->dev_disable_feat(dev, feat);
+
+	return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
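The comment above prescribes an ordering that is easy to get wrong. A minimal sketch of the teardown side, assuming the driver kept a pointer to the aux domain it attached (iommu_aux_detach_device() is added later in this patch; my_* names are hypothetical):

/* Hypothetical teardown helper: detach first, then disable, so the
 * IOMMU driver does not report -EBUSY. */
static void my_remove_disable_aux(struct device *dev,
				  struct iommu_domain *my_aux_domain)
{
	int ret;

	iommu_aux_detach_device(my_aux_domain, dev);

	ret = iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		dev_warn(dev, "disabling aux-domain support failed: %d\n", ret);
}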
+
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->dev_feat_enabled)
+		return ops->dev_feat_enabled(dev, feat);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
+
+/*
+ * Aux-domain specific attach/detach.
+ *
+ * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
+ * true. Also, as long as domains are attached to a device through this
+ * interface, any attempt to call iommu_attach_device() should fail
+ * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
+ * This should make us safe against a device being attached to a guest as a
+ * whole while there are still pasid users on it (aux and sva).
+ */
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+	int ret = -ENODEV;
+
+	if (domain->ops->aux_attach_dev)
+		ret = domain->ops->aux_attach_dev(domain, dev);
+
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
+
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+	if (domain->ops->aux_detach_dev) {
+		domain->ops->aux_detach_dev(domain, dev);
+		trace_detach_device_from_domain(dev);
+	}
+}
+EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
+
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+	int ret = -ENODEV;
+
+	if (domain->ops->aux_get_pasid)
+		ret = domain->ops->aux_get_pasid(domain, dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
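Putting the three aux-domain calls together: a hedged sketch of a driver assigning an isolated address space to one hardware context. iommu_domain_alloc() and iommu_domain_free() are the long-standing core API; the flow itself is an assumption about intended usage, not taken from the patch.

/* Hypothetical flow: one aux domain per device context, tagged by PASID. */
static int my_assign_aux_domain(struct device *dev)
{
	struct iommu_domain *domain;
	int pasid, ret;

	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		goto err_free;

	/* The IOMMU driver picked a PASID that routes to this domain. */
	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		ret = pasid;
		goto err_detach;
	}

	/* ... program @pasid into the device-specific context ... */
	return 0;

err_detach:
	iommu_aux_detach_device(domain, dev);
err_free:
	iommu_domain_free(domain);
	return ret;
}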
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer to pass to the bind callback
+ *
+ * Create a bond between device and address space, allowing the device to
+ * access the mm using the returned PASID. If a bond already exists between
+ * @dev and @mm, it is returned and an additional reference is taken. Caller
+ * must call iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+	struct iommu_group *group;
+	struct iommu_sva *handle = ERR_PTR(-EINVAL);
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (!ops || !ops->sva_bind)
+		return ERR_PTR(-ENODEV);
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return ERR_PTR(-ENODEV);
+
+	/* Ensure device count and domain don't change while we're binding */
+	mutex_lock(&group->mutex);
+
+	/*
+	 * To keep things simple, SVA currently doesn't support IOMMU groups
+	 * with more than one device. Existing SVA-capable systems are not
+	 * affected by the problems that required IOMMU groups (lack of ACS
+	 * isolation, device ID aliasing and other hardware issues).
+	 */
+	if (iommu_group_device_count(group) != 1)
+		goto out_unlock;
+
+	handle = ops->sva_bind(dev, mm, drvdata);
+
+out_unlock:
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
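A hedged sketch of the bind side from a driver's point of view: enable the SVA feature, then bind current->mm. Per the kernel-doc above, every successful bind must eventually be balanced by iommu_sva_unbind_device(); enabling the feature per bind is a simplification, and the helper name is hypothetical.

#include <linux/iommu.h>
#include <linux/sched.h>	/* for current */

/* Hypothetical helper: bind the calling process's address space. */
static struct iommu_sva *my_bind_current_mm(struct device *dev, void *drvdata)
{
	struct iommu_sva *handle;
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ERR_PTR(ret);

	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
	if (IS_ERR(handle))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);

	return handle;
}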
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transactions for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+	struct iommu_group *group;
+	struct device *dev = handle->dev;
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (!ops || !ops->sva_unbind)
+		return;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return;
+
+	mutex_lock(&group->mutex);
+	ops->sva_unbind(handle);
+	mutex_unlock(&group->mutex);
+
+	iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+int iommu_sva_set_ops(struct iommu_sva *handle,
+		      const struct iommu_sva_ops *sva_ops)
+{
+	if (handle->ops && handle->ops != sva_ops)
+		return -EEXIST;
+
+	handle->ops = sva_ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
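A sketch of registering SVA ops on a fresh bond, assuming struct iommu_sva_ops carries an mm_exit callback (per iommu_mm_exit_handler_t elsewhere in this series) that the core invokes when the bound address space goes away; the my_* names are hypothetical.

/* Hypothetical mm_exit callback: stop issuing DMA for this PASID. */
static int my_mm_exit(struct device *dev, struct iommu_sva *handle,
		      void *drvdata)
{
	/* Quiesce the hardware context associated with @handle here. */
	return 0;
}

static const struct iommu_sva_ops my_sva_ops = {
	.mm_exit = my_mm_exit,
};

/* Call right after a successful iommu_sva_bind_device(); a second call
 * with different ops fails with -EEXIST. */
static int my_register_sva_ops(struct iommu_sva *handle)
{
	return iommu_sva_set_ops(handle, &my_sva_ops);
}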
+
+int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+
+	if (!ops || !ops->sva_get_pasid)
+		return IOMMU_PASID_INVALID;
+
+	return ops->sva_get_pasid(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
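Finally, a hedged sketch of a complete consumer of a bond: fetch the PASID, use it, then drop the reference. The device must be quiesced before the unbind, as the kernel-doc for iommu_sva_unbind_device() requires; the my_* names are hypothetical.

/* Hypothetical end-to-end use of one bond. */
static int my_run_job(struct device *dev, struct iommu_sva *handle)
{
	int pasid = iommu_sva_get_pasid(handle);

	if (pasid == IOMMU_PASID_INVALID)
		return -ENODEV;

	/* ... program @pasid into the device, run DMA on user memory ... */

	/* All transactions and page requests for @pasid are finished. */
	iommu_sva_unbind_device(handle);
	return 0;
}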