return vdpa_reset(vdpa);
}
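+/*
+ * Bind the mm of the vhost device owner to the parent vdpa device.
+ * This is a no-op for devices that do not use user VA or that do not
+ * implement the bind_mm config op.
+ */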
+static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+
+	if (!vdpa->use_va || !ops->bind_mm)
+		return 0;
+
+	return ops->bind_mm(vdpa, v->vdev.mm);
+}
+
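+/*
+ * Undo vhost_vdpa_bind_mm(): ask the parent device to drop its
+ * reference to the bound mm. No-op unless the device uses user VA
+ * and implements unbind_mm.
+ */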
+static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+
+	if (!vdpa->use_va || !ops->unbind_mm)
+		return;
+
+	ops->unbind_mm(vdpa);
+}
+
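For context, a parent driver that sets vdpa->use_va would provide these callbacks. Below is a minimal, hypothetical sketch of such an implementation, not part of this patch: the my_vdpa structure, its mm_bound field, and the function names are invented for illustration, while the bind_mm/unbind_mm signatures come from struct vdpa_config_ops and mmgrab()/mmdrop() are existing kernel helpers.

#include <linux/vdpa.h>
#include <linux/sched/mm.h>

/* Hypothetical parent-driver state; not part of this patch. */
struct my_vdpa {
	struct vdpa_device vdpa;
	struct mm_struct *mm_bound;
};

static int my_vdpa_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
	struct my_vdpa *dev = container_of(vdpa, struct my_vdpa, vdpa);

	/* Pin the mm_struct so the pointer stays valid while bound;
	 * actually accessing the address space would still need
	 * mmget_not_zero()/kthread_use_mm() at access time.
	 */
	mmgrab(mm);
	dev->mm_bound = mm;
	return 0;
}

static void my_vdpa_unbind_mm(struct vdpa_device *vdpa)
{
	struct my_vdpa *dev = container_of(vdpa, struct my_vdpa, vdpa);

	if (!dev->mm_bound)
		return;

	mmdrop(dev->mm_bound);
	dev->mm_bound = NULL;
}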
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
break;
}
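+	/*
+	 * On success of the main ioctl switch, handle post-processing:
+	 * for VHOST_SET_OWNER, bind the new owner's mm (v->vdev.mm) to
+	 * the parent device, and drop ownership again if the parent
+	 * refuses.
+	 */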
+	if (r)
+		goto out;
+
+	switch (cmd) {
+	case VHOST_SET_OWNER:
+		r = vhost_vdpa_bind_mm(v);
+		if (r)
+			vhost_dev_reset_owner(d, NULL);
+		break;
+	}
+out:
mutex_unlock(&d->mutex);
return r;
}
if (!v->in_batch)
ops->set_map(vdpa, asid, iotlb);
}
-	/* If we are in the middle of batch processing, delay the free
-	 * of AS until BATCH_END.
-	 */
-	if (!v->in_batch && !iotlb->nmaps)
-		vhost_vdpa_remove_as(v, asid);
+
}
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
if (v->in_batch && ops->set_map)
ops->set_map(vdpa, asid, iotlb);
v->in_batch = false;
-	if (!iotlb->nmaps)
-		vhost_vdpa_remove_as(v, asid);
break;
default:
r = -EINVAL;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct device *dma_dev = vdpa_get_dma_dev(vdpa);
-	struct bus_type *bus;
+	const struct bus_type *bus;
int ret;
/* Device want to do DMA by itself */
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
+	vhost_vdpa_unbind_mm(v);
vhost_vdpa_config_put(v);
vhost_vdpa_cleanup(v);
mutex_unlock(&d->mutex);