spin_unlock(&ie->list_lock);
if (found)
- complete_desc(found, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
+
+ /*
- * complete_desc() will return desc to allocator and the desc can be
- * acquired by a different process and the desc->list can be modified.
- * Delete desc from list so the list trasversing does not get corrupted
- * by the other process.
++ * Completing the descriptor returns it to the allocator, where it
++ * can be acquired by a different process that may then modify
++ * desc->list. Delete desc from the list first so the list
++ * traversal does not get corrupted by the other process.
+ */
+ list_for_each_entry_safe(d, t, &flist, list) {
+ list_del_init(&d->list);
- complete_desc(d, IDXD_COMPLETE_NORMAL);
++ idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+ }
}
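
The unlink-then-complete ordering in the flush loop above is the whole point of the comment: once a descriptor is completed it returns to the allocator and may immediately be re-acquired and relinked by another thread. A minimal stand-alone sketch of the same pattern, using a hypothetical item type rather than idxd descriptors:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int payload;
};

/*
 * Flush a private list: unlink each entry before releasing it, so a
 * concurrent owner that re-acquires the released entry cannot corrupt
 * our traversal. list_for_each_entry_safe() caches the next entry in
 * @t, which is what makes removing the current entry during iteration
 * legal.
 */
static void flush_items(struct list_head *flist)
{
	struct item *d, *t;

	list_for_each_entry_safe(d, t, flist, list) {
		list_del_init(&d->list);	/* unlink first */
		kfree(d);			/* then release */
	}
}
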
+/*
+ * ENQCMDS typically fails when the WQ is inactive or busy. On host submission, the
+ * driver has better control over the number of descriptors being submitted to a
+ * shared wq by limiting the number of driver-allocated descriptors to the wq size.
+ * However, when the swq is exported to a guest kernel, it may be shared with multiple
+ * guest kernels. This significantly raises the likelihood of the swq returning busy
+ * on submission. Having a tunable retry mechanism allows the driver to keep trying
+ * for a bit before giving up. The sysfs knob can be tuned by the system administrator.
+ */
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
+{
+ int rc, retries = 0;
+
+ do {
+ rc = enqcmds(portal, desc);
+ if (rc == 0)
+ break;
+ cpu_relax();
+ } while (retries++ < wq->enqcmds_retries);
+
+ return rc;
+}
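
The sysfs knob mentioned above is not part of this hunk. As a sketch of what the store side could look like, assuming a confdev_to_wq() accessor, a wq_dedicated() check, and an IDXD_ENQCMDS_MAX_RETRIES clamp (names used for illustration only, not taken from this patch):

#include <linux/device.h>
#include <linux/kernel.h>

/*
 * Hypothetical sketch of a sysfs store handler for wq->enqcmds_retries.
 * confdev_to_wq(), wq_dedicated() and IDXD_ENQCMDS_MAX_RETRIES are
 * assumed driver-internal names.
 */
static ssize_t wq_enqcmds_retries_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned int retries;
	int rc;

	/* Retrying only makes sense for a shared WQ driven by ENQCMDS. */
	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	/* Clamp to an assumed upper bound instead of failing outright. */
	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

Clamping oversized values rather than rejecting them keeps the knob forgiving; returning -EINVAL would be an equally defensible design choice.
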
+
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;