Merge branch 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax
[linux-2.6-block.git] / drivers / dma / dmaengine.c
index 83e8c5c027d31f5f849232b8c7729c24a4a4d69c..f1a441ab395d7529ebbc4f8d59e891e20c61419d 100644 (file)
@@ -498,12 +498,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
        caps->max_burst = device->max_burst;
        caps->residue_granularity = device->residue_granularity;
        caps->descriptor_reuse = device->descriptor_reuse;
-
-       /*
-        * Some devices implement only pause (e.g. to get residuum) but no
-        * resume. However cmd_pause is advertised as pause AND resume.
-        */
-       caps->cmd_pause = !!(device->device_pause && device->device_resume);
+       caps->cmd_pause = !!device->device_pause;
+       caps->cmd_resume = !!device->device_resume;
        caps->cmd_terminate = !!device->device_terminate_all;
 
        return 0;
@@ -772,8 +768,14 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
                return ERR_PTR(-ENODEV);
 
        chan = __dma_request_channel(mask, NULL, NULL);
-       if (!chan)
-               chan = ERR_PTR(-ENODEV);
+       if (!chan) {
+               mutex_lock(&dma_list_mutex);
+               if (list_empty(&dma_device_list))
+                       chan = ERR_PTR(-EPROBE_DEFER);
+               else
+                       chan = ERR_PTR(-ENODEV);
+               mutex_unlock(&dma_list_mutex);
+       }
 
        return chan;
 }
@@ -1130,6 +1132,41 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
/*
 * devres release callback paired with dmaenginem_async_device_register():
 * @res holds a pointer to the dma_device that was registered, so undo the
 * registration when the owning driver detaches.
 */
static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *dmadev = *(struct dma_device **)res;

	dma_async_device_unregister(dmadev);
}
+
+/**
+ * dmaenginem_async_device_register - registers DMA devices found
+ * @device: &dma_device
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+int dmaenginem_async_device_register(struct dma_device *device)
+{
+       void *p;
+       int ret;
+
+       p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       ret = dma_async_device_register(device);
+       if (!ret) {
+               *(struct dma_device **)p = device;
+               devres_add(device->dev, p);
+       } else {
+               devres_free(p);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(dmaenginem_async_device_register);
+
 struct dmaengine_unmap_pool {
        struct kmem_cache *cache;
        const char *name;