dmaengine: remove BUG_ON while registering devices
author Vinod Koul <vinod.koul@intel.com>
Sun, 27 Aug 2017 11:25:32 +0000 (16:55 +0530)
committer Vinod Koul <vinod.koul@intel.com>
Mon, 28 Aug 2017 04:09:46 +0000 (09:39 +0530)
The DMAengine core validates the mandatory device operations, and the ones
implied by the advertised capabilities, with BUG_ON(), which crashes the
kernel over what is really a driver bug. Remove the BUG_ON() checks and
instead log the problem and return an error from dma_async_device_register().
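
The conversion follows the pattern below, shown on the tx_status check as a
representative example (the same shape is applied to each mandatory and
capability-dependent operation in the hunk that follows):

        /* before: a missing mandatory callback triggered a kernel BUG() */
        BUG_ON(!device->device_tx_status);

        /* after: report the problem and fail registration gracefully */
        if (!device->device_tx_status) {
                dev_err(device->dev, "Device tx_status is not defined\n");
                return -EIO;
        }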

Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/dmaengine.c

index 428b1414263adb91a7378d1ba386265efe365bd3..b451354735d3d6b80b003f8e6c3ea098d37041e0 100644 (file)
@@ -923,28 +923,85 @@ int dma_async_device_register(struct dma_device *device)
                return -ENODEV;
 
        /* validate device routines */
-       BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
-               !device->device_prep_dma_memcpy);
-       BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
-               !device->device_prep_dma_xor);
-       BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
-               !device->device_prep_dma_xor_val);
-       BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
-               !device->device_prep_dma_pq);
-       BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
-               !device->device_prep_dma_pq_val);
-       BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-               !device->device_prep_dma_memset);
-       BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
-               !device->device_prep_dma_interrupt);
-       BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
-               !device->device_prep_dma_cyclic);
-       BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-               !device->device_prep_interleaved_dma);
-
-       BUG_ON(!device->device_tx_status);
-       BUG_ON(!device->device_issue_pending);
-       BUG_ON(!device->dev);
+       if (!device->dev) {
+               pr_err("DMA device must have dev\n");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_MEMCPY");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_XOR");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_XOR_VAL");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_PQ");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_PQ_VAL");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_MEMSET");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_INTERRUPT");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_CYCLIC");
+               return -EIO;
+       }
+
+       if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+               dev_err(device->dev,
+                       "Device claims capability %s, but op is not defined\n",
+                       "DMA_INTERLEAVE");
+               return -EIO;
+       }
+
+
+       if (!device->device_tx_status) {
+               dev_err(device->dev, "Device tx_status is not defined\n");
+               return -EIO;
+       }
+
+
+       if (!device->device_issue_pending) {
+               dev_err(device->dev, "Device issue_pending is not defined\n");
+               return -EIO;
+       }
 
        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case