spi: Rework per message DMA mapped flag to be per transfer
author	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Fri, 31 May 2024 19:42:40 +0000 (22:42 +0300)
committer	Mark Brown <broonie@kernel.org>
Mon, 10 Jun 2024 11:49:03 +0000 (12:49 +0100)
DMA mappings are done on a per-transfer basis and, moreover, the
direction matters as well, since a transfer may be unidirectional.

The current per-message cur_msg_mapped flag doesn't fit the DMA mapping
and syncing calls well, and we have plenty of checks built on top of it.
So, instead of that, rework the code to use per-transfer, per-direction
flags to show whether a transfer is DMA mapped or not.
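
A minimal sketch of how a controller driver's ->transfer_one() path can
key off the new flags (the example_* function and helpers below are
hypothetical and not part of this patch); the spi_xfer_is_dma_mapped()
helper in drivers/spi/internals.h, updated in the first hunk below,
performs the same tx/rx flag check for internal users:

    static int example_transfer_one(struct spi_controller *ctlr,
                                    struct spi_device *spi,
                                    struct spi_transfer *xfer)
    {
            /* Same condition spi_xfer_is_dma_mapped() tests internally */
            if (xfer->tx_sg_mapped || xfer->rx_sg_mapped) {
                    /* Only the mapped direction(s) carry a valid sg table */
                    if (xfer->tx_sg_mapped)
                            example_start_tx_dma(ctlr, &xfer->tx_sg);
                    if (xfer->rx_sg_mapped)
                            example_start_rx_dma(ctlr, &xfer->rx_sg);
                    /* Positive return: in flight, core waits for completion */
                    return 1;
            }

            /* Neither direction was mapped: fall back to the PIO path,
             * which returns 0 once the transfer completed synchronously */
            return example_do_pio(ctlr, spi, xfer);
    }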

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8650-QRD
Link: https://lore.kernel.org/r/20240531194723.1761567-9-andriy.shevchenko@linux.intel.com
Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
Tested-by: NĂ­colas F. R. A. Prado <nfraprado@collabora.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
drivers/spi/internals.h
drivers/spi/spi.c
include/linux/spi/spi.h

diff --git a/drivers/spi/internals.h b/drivers/spi/internals.h
index 47a87c2a6979667823f673a248af0fd1263bfbec..1f459b8958912c7917af2d8b461085e9cc06559a 100644
@@ -45,7 +45,7 @@ static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr,
                                          struct spi_transfer *xfer)
 {
        return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) &&
-              ctlr->cur_msg_mapped;
+              (xfer->tx_sg_mapped || xfer->rx_sg_mapped);
 }
 
 #endif /* __LINUX_SPI_INTERNALS_H */
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c1e8cde426e51fec454e9a35753f0fa7710ed0dd..9721adf048b5edb41e8c6d9db4c648312f4a0dad 100644
@@ -1220,11 +1220,6 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
        spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
 }
 
-/* Dummy SG for unidirect transfers */
-static struct scatterlist dummy_sg = {
-       .page_link = SG_END,
-};
-
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
        struct device *tx_dev, *rx_dev;
@@ -1263,8 +1258,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
                                                attrs);
                        if (ret != 0)
                                return ret;
-               } else {
-                       xfer->tx_sg.sgl = &dummy_sg;
+
+                       xfer->tx_sg_mapped = true;
                }
 
                if (xfer->rx_buf != NULL) {
@@ -1278,8 +1273,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 
                                return ret;
                        }
-               } else {
-                       xfer->rx_sg.sgl = &dummy_sg;
+
+                       xfer->rx_sg_mapped = true;
                }
        }
        /* No transfer has been mapped, bail out with success */
@@ -1288,7 +1283,6 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 
        ctlr->cur_rx_dma_dev = rx_dev;
        ctlr->cur_tx_dma_dev = tx_dev;
-       ctlr->cur_msg_mapped = true;
 
        return 0;
 }
@@ -1299,57 +1293,46 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
        struct device *tx_dev = ctlr->cur_tx_dma_dev;
        struct spi_transfer *xfer;
 
-       if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
-               return 0;
-
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* The sync has already been done after each transfer. */
                unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
 
-               if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-                       continue;
+               if (xfer->rx_sg_mapped)
+                       spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+                                           DMA_FROM_DEVICE, attrs);
+               xfer->rx_sg_mapped = false;
 
-               spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
-                                   DMA_FROM_DEVICE, attrs);
-               spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
-                                   DMA_TO_DEVICE, attrs);
+               if (xfer->tx_sg_mapped)
+                       spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+                                           DMA_TO_DEVICE, attrs);
+               xfer->tx_sg_mapped = false;
        }
 
-       ctlr->cur_msg_mapped = false;
-
        return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
                                    struct spi_transfer *xfer)
 {
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
        struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-       if (!ctlr->cur_msg_mapped)
-               return;
-
-       if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-               return;
-
-       dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
-       dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+       if (xfer->tx_sg_mapped)
+               dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+       if (xfer->rx_sg_mapped)
+               dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
                                 struct spi_transfer *xfer)
 {
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
        struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-       if (!ctlr->cur_msg_mapped)
-               return;
-
-       if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-               return;
-
-       dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-       dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+       if (xfer->rx_sg_mapped)
+               dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+       if (xfer->tx_sg_mapped)
+               dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
@@ -1365,13 +1348,11 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
-                                   struct spi_message *msg,
                                    struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
-                                struct spi_message *msg,
                                 struct spi_transfer *xfer)
 {
 }
@@ -1643,13 +1624,13 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
                        reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-                       spi_dma_sync_for_device(ctlr, msg, xfer);
+                       spi_dma_sync_for_device(ctlr, xfer);
                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                        if (ret < 0) {
-                               spi_dma_sync_for_cpu(ctlr, msg, xfer);
+                               spi_dma_sync_for_cpu(ctlr, xfer);
 
-                               if (ctlr->cur_msg_mapped &&
-                                  (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+                               if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+                                   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
                                        __spi_unmap_msg(ctlr, msg);
                                        ctlr->fallback = true;
                                        xfer->error &= ~SPI_TRANS_FAIL_NO_START;
@@ -1671,7 +1652,7 @@ fallback_pio:
                                        msg->status = ret;
                        }
 
-                       spi_dma_sync_for_cpu(ctlr, msg, xfer);
+                       spi_dma_sync_for_cpu(ctlr, xfer);
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e8e1e798924f4cbf939648d2d0c60360ce079ed1..b4a89db4c855b39cf3deed6e5dfab17921a1c560 100644
@@ -447,7 +447,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @cur_msg_need_completion: Flag used internally to opportunistically skip
  *     the @cur_msg_completion. This flag is used to signal the context that
  *     is running spi_finalize_current_message() that it needs to complete()
- * @cur_msg_mapped: message has been mapped for DMA
  * @fallback: fallback to PIO if DMA transfer return failure with
  *     SPI_TRANS_FAIL_NO_START.
  * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
@@ -708,7 +707,6 @@ struct spi_controller {
        bool                            running;
        bool                            rt;
        bool                            auto_runtime_pm;
-       bool                            cur_msg_mapped;
        bool                            fallback;
        bool                            last_cs_mode_high;
        s8                              last_cs[SPI_CS_CNT_MAX];
@@ -981,6 +979,8 @@ struct spi_res {
  *      transfer this transfer. Set to 0 if the SPI bus driver does
  *      not support it.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
  * @tx_sg: Scatterlist for transmit, currently not for client use
  * @rx_sg: Scatterlist for receive, currently not for client use
  * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
@@ -1077,10 +1077,13 @@ struct spi_transfer {
 #define SPI_TRANS_FAIL_IO      BIT(1)
        u16             error;
 
-       dma_addr_t      tx_dma;
-       dma_addr_t      rx_dma;
+       bool            tx_sg_mapped;
+       bool            rx_sg_mapped;
+
        struct sg_table tx_sg;
        struct sg_table rx_sg;
+       dma_addr_t      tx_dma;
+       dma_addr_t      rx_dma;
 
        unsigned        dummy_data:1;
        unsigned        cs_off:1;