net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_alloc_and_map_skb()
Author:    Yang Yingliang <yangyingliang@huawei.com>
Date:      Thu, 19 May 2022 03:21:08 +0000 (11:21 +0800)
Committer: Jakub Kicinski <kuba@kernel.org>
Committed: Sat, 21 May 2022 00:19:19 +0000 (17:19 -0700)
t7xx_cldma_alloc_and_map_skb() is sometimes called while a spin lock is
held, where a sleeping GFP_KERNEL allocation is not allowed. Add a
'gfp_mask' parameter to t7xx_cldma_alloc_and_map_skb() so that each
caller can pass allocation flags matching its context: GFP_KERNEL in
sleepable paths, and GFP_ATOMIC under the spin lock in
t7xx_cldma_clear_rxq().
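
A minimal sketch of the pattern being applied (illustrative only; the
demo_* names are hypothetical and not part of the t7xx driver):

#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_req {
	struct sk_buff *skb;
};

struct demo_queue {
	spinlock_t lock;
	size_t pkt_size;
	struct demo_req req;
};

/* The helper takes gfp_t from the caller instead of hard-coding
 * GFP_KERNEL, so it stays usable from atomic context.
 */
static int demo_alloc_skb(struct demo_req *req, size_t size, gfp_t gfp_mask)
{
	req->skb = __dev_alloc_skb(size, gfp_mask);
	return req->skb ? 0 : -ENOMEM;
}

static int demo_refill_sleepable(struct demo_queue *q)
{
	/* Process context, no locks held: GFP_KERNEL may sleep, which is fine. */
	return demo_alloc_skb(&q->req, q->pkt_size, GFP_KERNEL);
}

static int demo_refill_locked(struct demo_queue *q)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&q->lock, flags);
	/* Spin lock held: sleeping is forbidden, so pass GFP_ATOMIC. */
	ret = demo_alloc_skb(&q->req, q->pkt_size, GFP_ATOMIC);
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}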

Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/wwan/t7xx/t7xx_hif_cldma.c

diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 0c52801ed0dea01840710fe0b3f9026910f34ed7..6ff30cb8eb16ff269135dddf1eb95d9db6c219d3 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -91,9 +91,9 @@ static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p
 }
 
 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-                                       size_t size)
+                                       size_t size, gfp_t gfp_mask)
 {
-       req->skb = __dev_alloc_skb(size, GFP_KERNEL);
+       req->skb = __dev_alloc_skb(size, gfp_mask);
        if (!req->skb)
                return -ENOMEM;
 
@@ -174,7 +174,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
                spin_unlock_irqrestore(&queue->ring_lock, flags);
                req = queue->rx_refill;
 
-               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
+               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
                if (ret)
                        return ret;
 
@@ -402,7 +402,7 @@ static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s
        if (!req->gpd)
                goto err_free_req;
 
-       val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
+       val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
        if (val)
                goto err_free_pool;
 
@@ -801,7 +801,7 @@ static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
                if (req->skb)
                        continue;
 
-               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
+               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
                if (ret)
                        break;