svcrdma: Clean up allocation of svc_rdma_send_ctxt
author     Chuck Lever <chuck.lever@oracle.com>
           Mon, 5 Jun 2023 13:11:37 +0000 (09:11 -0400)
committer  Chuck Lever <chuck.lever@oracle.com>
           Mon, 12 Jun 2023 16:16:35 +0000 (12:16 -0400)
The physical device's favored NUMA node ID is available when
allocating a send_ctxt. Use that value instead of relying on the
assumption that the allocating task happens to be running on a
node close to the device.
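
The pattern being adopted can be sketched on its own. ibdev_to_node(),
kmalloc_node(), and GFP_KERNEL are existing kernel interfaces used by
the patch below; the helper function and its name here are purely
illustrative, not part of the patch:

	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	/* Illustrative helper: resolve the RDMA device's preferred NUMA
	 * node (NUMA_NO_NODE if it has none) and ask the slab allocator
	 * to place the buffer on that node, rather than on whichever
	 * node the calling CPU happens to be running.
	 */
	static void *alloc_near_ib_device(struct ib_device *device, size_t size)
	{
		int node = ibdev_to_node(device);

		return kmalloc_node(size, GFP_KERNEL, node);
	}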

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
net/sunrpc/xprtrdma/svc_rdma_sendto.c

index 22a871e6fe4dcfeb423e10f14eb8275547d2a5ec..a35d1e055b1ad78bf7759b9c933e8373498d3698 100644
@@ -123,18 +123,17 @@ static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
 static struct svc_rdma_send_ctxt *
 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
+       int node = ibdev_to_node(rdma->sc_cm_id->device);
        struct svc_rdma_send_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;
-       size_t size;
        int i;
 
-       size = sizeof(*ctxt);
-       size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
-       ctxt = kmalloc(size, GFP_KERNEL);
+       ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
+                           GFP_KERNEL, node);
        if (!ctxt)
                goto fail0;
-       buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
+       buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,