tests/ncdevmem: Fix double-free of queue array
author		Cosmin Ratiu <cratiu@nvidia.com>
Thu, 8 May 2025 08:44:34 +0000 (11:44 +0300)
committer	Jakub Kicinski <kuba@kernel.org>
Fri, 9 May 2025 22:05:07 +0000 (15:05 -0700)
netdev_bind_rx takes ownership of the queue array passed as a parameter
and frees it, so a queue array buffer cannot be reused across multiple
netdev_bind_rx calls.

Fix this by always passing a newly created queue array to every
netdev_bind_rx call in ncdevmem.

Fixes: 85585b4bc8d8 ("selftests: add ncdevmem, netcat for devmem TCP")
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Joe Damato <jdamato@fastly.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20250508084434.1933069-1-cratiu@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
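
The ownership rule can be illustrated with a minimal, self-contained sketch
(bind_rx, create_queues and struct queue_id below are stand-ins for
illustration only, not the selftest's or YNL's real API): because the callee
frees the queue array it is given, the caller must allocate a fresh array for
every call, which is exactly what the new create_queues() helper in the patch
below does.

	/* Minimal sketch of the ownership pattern, assuming a callee that
	 * consumes and frees its array argument, like netdev_bind_rx does.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct queue_id {
		unsigned int type;
		unsigned int id;
	};

	/* Hypothetical callee: takes ownership of 'queues' and frees it. */
	static int bind_rx(struct queue_id *queues, size_t n)
	{
		int ret = (queues && n) ? 0 : -1;

		free(queues);	/* ownership transferred to the callee */
		return ret;
	}

	/* Analogue of the patch's create_queues(): build a fresh array
	 * for each bind call instead of reusing one buffer.
	 */
	static struct queue_id *create_queues(size_t n, unsigned int start)
	{
		struct queue_id *queues = calloc(n, sizeof(*queues));
		size_t i;

		if (!queues)
			return NULL;
		for (i = 0; i < n; i++) {
			queues[i].type = 1;	/* e.g. an RX queue type */
			queues[i].id = start + i;
		}
		return queues;
	}

	int main(void)
	{
		/* Wrong (the old pattern): reusing one buffer double-frees it:
		 *
		 *	struct queue_id *q = create_queues(4, 8);
		 *	bind_rx(q, 4);
		 *	bind_rx(q, 4);	// q was already freed above
		 *
		 * Right (the fixed pattern): each call gets its own array.
		 */
		if (bind_rx(create_queues(4, 8), 4))
			fprintf(stderr, "first bind failed\n");
		if (bind_rx(create_queues(4, 8), 4))
			fprintf(stderr, "second bind failed\n");
		return 0;
	}
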
tools/testing/selftests/drivers/net/hw/ncdevmem.c

index 2bf14ac2b8c62497e2d5f06174dbaf71ae23af2e..9d48004ff1a178264bf591cbb1c0f90de3bfe8a0 100644 (file)
@@ -431,6 +431,22 @@ static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
        return 0;
 }
 
+static struct netdev_queue_id *create_queues(void)
+{
+       struct netdev_queue_id *queues;
+       size_t i = 0;
+
+       queues = calloc(num_queues, sizeof(*queues));
+       for (i = 0; i < num_queues; i++) {
+               queues[i]._present.type = 1;
+               queues[i]._present.id = 1;
+               queues[i].type = NETDEV_QUEUE_TYPE_RX;
+               queues[i].id = start_queue + i;
+       }
+
+       return queues;
+}
+
 int do_server(struct memory_buffer *mem)
 {
        char ctrl_data[sizeof(int) * 20000];
@@ -448,7 +464,6 @@ int do_server(struct memory_buffer *mem)
        char buffer[256];
        int socket_fd;
        int client_fd;
-       size_t i = 0;
        int ret;
 
        ret = parse_address(server_ip, atoi(port), &server_sin);
@@ -471,16 +486,7 @@ int do_server(struct memory_buffer *mem)
 
        sleep(1);
 
-       queues = malloc(sizeof(*queues) * num_queues);
-
-       for (i = 0; i < num_queues; i++) {
-               queues[i]._present.type = 1;
-               queues[i]._present.id = 1;
-               queues[i].type = NETDEV_QUEUE_TYPE_RX;
-               queues[i].id = start_queue + i;
-       }
-
-       if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+       if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
                error(1, 0, "Failed to bind\n");
 
        tmp_mem = malloc(mem->size);
@@ -545,7 +551,6 @@ int do_server(struct memory_buffer *mem)
                        goto cleanup;
                }
 
-               i++;
                for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                        if (cm->cmsg_level != SOL_SOCKET ||
                            (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
@@ -630,10 +635,8 @@ cleanup:
 
 void run_devmem_tests(void)
 {
-       struct netdev_queue_id *queues;
        struct memory_buffer *mem;
        struct ynl_sock *ys;
-       size_t i = 0;
 
        mem = provider->alloc(getpagesize() * NUM_PAGES);
 
@@ -641,38 +644,24 @@ void run_devmem_tests(void)
        if (configure_rss())
                error(1, 0, "rss error\n");
 
-       queues = calloc(num_queues, sizeof(*queues));
-
        if (configure_headersplit(1))
                error(1, 0, "Failed to configure header split\n");
 
-       if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+       if (!bind_rx_queue(ifindex, mem->fd,
+                          calloc(num_queues, sizeof(struct netdev_queue_id)),
+                          num_queues, &ys))
                error(1, 0, "Binding empty queues array should have failed\n");
 
-       for (i = 0; i < num_queues; i++) {
-               queues[i]._present.type = 1;
-               queues[i]._present.id = 1;
-               queues[i].type = NETDEV_QUEUE_TYPE_RX;
-               queues[i].id = start_queue + i;
-       }
-
        if (configure_headersplit(0))
                error(1, 0, "Failed to configure header split\n");
 
-       if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+       if (!bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
                error(1, 0, "Configure dmabuf with header split off should have failed\n");
 
        if (configure_headersplit(1))
                error(1, 0, "Failed to configure header split\n");
 
-       for (i = 0; i < num_queues; i++) {
-               queues[i]._present.type = 1;
-               queues[i]._present.id = 1;
-               queues[i].type = NETDEV_QUEUE_TYPE_RX;
-               queues[i].id = start_queue + i;
-       }
-
-       if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+       if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
                error(1, 0, "Failed to bind\n");
 
        /* Deactivating a bound queue should not be legal */