rxrpc: Fix missing active use pinning of rxrpc_local object
author    David Howells <dhowells@redhat.com>
          Thu, 30 Jan 2020 21:50:36 +0000 (21:50 +0000)
committer David Howells <dhowells@redhat.com>
          Thu, 30 Jan 2020 21:50:41 +0000 (21:50 +0000)
The introduction of a split between the reference count on rxrpc_local
objects and the usage count didn't quite go far enough.  A number of kernel
work items need to make use of the socket to perform transmission.  These
also need to get an active count on the local object to prevent the socket
from being closed.

Fix this by getting the active count in those places.
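
As a rough sketch of the resulting pattern (for orientation only, not part
of the patch itself; do_transmit_work() is a hypothetical stand-in for
whatever a given work item actually does), each such work item now brackets
its use of the socket with the active count:

    if (__rxrpc_use_local(local)) {
            /* A non-zero active count pins local->socket open for the
             * duration of the work.
             */
            do_transmit_work(local);        /* hypothetical stand-in */
            rxrpc_unuse_local(local);
    } else {
            /* The endpoint is being shut down; skip transmission. */
    }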

Also split out the raw active count get/put functions as these places tend
to hold refs on the rxrpc_local object already, so getting and putting an
extra object ref is just a waste of time.
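
Roughly, the wrapped and raw forms then relate as follows (this just mirrors
the ar-internal.h and local_object.c changes below and is shown here only
for orientation):

    /* Wrapped form: for callers that don't yet hold a ref. */
    struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
    {
            local = rxrpc_get_local_maybe(local);
            if (!local)
                    return NULL;
            if (!__rxrpc_use_local(local)) {   /* raw active-count get */
                    rxrpc_put_local(local);
                    return NULL;
            }
            return local;
    }

    /* Raw forms: just the atomic ops on local->active_users, for callers
     * that already hold a ref (the connection and peer work items and the
     * local processor itself).
     */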

The problem can lead to symptoms like:

    BUG: kernel NULL pointer dereference, address: 0000000000000018
    ..
    CPU: 2 PID: 818 Comm: kworker/u9:0 Not tainted 5.5.0-fscache+ #51
    ...
    RIP: 0010:selinux_socket_sendmsg+0x5/0x13
    ...
    Call Trace:
     security_socket_sendmsg+0x2c/0x3e
     sock_sendmsg+0x1a/0x46
     rxrpc_send_keepalive+0x131/0x1ae
     rxrpc_peer_keepalive_worker+0x219/0x34b
     process_one_work+0x18e/0x271
     worker_thread+0x1a3/0x247
     kthread+0xe6/0xeb
     ret_from_fork+0x1f/0x30

Fixes: 730c5fd42c1e ("rxrpc: Fix local endpoint refcounting")
Signed-off-by: David Howells <dhowells@redhat.com>
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_event.c
net/rxrpc/local_object.c
net/rxrpc/peer_event.c

index 9d3c4d2d893ab2372af651704efdc512494733a2..fe42f986cd94911e3a4b28bd290577dc137774f1 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -194,6 +194,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 service_in_use:
        write_unlock(&local->services_lock);
        rxrpc_unuse_local(local);
+       rxrpc_put_local(local);
        ret = -EADDRINUSE;
 error_unlock:
        release_sock(&rx->sk);
@@ -899,6 +900,7 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_purge_queue(&sk->sk_receive_queue);
 
        rxrpc_unuse_local(rx->local);
+       rxrpc_put_local(rx->local);
        rx->local = NULL;
        key_put(rx->key);
        rx->key = NULL;
index 5e99df80e80a775b26e0de82eec5738454398abc..94441fee85bcfa5970da893a488a299aa539d7aa 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1021,6 +1021,16 @@ void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
+static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
+{
+       return atomic_dec_return(&local->active_users) == 0;
+}
+
+static inline bool __rxrpc_use_local(struct rxrpc_local *local)
+{
+       return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
+}
+
 /*
  * misc.c
  */
index 808a4723f8684c849da712808938df11f3ce4872..06fcff2ebbba1ef2806e039b5a1665bbc5f825a9 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -438,16 +438,12 @@ again:
 /*
  * connection-level event processor
  */
-void rxrpc_process_connection(struct work_struct *work)
+static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 {
-       struct rxrpc_connection *conn =
-               container_of(work, struct rxrpc_connection, processor);
        struct sk_buff *skb;
        u32 abort_code = RX_PROTOCOL_ERROR;
        int ret;
 
-       rxrpc_see_connection(conn);
-
        if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                rxrpc_secure_connection(conn);
 
@@ -475,18 +471,32 @@ void rxrpc_process_connection(struct work_struct *work)
                }
        }
 
-out:
-       rxrpc_put_connection(conn);
-       _leave("");
        return;
 
 requeue_and_leave:
        skb_queue_head(&conn->rx_queue, skb);
-       goto out;
+       return;
 
 protocol_error:
        if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
                goto requeue_and_leave;
        rxrpc_free_skb(skb, rxrpc_skb_freed);
-       goto out;
+       return;
+}
+
+void rxrpc_process_connection(struct work_struct *work)
+{
+       struct rxrpc_connection *conn =
+               container_of(work, struct rxrpc_connection, processor);
+
+       rxrpc_see_connection(conn);
+
+       if (__rxrpc_use_local(conn->params.local)) {
+               rxrpc_do_process_connection(conn);
+               rxrpc_unuse_local(conn->params.local);
+       }
+
+       rxrpc_put_connection(conn);
+       _leave("");
+       return;
 }
index 3aa179efcda43f2245fd2c71a1af219620d7d097..a6c1349e965d94c3ef6361c04298799a25a2f8c3 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -383,14 +383,11 @@ void rxrpc_put_local(struct rxrpc_local *local)
  */
 struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
 {
-       unsigned int au;
-
        local = rxrpc_get_local_maybe(local);
        if (!local)
                return NULL;
 
-       au = atomic_fetch_add_unless(&local->active_users, 1, 0);
-       if (au == 0) {
+       if (!__rxrpc_use_local(local)) {
                rxrpc_put_local(local);
                return NULL;
        }
@@ -404,14 +401,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
  */
 void rxrpc_unuse_local(struct rxrpc_local *local)
 {
-       unsigned int au;
-
        if (local) {
-               au = atomic_dec_return(&local->active_users);
-               if (au == 0)
+               if (__rxrpc_unuse_local(local)) {
+                       rxrpc_get_local(local);
                        rxrpc_queue_local(local);
-               else
-                       rxrpc_put_local(local);
+               }
        }
 }
 
@@ -468,7 +462,7 @@ static void rxrpc_local_processor(struct work_struct *work)
 
        do {
                again = false;
-               if (atomic_read(&local->active_users) == 0) {
+               if (!__rxrpc_use_local(local)) {
                        rxrpc_local_destroyer(local);
                        break;
                }
@@ -482,6 +476,8 @@ static void rxrpc_local_processor(struct work_struct *work)
                        rxrpc_process_local_events(local);
                        again = true;
                }
+
+               __rxrpc_unuse_local(local);
        } while (again);
 
        rxrpc_put_local(local);
index 48f67a9b1037ceb7b7a749a241bbb127a12a1f67..923b263c401b34ff8bd3ff46c24c3847fbe4ffec 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
                if (!rxrpc_get_peer_maybe(peer))
                        continue;
 
-               spin_unlock_bh(&rxnet->peer_hash_lock);
-
-               keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
-               slot = keepalive_at - base;
-               _debug("%02x peer %u t=%d {%pISp}",
-                      cursor, peer->debug_id, slot, &peer->srx.transport);
+               if (__rxrpc_use_local(peer->local)) {
+                       spin_unlock_bh(&rxnet->peer_hash_lock);
+
+                       keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+                       slot = keepalive_at - base;
+                       _debug("%02x peer %u t=%d {%pISp}",
+                              cursor, peer->debug_id, slot, &peer->srx.transport);
+
+                       if (keepalive_at <= base ||
+                           keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                               rxrpc_send_keepalive(peer);
+                               slot = RXRPC_KEEPALIVE_TIME;
+                       }
 
-               if (keepalive_at <= base ||
-                   keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
-                       rxrpc_send_keepalive(peer);
-                       slot = RXRPC_KEEPALIVE_TIME;
+                       /* A transmission to this peer occurred since last we
+                        * examined it so put it into the appropriate future
+                        * bucket.
+                        */
+                       slot += cursor;
+                       slot &= mask;
+                       spin_lock_bh(&rxnet->peer_hash_lock);
+                       list_add_tail(&peer->keepalive_link,
+                                     &rxnet->peer_keepalive[slot & mask]);
+                       rxrpc_unuse_local(peer->local);
                }
-
-               /* A transmission to this peer occurred since last we examined
-                * it so put it into the appropriate future bucket.
-                */
-               slot += cursor;
-               slot &= mask;
-               spin_lock_bh(&rxnet->peer_hash_lock);
-               list_add_tail(&peer->keepalive_link,
-                             &rxnet->peer_keepalive[slot & mask]);
                rxrpc_put_peer_locked(peer);
        }