sctp: implement memory accounting on rx path
author Xin Long <lucien.xin@gmail.com>
Mon, 15 Apr 2019 09:15:07 +0000 (17:15 +0800)
committer David S. Miller <davem@davemloft.net>
Mon, 15 Apr 2019 20:36:51 +0000 (13:36 -0700)
sk_forward_alloc is also updated on the rx path, but to be consistent
we change sctp_skb_set_owner_r() to use sk_mem_charge() instead of
open-coding the update.
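
For reference, sk_mem_charge() is a small inline helper in
include/net/sock.h; as of kernels around this patch it looks roughly
like this (a sketch for illustration, not part of this change):

    static inline void sk_mem_charge(struct sock *sk, int size)
    {
            /* only sockets that do memory accounting are charged */
            if (!sk_has_account(sk))
                    return;
            sk->sk_forward_alloc -= size;
    }

The effect of the old open-coded update is kept, except that sockets
without memory accounting are no longer charged.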

In sctp_eat_data(), it's not enough to check sctp_memory_pressure only,
as that doesn't cover the mem_cgroup_sockets_enabled case, so we change
to use sk_under_memory_pressure().
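
Unlike the bare sctp_memory_pressure flag, sk_under_memory_pressure()
also consults the memcg state; roughly (a sketch of the helper as of
kernels around this patch):

    static inline bool sk_under_memory_pressure(const struct sock *sk)
    {
            if (!sk->sk_prot->memory_pressure)
                    return false;

            /* memcg-level socket pressure counts too */
            if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
                mem_cgroup_under_socket_pressure(sk->sk_memcg))
                    return true;

            /* per-protocol global pressure flag */
            return !!*sk->sk_prot->memory_pressure;
    }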

When the socket is under memory pressure, sk_mem_reclaim() and
sk_rmem_schedule() should be called on both the RENEGE and CHUNK
DELIVERY paths, so that the socket exits the memory pressure status as
soon as possible.
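
For reference, sk_mem_reclaim() returns the socket's accumulated
forward allocation to the protocol's memory pool; roughly (a sketch of
the helper as of kernels around this patch):

    static inline void sk_mem_reclaim(struct sock *sk)
    {
            if (!sk_has_account(sk))
                    return;
            /* give pages back once at least one quantum is free */
            if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
                    __sk_mem_reclaim(sk, sk->sk_forward_alloc);
    }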

Note that sk_rmem_schedule() uses datalen, rather than the skb's
truesize, to keep things simple there.
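
Here datalen is ntohs(chunk->chunk_hdr->length), and sk_rmem_schedule()
charges it against receive memory; roughly (a sketch of the helper as
of kernels around this patch):

    static inline bool
    sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
    {
            if (!sk_has_account(sk))
                    return true;
            /* covered by existing forward alloc, or schedule more */
            return size <= sk->sk_forward_alloc ||
                    __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
                    skb_pfmemalloc(skb);
    }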

Reported-by: Matteo Croce <mcroce@redhat.com>
Tested-by: Matteo Croce <mcroce@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sctp/sctp.h
net/sctp/sm_statefuns.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c

diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1d13ec3f2707e5777d564aaf15690f10a15b05b6..eefdfa5abf6e088aaf6a27d4113007c19d6d2f8f 100644
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        /*
         * This mimics the behavior of skb_set_owner_r
         */
-       sk->sk_forward_alloc -= event->rmem_len;
+       sk_mem_charge(sk, event->rmem_len);
 }
 
 /* Tests if the list has one and only one entry. */
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c9ae3404b1bb11572e34255cb3eae86ca1dd8131..7dfc34b28f4fb3e98086963afdc636e8c511d58a 100644
@@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
         * memory usage too much
         */
-       if (*sk->sk_prot_creator->memory_pressure) {
+       if (sk_under_memory_pressure(sk)) {
                if (sctp_tsnmap_has_gap(map) &&
                    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
                        pr_debug("%s: under pressure, reneging for tsn:%u\n",
                                 __func__, tsn);
                        deliver = SCTP_CMD_RENEGE;
-                }
+               } else {
+                       sk_mem_reclaim(sk);
+               }
        }
 
        /*
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8cb7d9858270a617e46e32e988babf86196ef84c..c2a7478587ab4fe53e3e30178b604b3eb46c5f8d 100644
@@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
                                                gfp_t gfp)
 {
        struct sctp_ulpevent *event = NULL;
-       struct sk_buff *skb;
-       size_t padding, len;
+       struct sk_buff *skb = chunk->skb;
+       struct sock *sk = asoc->base.sk;
+       size_t padding, datalen;
        int rx_count;
 
        /*
@@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        if (asoc->ep->rcvbuf_policy)
                rx_count = atomic_read(&asoc->rmem_alloc);
        else
-               rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+               rx_count = atomic_read(&sk->sk_rmem_alloc);
 
-       if (rx_count >= asoc->base.sk->sk_rcvbuf) {
+       datalen = ntohs(chunk->chunk_hdr->length);
 
-               if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-                   (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
-                                      chunk->skb->truesize)))
-                       goto fail;
-       }
+       if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
+               goto fail;
 
        /* Clone the original skb, sharing the data.  */
        skb = skb_clone(chunk->skb, gfp);
@@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * The sender should never pad with more than 3 bytes.  The receiver
         * MUST ignore the padding bytes.
         */
-       len = ntohs(chunk->chunk_hdr->length);
-       padding = SCTP_PAD4(len) - len;
+       padding = SCTP_PAD4(datalen) - datalen;
 
        /* Fixup cloned skb with just this chunks data.  */
        skb_trim(skb, chunk->chunk_end - padding - skb->data);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7cdc3623fa35cd3b3750f2937123d8d791666a86..a212fe079c07e17dd533b71bc3dbb5526c89d8c0 100644
@@ -1104,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }
        /* If able to free enough room, accept this chunk. */
-       if (freed >= needed) {
+       if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
+           freed >= needed) {
                int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been