// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sk_buff *);
static void rxrpc_distribute_error(struct rxrpc_peer *, struct sk_buff *,
				   enum rxrpc_call_completion, int);

/*
 * Find the peer associated with a local error.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
						      const struct sk_buff *skb,
						      struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;
	/* Decode the reported address by the origin of the ICMP message, not
	 * by the family of the listening socket: an ICMPv4 report may arrive
	 * on an IPv6 socket and vice versa.
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
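		/* The ICMPv6 origin supplies a 16-byte IPv6 address; for an
		 * IPv4-mapped address (::ffff:a.b.c.d) the IPv4 part sits in
		 * the last four bytes, hence the extra 12-byte offset.
		 */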
		case SO_EE_ORIGIN_ICMP6:
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
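		/* An ICMPv4-origin report on an IPv6 socket means the
		 * offending packet went out over IPv4, so rewrite the
		 * address as AF_INET for the peer lookup.
		 */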
		case SO_EE_ORIGIN_ICMP:
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
{
	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
		peer->if_mtu = mtu;

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
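		/* Halve a large MTU, but not below the Ethernet-standard
		 * 1500; trim a small one by 100 bytes, keeping at least the
		 * header size plus a few bytes of data space.
		 */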
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock(&peer->lock);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_input_error(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer = NULL;

	_enter("L=%x", local->debug_id);

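	/* A zero-length message from the timestamping origin is just a
	 * transmission timestamp notification; there's no error to handle.
	 */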
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		return;
	}

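	/* The lookup only pins the peer via RCU, so take a real reference
	 * before leaving the read-side section; if the refcount has already
	 * reached zero, the peer is being destroyed and the error is dropped.
	 */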
	rcu_read_lock();
	peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
		peer = NULL;
	rcu_read_unlock();
	if (!peer)
		return;

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

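	/* A fragmentation-needed report carries the next-hop MTU in ee_info;
	 * use it to shrink our view of the path MTU rather than failing the
	 * calls on this peer.
	 */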
	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr->ee.ee_info);
		goto out;
	}

	rxrpc_store_error(peer, skb);
out:
	rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer, struct sk_buff *skb)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sock_extended_err *ee = &serr->ee;
	int err = ee->ee_errno;

	_enter("");

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
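		/* ICMPv6 surfaces administrative prohibition as EACCES;
		 * report it as an unreachable host, as IPv4 would.
		 */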
		if (err == EACCES)
			err = EHOSTUNREACH;
		fallthrough;
	case SO_EE_ORIGIN_ICMP:
	default:
		break;
	}

	rxrpc_distribute_error(peer, skb, compl, err);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
				   enum rxrpc_call_completion compl, int err)
{
	struct rxrpc_call *call;
	HLIST_HEAD(error_targets);

	spin_lock(&peer->lock);
	hlist_move_list(&peer->error_targets, &error_targets);

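	/* The list is now local to us, but each call still has to be
	 * unlinked under the peer lock; the lock is dropped while each call
	 * is completed, as completion takes the call's own locks.
	 */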
	while (!hlist_empty(&error_targets)) {
		call = hlist_entry(error_targets.first,
				   struct rxrpc_call, error_link);
		hlist_del_init(&call->error_link);
		spin_unlock(&peer->lock);

		rxrpc_see_call(call, rxrpc_call_see_distribute_error);
		rxrpc_set_call_completion(call, compl, 0, -err);
		rxrpc_input_call_event(call, skb);

		spin_lock(&peer->lock);
	}

	spin_unlock(&peer->lock);
}

/*
 * Perform keep-alive pings on the peers gathered by the worker, requeueing
 * each one in the wheel bucket that matches its next keepalive deadline.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	bool use;
	int slot;

	spin_lock(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
			continue;

		use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
		spin_unlock(&rxnet->peer_hash_lock);

		if (use) {
			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

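			/* Ping the peer now if its keepalive is already due
			 * or if its deadline falls outside the period the
			 * wheel covers, and in either case requeue it a full
			 * keepalive period hence.
			 */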
			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since we last
			 * examined it, so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			spin_unlock(&rxnet->peer_hash_lock);
			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
		}
		rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
		spin_lock(&rxnet->peer_hash_lock);
	}

	spin_unlock(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Move all the peers currently lodged in expired buckets, plus all
	 * new peers, onto a temporary list.
	 *
	 * Everything in the bucket at the cursor is processed this second;
	 * the bucket at cursor + 1 goes at now + 1s and so on...
	 */
	spin_lock(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

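	/* cursor and stop live in a u8 and wrap modulo 256; the signed
	 * (s8)(cursor - stop) comparison is a circular "less than" that
	 * works for any span shorter than 128 buckets.
	 */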
	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

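	/* The wheel has now been drained up to the present second, so rebase
	 * it at the current time.
	 */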
	base = now;
	spin_unlock(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

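	/* The delay is in seconds; convert it to jiffies.  timer_reduce()
	 * only ever brings the expiry forward (or starts a stopped timer),
	 * so an earlier wakeup that is already scheduled is preserved.
	 */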
	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}