Commit | Line | Data |
---|---|---|
ac713874 UB |
1 | /* |
2 | * Shared Memory Communications over RDMA (SMC-R) and RoCE | |
3 | * | |
4 | * AF_SMC protocol family socket handler keeping the AF_INET sock address type | |
5 | * applies to SOCK_STREAM sockets only | |
6 | * offers an alternative communication option for TCP-protocol sockets | |
7 | * applicable with RoCE-cards only | |
8 | * | |
a046d57d UB |
9 | * Initial restrictions: |
10 | * - non-blocking connect postponed | |
11 | * - IPv6 support postponed | |
12 | * - support for alternate links postponed | |
13 | * - partial support for non-blocking sockets only | |
14 | * - support for urgent data postponed | |
15 | * | |
ac713874 UB |
16 | * Copyright IBM Corp. 2016 |
17 | * | |
18 | * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com> | |
19 | * based on prototype from Frank Blaschka | |
20 | */ | |
21 | ||
22 | #define KMSG_COMPONENT "smc" | |
23 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
24 | ||
25 | #include <linux/module.h> | |
26 | #include <linux/socket.h> | |
a046d57d UB |
27 | #include <linux/inetdevice.h> |
28 | #include <linux/workqueue.h> | |
5f08318f | 29 | #include <linux/in.h> |
c3edc401 IM |
30 | #include <linux/sched/signal.h> |
31 | ||
ac713874 | 32 | #include <net/sock.h> |
a046d57d | 33 | #include <net/tcp.h> |
f16a7dd5 | 34 | #include <net/smc.h> |
ac713874 UB |
35 | |
36 | #include "smc.h" | |
a046d57d | 37 | #include "smc_clc.h" |
9bf9abea | 38 | #include "smc_llc.h" |
5f08318f | 39 | #include "smc_cdc.h" |
0cfdd8f9 | 40 | #include "smc_core.h" |
a4cf0443 | 41 | #include "smc_ib.h" |
6812baab | 42 | #include "smc_pnet.h" |
e6727f39 | 43 | #include "smc_tx.h" |
952310cc | 44 | #include "smc_rx.h" |
b38d7324 | 45 | #include "smc_close.h" |
ac713874 | 46 | |
static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
						 * creation
						 */

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
};

/* forward declaration: TCP listen worker, defined below */
static void smc_tcp_listen_work(struct work_struct *);
ac713874 UB |
58 | static void smc_set_keepalive(struct sock *sk, int val) |
59 | { | |
60 | struct smc_sock *smc = smc_sk(sk); | |
61 | ||
62 | smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); | |
63 | } | |
64 | ||
/* single global hash list holding all AF_SMC (IPv4) socks */
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};
68 | ||
69 | int smc_hash_sk(struct sock *sk) | |
70 | { | |
71 | struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; | |
72 | struct hlist_head *head; | |
73 | ||
74 | head = &h->ht; | |
75 | ||
76 | write_lock_bh(&h->lock); | |
77 | sk_add_node(sk, head); | |
78 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | |
79 | write_unlock_bh(&h->lock); | |
80 | ||
81 | return 0; | |
82 | } | |
83 | EXPORT_SYMBOL_GPL(smc_hash_sk); | |
84 | ||
85 | void smc_unhash_sk(struct sock *sk) | |
86 | { | |
87 | struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; | |
88 | ||
89 | write_lock_bh(&h->lock); | |
90 | if (sk_del_node_init(sk)) | |
91 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | |
92 | write_unlock_bh(&h->lock); | |
93 | } | |
94 | EXPORT_SYMBOL_GPL(smc_unhash_sk); | |
95 | ||
/* AF_SMC protocol definition.
 * SLAB_TYPESAFE_BY_RCU: sock lookup may race with freeing, so the slab
 * must keep the object type stable across free/realloc (as for TCP).
 */
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);
ac713874 UB |
107 | |
/* Close an SMC socket (socket layer release).  Runs the active close
 * handshake (or just marks a fallback sock closed), releases the
 * internal CLC/TCP socket, detaches the sock from the socket inode and
 * schedules the delayed final sock_put.
 * Returns 0 or the error from smc_close_active().
 */
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);
	sock_hold(sk);
	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (smc->use_fallback) {
		/* fallback sock: traffic went over TCP, nothing to tear
		 * down on the SMC side
		 */
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	} else {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	if (smc->use_fallback) {
		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
	} else if (sk->sk_state == SMC_CLOSED) {
		/* connection fully closed: free it and delay the final
		 * sock_put until the peer had a chance to close too
		 */
		smc_conn_free(&smc->conn);
		schedule_delayed_work(&smc->sock_put_work,
				      SMC_CLOSE_SOCK_PUT_DELAY);
	}
	release_sock(sk);

	sock_put(sk);
out:
	return rc;
}
156 | ||
157 | static void smc_destruct(struct sock *sk) | |
158 | { | |
159 | if (sk->sk_state != SMC_CLOSED) | |
160 | return; | |
161 | if (!sock_flag(sk, SOCK_DEAD)) | |
162 | return; | |
163 | ||
164 | sk_refcnt_debug_dec(sk); | |
165 | } | |
166 | ||
/* Allocate and initialize a new SMC sock.
 * @net:  namespace the sock belongs to
 * @sock: socket to attach via sock_init_data(), may be NULL for
 *        not-yet-accepted child socks
 * Returns the new sock (refcnt 1, state SMC_INIT, already hashed) or
 * NULL on allocation failure.
 */
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
{
	struct smc_sock *smc;
	struct sock *sk;

	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, &smc_proto, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = SMCPROTO_SMC;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);

	return sk;
}
190 | ||
/* Bind: validate the address the way inet_bind() would (IPv4 only in
 * this version) and forward the bind to the internal CLC/TCP socket.
 */
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if ((addr->sin_family != AF_INET) &&
	    ((addr->sin_family != AF_UNSPEC) ||
	     (addr->sin_addr.s_addr != htonl(INADDR_ANY))))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	/* SO_REUSEADDR is tracked on the SMC sock; mirror it before bind */
	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
228 | ||
229 | static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, | |
230 | unsigned long mask) | |
231 | { | |
232 | /* options we don't get control via setsockopt for */ | |
233 | nsk->sk_type = osk->sk_type; | |
234 | nsk->sk_sndbuf = osk->sk_sndbuf; | |
235 | nsk->sk_rcvbuf = osk->sk_rcvbuf; | |
236 | nsk->sk_sndtimeo = osk->sk_sndtimeo; | |
237 | nsk->sk_rcvtimeo = osk->sk_rcvtimeo; | |
238 | nsk->sk_mark = osk->sk_mark; | |
239 | nsk->sk_priority = osk->sk_priority; | |
240 | nsk->sk_rcvlowat = osk->sk_rcvlowat; | |
241 | nsk->sk_bound_dev_if = osk->sk_bound_dev_if; | |
242 | nsk->sk_err = osk->sk_err; | |
243 | ||
244 | nsk->sk_flags &= ~mask; | |
245 | nsk->sk_flags |= osk->sk_flags & mask; | |
246 | } | |
247 | ||
/* SOL_SOCKET flags that must be mirrored from the SMC sock to the
 * internal CLC/TCP sock whenever they may have changed
 */
#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}
269 | ||
/* SOL_SOCKET flags that are also relevant on the SMC sock itself and
 * thus must be copied back from the CLC/TCP sock
 */
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
279 | ||
a046d57d UB |
280 | /* determine subnet and mask of internal TCP socket */ |
281 | int smc_netinfo_by_tcpsk(struct socket *clcsock, | |
282 | __be32 *subnet, u8 *prefix_len) | |
283 | { | |
284 | struct dst_entry *dst = sk_dst_get(clcsock->sk); | |
731b0085 | 285 | struct in_device *in_dev; |
a046d57d UB |
286 | struct sockaddr_in addr; |
287 | int rc = -ENOENT; | |
288 | int len; | |
289 | ||
290 | if (!dst) { | |
291 | rc = -ENOTCONN; | |
292 | goto out; | |
293 | } | |
294 | if (!dst->dev) { | |
295 | rc = -ENODEV; | |
296 | goto out_rel; | |
297 | } | |
298 | ||
299 | /* get address to which the internal TCP socket is bound */ | |
300 | kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len); | |
301 | /* analyze IPv4 specific data of net_device belonging to TCP socket */ | |
731b0085 UB |
302 | rcu_read_lock(); |
303 | in_dev = __in_dev_get_rcu(dst->dev); | |
304 | for_ifa(in_dev) { | |
305 | if (!inet_ifa_match(addr.sin_addr.s_addr, ifa)) | |
a046d57d UB |
306 | continue; |
307 | *prefix_len = inet_mask_len(ifa->ifa_mask); | |
308 | *subnet = ifa->ifa_address & ifa->ifa_mask; | |
309 | rc = 0; | |
310 | break; | |
731b0085 UB |
311 | } endfor_ifa(in_dev); |
312 | rcu_read_unlock(); | |
a046d57d UB |
313 | |
314 | out_rel: | |
315 | dst_release(dst); | |
316 | out: | |
317 | return rc; | |
318 | } | |
319 | ||
/* Client side of the CONFIRM LINK handshake for the first link of a new
 * link group: wait for the server's CONFIRM LINK request, bring the QP
 * to RTS, register the rmb with the peer and answer with a CONFIRM LINK
 * response.
 * Returns 0 on success, a positive SMC_CLC_DECL_* reason code to
 * decline with, or a negative error from the decline-wait path.
 */
static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		/* timeout or interrupt: the server is expected to send a
		 * CLC decline instead; wait for it
		 */
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	smc_wr_remember_qp_attr(link);

	/* register our rmb memory region with the peer */
	rc = smc_wr_reg_send(link,
			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	return rc;
}
360 | ||
0cfdd8f9 UB |
361 | static void smc_conn_save_peer_info(struct smc_sock *smc, |
362 | struct smc_clc_msg_accept_confirm *clc) | |
363 | { | |
364 | smc->conn.peer_conn_idx = clc->conn_idx; | |
5f08318f | 365 | smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token); |
cd6851f3 UB |
366 | smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size); |
367 | atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); | |
0cfdd8f9 UB |
368 | } |
369 | ||
370 | static void smc_link_save_peer_info(struct smc_link *link, | |
371 | struct smc_clc_msg_accept_confirm *clc) | |
372 | { | |
373 | link->peer_qpn = ntoh24(clc->qpn); | |
374 | memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE); | |
375 | memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac)); | |
376 | link->peer_psn = ntoh24(clc->psn); | |
377 | link->peer_mtu = clc->qp_mtu; | |
378 | } | |
379 | ||
/* setup for RDMA connection of client: run the CLC handshake over the
 * already connected internal TCP socket, create or reuse a link group
 * and connection, and confirm the first link for a new group.
 * Returns a negative error code; on success the non-negative kind of
 * local contact (SMC_FIRST_CONTACT/SMC_REUSE_CONTACT).  When the
 * handshake is declined, the sock falls back to TCP and 0 is returned.
 */
static int smc_connect_rdma(struct smc_sock *smc)
{
	struct sockaddr_in *inaddr = (struct sockaddr_in *)smc->addr;
	struct smc_clc_msg_accept_confirm aclc;
	int local_contact = SMC_FIRST_CONTACT;
	struct smc_ib_device *smcibdev;
	struct smc_link *link;
	u8 srv_first_contact;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* do inband token exchange */
	reason_code = smc_clc_send_proposal(smc, smcibdev, ibport);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0) /* configuration error */
		goto decline_rdma;
	/* receive SMC Accept CLC message */
	reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc),
				       SMC_CLC_ACCEPT);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0)
		goto decline_rdma;

	srv_first_contact = aclc.hdr.flag;
	/* hold off parallel link group creation until this connection is
	 * attached; released on every exit path below
	 */
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, inaddr->sin_addr.s_addr, smcibdev,
					ibport, &aclc.lcl, srv_first_contact);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		else if (rc == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		goto decline_rdma_unlock;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, &aclc);

	/* create send buffer and rmb */
	rc = smc_buf_create(smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &aclc);

	rc = smc_rmb_rtoken_handling(&smc->conn, &aclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
	} else {
		/* reused link group: only a not-yet-registered rmb must
		 * be announced to the peer
		 */
		struct smc_buf_desc *buf_desc = smc->conn.rmb_desc;

		if (!buf_desc->reused) {
			/* register memory region for new rmb */
			rc = smc_wr_reg_send(link,
					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
			if (rc) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		goto out_err_unlock;

	if (local_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(
			smc, &smcibdev->gid[ibport - 1]);
		if (reason_code < 0) {
			rc = reason_code;
			goto out_err_unlock;
		}
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	mutex_unlock(&smc_create_lgr_pending);
	smc_tx_init(smc);

out_connected:
	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return rc ? rc : local_contact;

decline_rdma_unlock:
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < sizeof(struct smc_clc_msg_decline))
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
out_err:
	return rc;
}
528 | ||
/* Connect an SMC socket: establish the internal TCP connection first,
 * then attempt the SMC/RDMA setup on top of it (smc_connect_rdma()
 * falls back to plain TCP when SMC is not possible).
 */
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET)
		goto out_err;
	/* NOTE(review): smc_connect_rdma() later reads sin_addr through
	 * this pointer, while alen is only checked to cover sa_family;
	 * the clcsock connect below rejects short AF_INET addresses, but
	 * an explicit sizeof(struct sockaddr_in) check here would be
	 * safer — confirm against inet_stream_connect behavior.
	 */
	smc->addr = addr; /* needed for nonblocking connect */

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc)
		goto out;

	/* setup RDMA connection */
	rc = smc_connect_rdma(smc);
	if (rc < 0)
		goto out;
	else
		rc = 0; /* success cases including fallback */

out:
	release_sock(sk);
out_err:
	return rc;
}
574 | ||
575 | static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) | |
576 | { | |
577 | struct sock *sk = &lsmc->sk; | |
578 | struct socket *new_clcsock; | |
579 | struct sock *new_sk; | |
580 | int rc; | |
581 | ||
a046d57d | 582 | release_sock(&lsmc->sk); |
ac713874 UB |
583 | new_sk = smc_sock_alloc(sock_net(sk), NULL); |
584 | if (!new_sk) { | |
585 | rc = -ENOMEM; | |
586 | lsmc->sk.sk_err = ENOMEM; | |
587 | *new_smc = NULL; | |
a046d57d | 588 | lock_sock(&lsmc->sk); |
ac713874 UB |
589 | goto out; |
590 | } | |
591 | *new_smc = smc_sk(new_sk); | |
592 | ||
593 | rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0); | |
a046d57d UB |
594 | lock_sock(&lsmc->sk); |
595 | if (rc < 0) { | |
596 | lsmc->sk.sk_err = -rc; | |
597 | new_sk->sk_state = SMC_CLOSED; | |
598 | sock_set_flag(new_sk, SOCK_DEAD); | |
f16a7dd5 | 599 | sk->sk_prot->unhash(new_sk); |
a046d57d UB |
600 | sock_put(new_sk); |
601 | *new_smc = NULL; | |
602 | goto out; | |
603 | } | |
604 | if (lsmc->sk.sk_state == SMC_CLOSED) { | |
605 | if (new_clcsock) | |
606 | sock_release(new_clcsock); | |
607 | new_sk->sk_state = SMC_CLOSED; | |
608 | sock_set_flag(new_sk, SOCK_DEAD); | |
f16a7dd5 | 609 | sk->sk_prot->unhash(new_sk); |
ac713874 UB |
610 | sock_put(new_sk); |
611 | *new_smc = NULL; | |
612 | goto out; | |
613 | } | |
614 | ||
615 | (*new_smc)->clcsock = new_clcsock; | |
616 | out: | |
617 | return rc; | |
618 | } | |
619 | ||
a046d57d UB |
620 | /* add a just created sock to the accept queue of the listen sock as |
621 | * candidate for a following socket accept call from user space | |
622 | */ | |
623 | static void smc_accept_enqueue(struct sock *parent, struct sock *sk) | |
624 | { | |
625 | struct smc_sock *par = smc_sk(parent); | |
626 | ||
627 | sock_hold(sk); | |
628 | spin_lock(&par->accept_q_lock); | |
629 | list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q); | |
630 | spin_unlock(&par->accept_q_lock); | |
631 | sk_acceptq_added(parent); | |
632 | } | |
633 | ||
634 | /* remove a socket from the accept queue of its parental listening socket */ | |
635 | static void smc_accept_unlink(struct sock *sk) | |
636 | { | |
637 | struct smc_sock *par = smc_sk(sk)->listen_smc; | |
638 | ||
639 | spin_lock(&par->accept_q_lock); | |
640 | list_del_init(&smc_sk(sk)->accept_q); | |
641 | spin_unlock(&par->accept_q_lock); | |
642 | sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk); | |
643 | sock_put(sk); | |
644 | } | |
645 | ||
646 | /* remove a sock from the accept queue to bind it to a new socket created | |
647 | * for a socket accept call from user space | |
648 | */ | |
b38d7324 UB |
649 | struct sock *smc_accept_dequeue(struct sock *parent, |
650 | struct socket *new_sock) | |
a046d57d UB |
651 | { |
652 | struct smc_sock *isk, *n; | |
653 | struct sock *new_sk; | |
654 | ||
655 | list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) { | |
656 | new_sk = (struct sock *)isk; | |
657 | ||
658 | smc_accept_unlink(new_sk); | |
659 | if (new_sk->sk_state == SMC_CLOSED) { | |
288c8390 UB |
660 | new_sk->sk_prot->unhash(new_sk); |
661 | sock_put(new_sk); | |
a046d57d UB |
662 | continue; |
663 | } | |
664 | if (new_sock) | |
665 | sock_graft(new_sk, new_sock); | |
666 | return new_sk; | |
667 | } | |
668 | return NULL; | |
669 | } | |
670 | ||
/* clean up for a created but never accepted sock: perform the close
 * handshake (or the fallback close), release the internal TCP socket
 * and schedule the delayed final sock_put — mirroring smc_release()
 * for socks that never reached user space
 */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk);
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (smc->use_fallback) {
		sk->sk_state = SMC_CLOSED;
	} else {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		struct socket *tcp;

		tcp = smc->clcsock;
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	if (smc->use_fallback) {
		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
	} else if (sk->sk_state == SMC_CLOSED) {
		/* connection fully closed: free it and delay the final
		 * sock_put
		 */
		smc_conn_free(&smc->conn);
		schedule_delayed_work(&smc->sock_put_work,
				      SMC_CLOSE_SOCK_PUT_DELAY);
	}
	release_sock(sk);
	sock_put(sk);
}
705 | ||
/* Server side of the CONFIRM LINK handshake for the first link of a new
 * link group: register the rmb with the peer, send the CONFIRM LINK
 * request and wait for the client's response.
 * Returns 0 on success, a positive SMC_CLC_DECL_* reason code, or a
 * negative error from the decline-wait path.
 */
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	/* register our rmb memory region with the peer */
	rc = smc_wr_reg_send(link,
			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		/* timeout or interrupt: the client is expected to send a
		 * CLC decline instead; wait for it
		 */
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
	}

	return rc;
}
741 | ||
/* setup for RDMA connection of server: worker handling one connection
 * accepted on the internal CLC/TCP socket.  Runs the server side of the
 * CLC handshake, creates or reuses link group and connection, and
 * finally queues the new sock on the listen sock's accept queue (with
 * sk_err set on failure).  Falls back to TCP when SMC is not possible.
 */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct smc_clc_msg_accept_confirm cclc;
	int local_contact = SMC_REUSE_CONTACT;
	struct sock *newsmcsk = &new_smc->sk;
	struct smc_clc_msg_proposal pclc;
	struct smc_ib_device *smcibdev;
	struct sockaddr_in peeraddr;
	struct smc_link *link;
	int reason_code = 0;
	int rc = 0, len;
	__be32 subnet;
	u8 prefix_len;
	u8 ibport;

	/* do inband token exchange -
	 *wait for and receive SMC Proposal CLC message
	 */
	reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
				       SMC_CLC_PROPOSAL);
	if (reason_code < 0)
		goto out_err;
	if (reason_code > 0)
		goto decline_rdma;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(newclcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* determine subnet and mask from internal TCP socket */
	rc = smc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len);
	if (rc) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}
	/* the client's proposed subnet must match our own */
	if ((pclc.outgoing_subnet != subnet) ||
	    (pclc.prefix_len != prefix_len)) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* get address of the peer connected to the internal TCP socket */
	kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len);

	/* allocate connection / link group */
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
					smcibdev, ibport, &pclc.lcl, 0);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		goto decline_rdma_unlock;
	}
	link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	/* create send buffer and rmb */
	rc = smc_buf_create(new_smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	smc_close_init(new_smc);
	smc_rx_init(new_smc);

	if (local_contact != SMC_FIRST_CONTACT) {
		/* reused link group: only a not-yet-registered rmb must
		 * be announced to the peer
		 */
		struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc;

		if (!buf_desc->reused) {
			/* register memory region for new rmb */
			rc = smc_wr_reg_send(link,
					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
			if (rc) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
		}
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc)
		goto out_err_unlock;

	/* receive SMC Confirm CLC message */
	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
				       SMC_CLC_CONFIRM);
	if (reason_code < 0)
		goto out_err_unlock;
	if (reason_code > 0)
		goto decline_rdma_unlock;
	smc_conn_save_peer_info(new_smc, &cclc);
	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &cclc);

	rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code < 0) {
			/* peer is not aware of a problem */
			rc = reason_code;
			goto out_err_unlock;
		}
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	smc_tx_init(new_smc);
	mutex_unlock(&smc_create_lgr_pending);

out_connected:
	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;
enqueue:
	/* nested lock: smc_close_non_accepted() may lock child socks */
	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}
	release_sock(&lsmc->sk);

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
	return;

decline_rdma_unlock:
	mutex_unlock(&smc_create_lgr_pending);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc_conn_free(&new_smc->conn);
	new_smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		rc = smc_clc_send_decline(new_smc, reason_code);
		if (rc < sizeof(struct smc_clc_msg_decline))
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	mutex_unlock(&smc_create_lgr_pending);
out_err:
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);
	goto enqueue; /* queue new sock with sk_err set */
}
919 | ||
/* Listen worker: accept new TCP connections on the internal CLC socket of
 * the listening SMC socket and hand each one off to its own
 * smc_listen_work instance. Keeps running while the listen socket stays
 * in state SMC_LISTEN.
 */
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(&lsmc->sk);
	while (lsmc->sk.sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)		/* error on the clc socket - stop listening */
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = false; /* assume rdma capability first*/
		sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		schedule_work(&new_smc->smc_listen_work);
	}

out:
	release_sock(&lsmc->sk);
	lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
}
947 | ||
ac713874 UB |
948 | static int smc_listen(struct socket *sock, int backlog) |
949 | { | |
950 | struct sock *sk = sock->sk; | |
951 | struct smc_sock *smc; | |
952 | int rc; | |
953 | ||
954 | smc = smc_sk(sk); | |
955 | lock_sock(sk); | |
956 | ||
957 | rc = -EINVAL; | |
958 | if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN)) | |
959 | goto out; | |
960 | ||
961 | rc = 0; | |
962 | if (sk->sk_state == SMC_LISTEN) { | |
963 | sk->sk_max_ack_backlog = backlog; | |
964 | goto out; | |
965 | } | |
966 | /* some socket options are handled in core, so we could not apply | |
967 | * them to the clc socket -- copy smc socket options to clc socket | |
968 | */ | |
969 | smc_copy_sock_settings_to_clc(smc); | |
970 | ||
971 | rc = kernel_listen(smc->clcsock, backlog); | |
972 | if (rc) | |
973 | goto out; | |
974 | sk->sk_max_ack_backlog = backlog; | |
975 | sk->sk_ack_backlog = 0; | |
976 | sk->sk_state = SMC_LISTEN; | |
a046d57d UB |
977 | INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); |
978 | schedule_work(&smc->tcp_listen_work); | |
ac713874 UB |
979 | |
980 | out: | |
981 | release_sock(sk); | |
982 | return rc; | |
983 | } | |
984 | ||
/* Accept a connection on a listening SMC socket.
 * Blocks (up to the socket's receive timeout, or not at all with
 * O_NONBLOCK) until smc_listen_work has queued a child socket, then
 * dequeues it into @new_sock.
 * Returns 0 on success, -EINVAL when not listening, -EAGAIN on timeout,
 * a signal errno, or the child's pending socket error.
 */
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			/* non-blocking or timeout elapsed */
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);	/* drop lock while sleeping */
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);

out:
	release_sock(sk);
	return rc;
}
1031 | ||
1032 | static int smc_getname(struct socket *sock, struct sockaddr *addr, | |
1033 | int *len, int peer) | |
1034 | { | |
1035 | struct smc_sock *smc; | |
1036 | ||
b38d7324 UB |
1037 | if (peer && (sock->sk->sk_state != SMC_ACTIVE) && |
1038 | (sock->sk->sk_state != SMC_APPCLOSEWAIT1)) | |
ac713874 UB |
1039 | return -ENOTCONN; |
1040 | ||
1041 | smc = smc_sk(sock->sk); | |
1042 | ||
1043 | return smc->clcsock->ops->getname(smc->clcsock, addr, len, peer); | |
1044 | } | |
1045 | ||
1046 | static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |
1047 | { | |
1048 | struct sock *sk = sock->sk; | |
1049 | struct smc_sock *smc; | |
1050 | int rc = -EPIPE; | |
1051 | ||
1052 | smc = smc_sk(sk); | |
1053 | lock_sock(sk); | |
b38d7324 UB |
1054 | if ((sk->sk_state != SMC_ACTIVE) && |
1055 | (sk->sk_state != SMC_APPCLOSEWAIT1) && | |
1056 | (sk->sk_state != SMC_INIT)) | |
ac713874 UB |
1057 | goto out; |
1058 | if (smc->use_fallback) | |
1059 | rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); | |
1060 | else | |
e6727f39 | 1061 | rc = smc_tx_sendmsg(smc, msg, len); |
ac713874 UB |
1062 | out: |
1063 | release_sock(sk); | |
1064 | return rc; | |
1065 | } | |
1066 | ||
1067 | static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |
1068 | int flags) | |
1069 | { | |
1070 | struct sock *sk = sock->sk; | |
1071 | struct smc_sock *smc; | |
1072 | int rc = -ENOTCONN; | |
1073 | ||
1074 | smc = smc_sk(sk); | |
1075 | lock_sock(sk); | |
b38d7324 UB |
1076 | if ((sk->sk_state == SMC_INIT) || |
1077 | (sk->sk_state == SMC_LISTEN) || | |
1078 | (sk->sk_state == SMC_CLOSED)) | |
ac713874 UB |
1079 | goto out; |
1080 | ||
b38d7324 UB |
1081 | if (sk->sk_state == SMC_PEERFINCLOSEWAIT) { |
1082 | rc = 0; | |
1083 | goto out; | |
1084 | } | |
1085 | ||
ac713874 UB |
1086 | if (smc->use_fallback) |
1087 | rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); | |
1088 | else | |
952310cc | 1089 | rc = smc_rx_recvmsg(smc, msg, len, flags); |
b38d7324 | 1090 | |
ac713874 UB |
1091 | out: |
1092 | release_sock(sk); | |
1093 | return rc; | |
1094 | } | |
1095 | ||
a046d57d UB |
1096 | static unsigned int smc_accept_poll(struct sock *parent) |
1097 | { | |
1098 | struct smc_sock *isk; | |
1099 | struct sock *sk; | |
1100 | ||
1101 | lock_sock(parent); | |
1102 | list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) { | |
1103 | sk = (struct sock *)isk; | |
1104 | ||
1105 | if (sk->sk_state == SMC_ACTIVE) { | |
1106 | release_sock(parent); | |
1107 | return POLLIN | POLLRDNORM; | |
1108 | } | |
1109 | } | |
1110 | release_sock(parent); | |
1111 | ||
1112 | return 0; | |
1113 | } | |
1114 | ||
ac713874 UB |
/* Poll an SMC socket.
 * While the socket is still in SMC_INIT (handshake pending) or has fallen
 * back to TCP, polling is delegated to the internal CLC socket; a POLLOUT
 * reported by the CLC socket while in SMC_INIT means a non-blocking
 * connect finished, which triggers the deferred SMC handshake here.
 * Otherwise the SMC connection state itself is evaluated.
 */
static unsigned int smc_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sock->sk);
	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		/* if non-blocking connect finished ... */
		lock_sock(sk);
		if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) {
			sk->sk_err = smc->clcsock->sk->sk_err;
			if (sk->sk_err) {
				mask |= POLLERR;
			} else {
				/* TCP part is up - run the SMC handshake */
				rc = smc_connect_rdma(smc);
				if (rc < 0)
					mask |= POLLERR;
				else
					/* success cases including fallback */
					mask |= POLLOUT | POLLWRNORM;
			}
		}
		release_sock(sk);
	} else {
		sock_poll_wait(file, sk_sleep(sk), wait);
		if (sk->sk_state == SMC_LISTEN)
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		if (sk->sk_err)
			mask |= POLLERR;
		if (atomic_read(&smc->conn.sndbuf_space) ||
		    (sk->sk_shutdown & SEND_SHUTDOWN)) {
			mask |= POLLOUT | POLLWRNORM;
		} else {
			/* no send buffer space - arm write-space wakeup */
			sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		}
		if (atomic_read(&smc->conn.bytes_to_rcv))
			mask |= POLLIN | POLLRDNORM;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= POLLHUP;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			mask |= POLLIN | POLLRDNORM | POLLRDHUP;
		if (sk->sk_state == SMC_APPCLOSEWAIT1)
			mask |= POLLIN;

	}

	return mask;
}
1171 | ||
/* Shut down one or both directions of an SMC socket.
 * For fallback sockets the request is simply forwarded to the CLC/TCP
 * socket. Otherwise the SMC close protocol runs first and the CLC socket
 * is shut down afterwards; the first failing return code wins.
 */
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;	/* result of the trailing CLC socket shutdown */

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	/* shutdown is only meaningful in these connection states */
	if ((sk->sk_state != SMC_LISTEN) &&
	    (sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		if (sk->sk_state == SMC_LISTEN)
			rc = smc_close_active(smc);
		else
			rc = 0;
			/* nothing more to do because peer is not involved */
		break;
	}
	rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
1225 | ||
1226 | static int smc_setsockopt(struct socket *sock, int level, int optname, | |
1227 | char __user *optval, unsigned int optlen) | |
1228 | { | |
1229 | struct sock *sk = sock->sk; | |
1230 | struct smc_sock *smc; | |
1231 | ||
1232 | smc = smc_sk(sk); | |
1233 | ||
1234 | /* generic setsockopts reaching us here always apply to the | |
1235 | * CLC socket | |
1236 | */ | |
1237 | return smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, | |
1238 | optval, optlen); | |
1239 | } | |
1240 | ||
1241 | static int smc_getsockopt(struct socket *sock, int level, int optname, | |
1242 | char __user *optval, int __user *optlen) | |
1243 | { | |
1244 | struct smc_sock *smc; | |
1245 | ||
1246 | smc = smc_sk(sock->sk); | |
1247 | /* socket options apply to the CLC socket */ | |
1248 | return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, | |
1249 | optval, optlen); | |
1250 | } | |
1251 | ||
1252 | static int smc_ioctl(struct socket *sock, unsigned int cmd, | |
1253 | unsigned long arg) | |
1254 | { | |
1255 | struct smc_sock *smc; | |
1256 | ||
1257 | smc = smc_sk(sock->sk); | |
1258 | if (smc->use_fallback) | |
1259 | return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); | |
1260 | else | |
1261 | return sock_no_ioctl(sock, cmd, arg); | |
1262 | } | |
1263 | ||
1264 | static ssize_t smc_sendpage(struct socket *sock, struct page *page, | |
1265 | int offset, size_t size, int flags) | |
1266 | { | |
1267 | struct sock *sk = sock->sk; | |
1268 | struct smc_sock *smc; | |
1269 | int rc = -EPIPE; | |
1270 | ||
1271 | smc = smc_sk(sk); | |
1272 | lock_sock(sk); | |
1273 | if (sk->sk_state != SMC_ACTIVE) | |
1274 | goto out; | |
1275 | if (smc->use_fallback) | |
1276 | rc = kernel_sendpage(smc->clcsock, page, offset, | |
1277 | size, flags); | |
1278 | else | |
1279 | rc = sock_no_sendpage(sock, page, offset, size, flags); | |
1280 | ||
1281 | out: | |
1282 | release_sock(sk); | |
1283 | return rc; | |
1284 | } | |
1285 | ||
1286 | static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, | |
1287 | struct pipe_inode_info *pipe, size_t len, | |
1288 | unsigned int flags) | |
1289 | { | |
1290 | struct sock *sk = sock->sk; | |
1291 | struct smc_sock *smc; | |
1292 | int rc = -ENOTCONN; | |
1293 | ||
1294 | smc = smc_sk(sk); | |
1295 | lock_sock(sk); | |
1296 | if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED)) | |
1297 | goto out; | |
1298 | if (smc->use_fallback) { | |
1299 | rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, | |
1300 | pipe, len, flags); | |
1301 | } else { | |
1302 | rc = -EOPNOTSUPP; | |
1303 | } | |
1304 | out: | |
1305 | release_sock(sk); | |
1306 | return rc; | |
1307 | } | |
1308 | ||
/* must look like tcp */
/* proto_ops for AF_SMC sockets; unsupported operations use the generic
 * sock_no_* stubs.
 */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
1331 | ||
1332 | static int smc_create(struct net *net, struct socket *sock, int protocol, | |
1333 | int kern) | |
1334 | { | |
1335 | struct smc_sock *smc; | |
1336 | struct sock *sk; | |
1337 | int rc; | |
1338 | ||
1339 | rc = -ESOCKTNOSUPPORT; | |
1340 | if (sock->type != SOCK_STREAM) | |
1341 | goto out; | |
1342 | ||
1343 | rc = -EPROTONOSUPPORT; | |
1344 | if ((protocol != IPPROTO_IP) && (protocol != IPPROTO_TCP)) | |
1345 | goto out; | |
1346 | ||
1347 | rc = -ENOBUFS; | |
1348 | sock->ops = &smc_sock_ops; | |
1349 | sk = smc_sock_alloc(net, sock); | |
1350 | if (!sk) | |
1351 | goto out; | |
1352 | ||
1353 | /* create internal TCP socket for CLC handshake and fallback */ | |
1354 | smc = smc_sk(sk); | |
a046d57d | 1355 | smc->use_fallback = false; /* assume rdma capability first */ |
ac713874 UB |
1356 | rc = sock_create_kern(net, PF_INET, SOCK_STREAM, |
1357 | IPPROTO_TCP, &smc->clcsock); | |
1358 | if (rc) | |
1359 | sk_common_release(sk); | |
cd6851f3 UB |
1360 | smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); |
1361 | smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); | |
ac713874 UB |
1362 | |
1363 | out: | |
1364 | return rc; | |
1365 | } | |
1366 | ||
/* AF_SMC protocol family registration; smc_create() instantiates sockets */
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};
1372 | ||
/* Module init: set up pnet table handling, LLC and CDC message handling,
 * register the smc proto, the AF_SMC socket family and the IB client.
 * On failure, previously registered pieces are unwound via the labels at
 * the bottom.
 * NOTE(review): smc_llc_init()/smc_cdc_init() successes are not explicitly
 * unwound on later failures - presumably they need no teardown; confirm.
 */
static int __init smc_init(void)
{
	int rc;

	rc = smc_pnet_init();
	if (rc)
		return rc;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
	return rc;
}
1422 | ||
/* Module exit: free all remaining link groups, then unregister the IB
 * client, socket family, proto and pnet handling in reverse init order.
 */
static void __exit smc_exit(void)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_freeing_list);

	/* detach all link groups under the lock, free them outside of it */
	spin_lock_bh(&smc_lgr_list.lock);
	if (!list_empty(&smc_lgr_list.list))
		list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
	spin_unlock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		smc_lgr_free(lgr); /* free link group */
	}
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
}
1441 | ||
/* module entry/exit points and metadata */
module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);