From c3011b9a7deaaaabdf955815d29eac39c8b75e67 Mon Sep 17 00:00:00 2001
From: Stefan Metzmacher
Date: Wed, 28 May 2025 18:01:37 +0200
Subject: [PATCH] smb: client: make use of common smbdirect_socket

This is the next step in the direction of a common smbdirect layer.

Currently only structures are shared, but that will change
over time until everything is shared.

Cc: Steve French
Cc: Tom Talpey
Cc: Long Li
Cc: Namjae Jeon
Cc: Hyunchul Lee
Cc: Meetakshi Setiya
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher
Signed-off-by: Steve French
---
 fs/smb/client/cifs_debug.c |   2 +-
 fs/smb/client/smbdirect.c  | 258 ++++++++++++++++++++-----------------
 fs/smb/client/smbdirect.h  |  12 +-
 3 files changed, 146 insertions(+), 126 deletions(-)

diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index e03c890de0a0..56b0b5c82dd1 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -387,7 +387,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 			seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
 				"transport status: %x",
 				server->smbd_conn->protocol,
-				server->smbd_conn->transport_status);
+				server->smbd_conn->socket.status);
 			seq_printf(m, "\nConn receive_credit_max: %x "
 				"send_credit_target: %x max_send_size: %x",
 				server->smbd_conn->receive_credit_max,
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 0b08169e885d..608ecd2149ed 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -163,10 +163,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
 {
 	struct smbd_connection *info =
 		container_of(work, struct smbd_connection, disconnect_work);
+	struct smbdirect_socket *sc = &info->socket;
 
-	if (info->transport_status == SMBD_CONNECTED) {
-		info->transport_status = SMBD_DISCONNECTING;
-		rdma_disconnect(info->id);
+	if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
+		sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
+		rdma_disconnect(sc->rdma.cm_id);
 	}
 }
 
@@ -180,6 +181,7 @@ static int smbd_conn_upcall(
 		struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
 	struct smbd_connection *info = id->context;
+	struct smbdirect_socket *sc = &info->socket;
 
 	log_rdma_event(INFO, "event=%d status=%d\n", event->event,
 		event->status);
@@ -203,7 +205,7 @@
 
 	case RDMA_CM_EVENT_ESTABLISHED:
 		log_rdma_event(INFO, "connected event=%d\n", event->event);
-		info->transport_status = SMBD_CONNECTED;
+		sc->status = SMBDIRECT_SOCKET_CONNECTED;
 		wake_up_interruptible(&info->conn_wait);
 		break;
 
@@ -211,20 +213,20 @@
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
 	case RDMA_CM_EVENT_REJECTED:
 		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
-		info->transport_status = SMBD_DISCONNECTED;
+		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
 		wake_up_interruptible(&info->conn_wait);
 		break;
 
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 	case RDMA_CM_EVENT_DISCONNECTED:
 		/* This happens when we fail the negotiation */
-		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
-			info->transport_status = SMBD_DISCONNECTED;
+		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
+			sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
 			wake_up(&info->conn_wait);
 			break;
 		}
 
-		info->transport_status = SMBD_DISCONNECTED;
+		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
 		wake_up_interruptible(&info->disconn_wait);
 		wake_up_interruptible(&info->wait_reassembly_queue);
 		wake_up_interruptible_all(&info->wait_send_queue);
@@ -273,6 +275,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
 	int i;
 	struct smbd_request *request =
 		container_of(wc->wr_cqe, struct smbd_request, cqe);
+	struct smbd_connection *info = request->info;
+	struct smbdirect_socket *sc = &info->socket;
 
 	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
 		request, wc->status);
@@ -284,7 +288,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
 	}
 
 	for (i = 0; i < request->num_sge; i++)
-		ib_dma_unmap_single(request->info->id->device,
+		ib_dma_unmap_single(sc->ib.dev,
 			request->sge[i].addr,
 			request->sge[i].length,
 			DMA_TO_DEVICE);
@@ -391,8 +395,9 @@ static void smbd_post_send_credits(struct work_struct *work)
 {
 	struct smbd_connection *info =
 		container_of(work, struct smbd_connection, post_send_credits_work);
+	struct smbdirect_socket *sc = &info->socket;
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		wake_up(&info->wait_receive_queues);
 		return;
 	}
@@ -633,32 +638,34 @@ static int smbd_ia_open(
 		struct smbd_connection *info,
 		struct sockaddr *dstaddr, int port)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	int rc;
 
-	info->id = smbd_create_id(info, dstaddr, port);
-	if (IS_ERR(info->id)) {
-		rc = PTR_ERR(info->id);
+	sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
+	if (IS_ERR(sc->rdma.cm_id)) {
+		rc = PTR_ERR(sc->rdma.cm_id);
 		goto out1;
 	}
+	sc->ib.dev = sc->rdma.cm_id->device;
 
-	if (!frwr_is_supported(&info->id->device->attrs)) {
+	if (!frwr_is_supported(&sc->ib.dev->attrs)) {
 		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
 		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
-			       info->id->device->attrs.device_cap_flags,
-			       info->id->device->attrs.max_fast_reg_page_list_len);
+			       sc->ib.dev->attrs.device_cap_flags,
+			       sc->ib.dev->attrs.max_fast_reg_page_list_len);
 		rc = -EPROTONOSUPPORT;
 		goto out2;
 	}
 	info->max_frmr_depth = min_t(int, smbd_max_frmr_depth,
-		info->id->device->attrs.max_fast_reg_page_list_len);
+		sc->ib.dev->attrs.max_fast_reg_page_list_len);
 	info->mr_type = IB_MR_TYPE_MEM_REG;
-	if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+	if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
 		info->mr_type = IB_MR_TYPE_SG_GAPS;
 
-	info->pd = ib_alloc_pd(info->id->device, 0);
-	if (IS_ERR(info->pd)) {
-		rc = PTR_ERR(info->pd);
+	sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+	if (IS_ERR(sc->ib.pd)) {
+		rc = PTR_ERR(sc->ib.pd);
 		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
 		goto out2;
 	}
@@ -666,8 +673,8 @@
 	return 0;
 
 out2:
-	rdma_destroy_id(info->id);
-	info->id = NULL;
+	rdma_destroy_id(sc->rdma.cm_id);
+	sc->rdma.cm_id = NULL;
 
 out1:
 	return rc;
@@ -681,6 +688,7 @@ out1:
  */
 static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct ib_send_wr send_wr;
 	int rc = -ENOMEM;
 	struct smbd_request *request;
@@ -704,18 +712,18 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 
 	request->num_sge = 1;
 	request->sge[0].addr = ib_dma_map_single(
-				info->id->device, (void *)packet,
+				sc->ib.dev, (void *)packet,
 				sizeof(*packet), DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+	if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
 		rc = -EIO;
 		goto dma_mapping_failed;
 	}
 
 	request->sge[0].length = sizeof(*packet);
-	request->sge[0].lkey = info->pd->local_dma_lkey;
+	request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
 
 	ib_dma_sync_single_for_device(
-		info->id->device, request->sge[0].addr,
+		sc->ib.dev, request->sge[0].addr,
 		request->sge[0].length, DMA_TO_DEVICE);
 
 	request->cqe.done = send_done;
@@ -732,14 +740,14 @@
 		request->sge[0].length, request->sge[0].lkey);
 
 	atomic_inc(&info->send_pending);
-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
+	rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
 	if (!rc)
 		return 0;
 
 	/* if we reach here, post send failed */
 	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
 	atomic_dec(&info->send_pending);
-	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+	ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
 		request->sge[0].length, DMA_TO_DEVICE);
 
 	smbd_disconnect_rdma_connection(info);
@@ -791,6 +799,7 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
 static int smbd_post_send(struct smbd_connection *info,
 		struct smbd_request *request)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct ib_send_wr send_wr;
 	int rc, i;
 
@@ -799,7 +808,7 @@ static int smbd_post_send(struct smbd_connection *info,
 			"rdma_request sge[%d] addr=0x%llx length=%u\n",
 			i, request->sge[i].addr, request->sge[i].length);
 		ib_dma_sync_single_for_device(
-			info->id->device,
+			sc->ib.dev,
 			request->sge[i].addr,
 			request->sge[i].length,
 			DMA_TO_DEVICE);
@@ -814,7 +823,7 @@ static int smbd_post_send(struct smbd_connection *info,
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;
 
-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
+	rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
 	if (rc) {
 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
 		smbd_disconnect_rdma_connection(info);
@@ -831,6 +840,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 		struct iov_iter *iter,
 		int *_remaining_data_length)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	int i, rc;
 	int header_length;
 	int data_length;
@@ -842,11 +852,11 @@ wait_credit:
 	/* Wait for send credits. A SMBD packet needs one credit */
 	rc = wait_event_interruptible(info->wait_send_queue,
 		atomic_read(&info->send_credits) > 0 ||
-		info->transport_status != SMBD_CONNECTED);
+		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 	if (rc)
 		goto err_wait_credit;
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
 		rc = -EAGAIN;
 		goto err_wait_credit;
@@ -859,9 +869,9 @@ wait_credit:
 wait_send_queue:
 	wait_event(info->wait_post_send,
 		atomic_read(&info->send_pending) < info->send_credit_target ||
-		info->transport_status != SMBD_CONNECTED);
+		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
 		rc = -EAGAIN;
 		goto err_wait_send_queue;
@@ -888,8 +898,8 @@ wait_send_queue:
 			.nr_sge = 1,
 			.max_sge = SMBDIRECT_MAX_SEND_SGE,
 			.sge = request->sge,
-			.device = info->id->device,
-			.local_dma_lkey = info->pd->local_dma_lkey,
+			.device = sc->ib.dev,
+			.local_dma_lkey = sc->ib.pd->local_dma_lkey,
 			.direction = DMA_TO_DEVICE,
 		};
@@ -941,18 +951,18 @@ wait_send_queue:
 	if (!data_length)
 		header_length = offsetof(struct smbdirect_data_transfer, padding);
 
-	request->sge[0].addr = ib_dma_map_single(info->id->device,
+	request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
 						 (void *)packet,
 						 header_length,
 						 DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+	if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
 		rc = -EIO;
 		request->sge[0].addr = 0;
 		goto err_dma;
 	}
 
 	request->sge[0].length = header_length;
-	request->sge[0].lkey = info->pd->local_dma_lkey;
+	request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
 
 	rc = smbd_post_send(info, request);
 	if (!rc)
 		return 0;
@@ -961,7 +971,7 @@ wait_send_queue:
 err_dma:
 	for (i = 0; i < request->num_sge; i++)
 		if (request->sge[i].addr)
-			ib_dma_unmap_single(info->id->device,
+			ib_dma_unmap_single(sc->ib.dev,
 					    request->sge[i].addr,
 					    request->sge[i].length,
 					    DMA_TO_DEVICE);
@@ -1006,17 +1016,18 @@ static int smbd_post_send_empty(struct smbd_connection *info)
 static int smbd_post_recv(
 		struct smbd_connection *info, struct smbd_response *response)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct ib_recv_wr recv_wr;
 	int rc = -EIO;
 
 	response->sge.addr = ib_dma_map_single(
-				info->id->device, response->packet,
+				sc->ib.dev, response->packet,
 				info->max_receive_size, DMA_FROM_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+	if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
 		return rc;
 
 	response->sge.length = info->max_receive_size;
-	response->sge.lkey = info->pd->local_dma_lkey;
+	response->sge.lkey = sc->ib.pd->local_dma_lkey;
 
 	response->cqe.done = recv_done;
@@ -1025,9 +1036,9 @@ static int smbd_post_recv(
 	recv_wr.sg_list = &response->sge;
 	recv_wr.num_sge = 1;
 
-	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
+	rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
 	if (rc) {
-		ib_dma_unmap_single(info->id->device, response->sge.addr,
+		ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
 			response->sge.length, DMA_FROM_DEVICE);
 		smbd_disconnect_rdma_connection(info);
 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
@@ -1185,9 +1196,10 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
 static void put_receive_buffer(
 	struct smbd_connection *info, struct smbd_response *response)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	unsigned long flags;
 
-	ib_dma_unmap_single(info->id->device, response->sge.addr,
+	ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
 		response->sge.length, DMA_FROM_DEVICE);
 
 	spin_lock_irqsave(&info->receive_queue_lock, flags);
@@ -1287,6 +1299,7 @@ static void idle_connection_timer(struct work_struct *work)
 void smbd_destroy(struct TCP_Server_Info *server)
 {
 	struct smbd_connection *info = server->smbd_conn;
+	struct smbdirect_socket *sc;
 	struct smbd_response *response;
 	unsigned long flags;
 
@@ -1294,19 +1307,21 @@ void smbd_destroy(struct TCP_Server_Info *server)
 		log_rdma_event(INFO, "rdma session already destroyed\n");
 		return;
 	}
+	sc = &info->socket;
 
 	log_rdma_event(INFO, "destroying rdma session\n");
-	if (info->transport_status != SMBD_DISCONNECTED) {
-		rdma_disconnect(server->smbd_conn->id);
+	if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
+		rdma_disconnect(sc->rdma.cm_id);
 		log_rdma_event(INFO, "wait for transport being disconnected\n");
 		wait_event_interruptible(
 			info->disconn_wait,
-			info->transport_status == SMBD_DISCONNECTED);
+			sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
 	}
 
 	log_rdma_event(INFO, "destroying qp\n");
-	ib_drain_qp(info->id->qp);
-	rdma_destroy_qp(info->id);
+	ib_drain_qp(sc->ib.qp);
+	rdma_destroy_qp(sc->rdma.cm_id);
+	sc->ib.qp = NULL;
 
 	log_rdma_event(INFO, "cancelling idle timer\n");
 	cancel_delayed_work_sync(&info->idle_timer_work);
@@ -1353,10 +1368,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
 	}
 	destroy_mr_list(info);
 
-	ib_free_cq(info->send_cq);
-	ib_free_cq(info->recv_cq);
-	ib_dealloc_pd(info->pd);
-	rdma_destroy_id(info->id);
+	ib_free_cq(sc->ib.send_cq);
+	ib_free_cq(sc->ib.recv_cq);
+	ib_dealloc_pd(sc->ib.pd);
+	rdma_destroy_id(sc->rdma.cm_id);
 
 	/* free mempools */
 	mempool_destroy(info->request_mempool);
@@ -1365,7 +1380,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
 	mempool_destroy(info->response_mempool);
 	kmem_cache_destroy(info->response_cache);
 
-	info->transport_status = SMBD_DESTROYED;
+	sc->status = SMBDIRECT_SOCKET_DESTROYED;
 
 	destroy_workqueue(info->workqueue);
 	log_rdma_event(INFO, "rdma session destroyed\n");
@@ -1390,7 +1405,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
 	 * This is possible if transport is disconnected and we haven't received
 	 * notification from RDMA, but upper layer has detected timeout
 	 */
-	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+	if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
 		log_rdma_event(INFO, "disconnecting transport\n");
 		smbd_destroy(server);
 	}
@@ -1489,6 +1504,7 @@ static struct smbd_connection *_smbd_get_connection(
 {
 	int rc;
 	struct smbd_connection *info;
+	struct smbdirect_socket *sc;
 	struct rdma_conn_param conn_param;
 	struct ib_qp_init_attr qp_attr;
 	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
@@ -1498,29 +1514,30 @@ static struct smbd_connection *_smbd_get_connection(
 	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
 	if (!info)
 		return NULL;
+	sc = &info->socket;
 
-	info->transport_status = SMBD_CONNECTING;
+	sc->status = SMBDIRECT_SOCKET_CONNECTING;
 	rc = smbd_ia_open(info, dstaddr, port);
 	if (rc) {
 		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
 		goto create_id_failed;
 	}
 
-	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
-	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+	if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
+	    smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
 		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
 			       smbd_send_credit_target,
-			       info->id->device->attrs.max_cqe,
-			       info->id->device->attrs.max_qp_wr);
+			       sc->ib.dev->attrs.max_cqe,
+			       sc->ib.dev->attrs.max_qp_wr);
 		goto config_failed;
 	}
 
-	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
-	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+	if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
+	    smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
 		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
 			       smbd_receive_credit_max,
-			       info->id->device->attrs.max_cqe,
-			       info->id->device->attrs.max_qp_wr);
+			       sc->ib.dev->attrs.max_cqe,
+			       sc->ib.dev->attrs.max_qp_wr);
 		goto config_failed;
 	}
@@ -1531,32 +1548,30 @@ static struct smbd_connection *_smbd_get_connection(
 	info->max_receive_size = smbd_max_receive_size;
 	info->keep_alive_interval = smbd_keep_alive_interval;
 
-	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
-	    info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+	if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+	    sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
 		log_rdma_event(ERR,
 			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
 			IB_DEVICE_NAME_MAX,
-			info->id->device->name,
-			info->id->device->attrs.max_send_sge,
-			info->id->device->attrs.max_recv_sge);
+			sc->ib.dev->name,
+			sc->ib.dev->attrs.max_send_sge,
+			sc->ib.dev->attrs.max_recv_sge);
 		goto config_failed;
 	}
 
-	info->send_cq = NULL;
-	info->recv_cq = NULL;
-	info->send_cq =
-		ib_alloc_cq_any(info->id->device, info,
+	sc->ib.send_cq =
+		ib_alloc_cq_any(sc->ib.dev, info,
 			info->send_credit_target, IB_POLL_SOFTIRQ);
-	if (IS_ERR(info->send_cq)) {
-		info->send_cq = NULL;
+	if (IS_ERR(sc->ib.send_cq)) {
+		sc->ib.send_cq = NULL;
 		goto alloc_cq_failed;
 	}
 
-	info->recv_cq =
-		ib_alloc_cq_any(info->id->device, info,
+	sc->ib.recv_cq =
+		ib_alloc_cq_any(sc->ib.dev, info,
 			info->receive_credit_max, IB_POLL_SOFTIRQ);
-	if (IS_ERR(info->recv_cq)) {
-		info->recv_cq = NULL;
+	if (IS_ERR(sc->ib.recv_cq)) {
+		sc->ib.recv_cq = NULL;
 		goto alloc_cq_failed;
 	}
@@ -1570,29 +1585,30 @@ static struct smbd_connection *_smbd_get_connection(
 	qp_attr.cap.max_inline_data = 0;
 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	qp_attr.qp_type = IB_QPT_RC;
-	qp_attr.send_cq = info->send_cq;
-	qp_attr.recv_cq = info->recv_cq;
+	qp_attr.send_cq = sc->ib.send_cq;
+	qp_attr.recv_cq = sc->ib.recv_cq;
 	qp_attr.port_num = ~0;
 
-	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
+	rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
 	if (rc) {
 		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
 		goto create_qp_failed;
 	}
+	sc->ib.qp = sc->rdma.cm_id->qp;
 
 	memset(&conn_param, 0, sizeof(conn_param));
 	conn_param.initiator_depth = 0;
 
 	conn_param.responder_resources =
-		min(info->id->device->attrs.max_qp_rd_atom,
+		min(sc->ib.dev->attrs.max_qp_rd_atom,
 		    SMBD_CM_RESPONDER_RESOURCES);
 	info->responder_resources = conn_param.responder_resources;
 	log_rdma_mr(INFO, "responder_resources=%d\n",
 		info->responder_resources);
 
 	/* Need to send IRD/ORD in private data for iWARP */
-	info->id->device->ops.get_port_immutable(
-		info->id->device, info->id->port_num, &port_immutable);
+	sc->ib.dev->ops.get_port_immutable(
+		sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
 	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
 		ird_ord_hdr[0] = info->responder_resources;
 		ird_ord_hdr[1] = 1;
@@ -1613,16 +1629,16 @@ static struct smbd_connection *_smbd_get_connection(
 	init_waitqueue_head(&info->conn_wait);
 	init_waitqueue_head(&info->disconn_wait);
 	init_waitqueue_head(&info->wait_reassembly_queue);
-	rc = rdma_connect(info->id, &conn_param);
+	rc = rdma_connect(sc->rdma.cm_id, &conn_param);
 	if (rc) {
 		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
 		goto rdma_connect_failed;
 	}
 
 	wait_event_interruptible(
-		info->conn_wait, info->transport_status != SMBD_CONNECTING);
+		info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
 		goto rdma_connect_failed;
 	}
@@ -1673,26 +1689,26 @@ allocate_mr_failed:
 negotiation_failed:
 	cancel_delayed_work_sync(&info->idle_timer_work);
 	destroy_caches_and_workqueue(info);
-	info->transport_status = SMBD_NEGOTIATE_FAILED;
+	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
 	init_waitqueue_head(&info->conn_wait);
-	rdma_disconnect(info->id);
+	rdma_disconnect(sc->rdma.cm_id);
 	wait_event(info->conn_wait,
-		info->transport_status == SMBD_DISCONNECTED);
+		sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
 
 allocate_cache_failed:
 rdma_connect_failed:
-	rdma_destroy_qp(info->id);
+	rdma_destroy_qp(sc->rdma.cm_id);
 
 create_qp_failed:
 alloc_cq_failed:
-	if (info->send_cq)
-		ib_free_cq(info->send_cq);
-	if (info->recv_cq)
-		ib_free_cq(info->recv_cq);
+	if (sc->ib.send_cq)
+		ib_free_cq(sc->ib.send_cq);
+	if (sc->ib.recv_cq)
+		ib_free_cq(sc->ib.recv_cq);
 
 config_failed:
-	ib_dealloc_pd(info->pd);
-	rdma_destroy_id(info->id);
+	ib_dealloc_pd(sc->ib.pd);
+	rdma_destroy_id(sc->rdma.cm_id);
 
 create_id_failed:
 	kfree(info);
@@ -1732,6 +1748,7 @@ try_again:
 static int smbd_recv_buf(struct smbd_connection *info, char *buf,
 		unsigned int size)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct smbd_response *response;
 	struct smbdirect_data_transfer *data_transfer;
 	int to_copy, to_read, data_read, offset;
@@ -1846,12 +1863,12 @@ read_rfc1002_done:
 	rc = wait_event_interruptible(
 		info->wait_reassembly_queue,
 		info->reassembly_data_length >= size ||
-		info->transport_status != SMBD_CONNECTED);
+		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 	/* Don't return any data if interrupted */
 	if (rc)
 		return rc;
 
-	if (info->transport_status != SMBD_CONNECTED) {
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
 		log_read(ERR, "disconnected\n");
 		return -ECONNABORTED;
 	}
@@ -1869,6 +1886,7 @@ static int smbd_recv_page(struct smbd_connection *info,
 		struct page *page, unsigned int page_offset,
 		unsigned int to_read)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	int ret;
 	char *to_address;
 	void *page_address;
@@ -1877,7 +1895,7 @@ static int smbd_recv_page(struct smbd_connection *info,
 	ret = wait_event_interruptible(
 		info->wait_reassembly_queue,
 		info->reassembly_data_length >= to_read ||
-		info->transport_status != SMBD_CONNECTED);
+		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 	if (ret)
 		return ret;
 
@@ -1952,12 +1970,13 @@ int smbd_send(struct TCP_Server_Info *server,
 	int num_rqst, struct smb_rqst *rqst_array)
 {
 	struct smbd_connection *info = server->smbd_conn;
+	struct smbdirect_socket *sc = &info->socket;
 	struct smb_rqst *rqst;
 	struct iov_iter iter;
 	unsigned int remaining_data_length, klen;
 	int rc, i, rqst_idx;
 
-	if (info->transport_status != SMBD_CONNECTED)
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
 		return -EAGAIN;
 
 	/*
@@ -2051,6 +2070,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 {
 	struct smbd_connection *info =
 		container_of(work, struct smbd_connection, mr_recovery_work);
+	struct smbdirect_socket *sc = &info->socket;
 	struct smbd_mr *smbdirect_mr;
 	int rc;
 
@@ -2068,7 +2088,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 			}
 
 			smbdirect_mr->mr = ib_alloc_mr(
-				info->pd, info->mr_type,
+				sc->ib.pd, info->mr_type,
 				info->max_frmr_depth);
 			if (IS_ERR(smbdirect_mr->mr)) {
 				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
@@ -2097,12 +2117,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 
 static void destroy_mr_list(struct smbd_connection *info)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct smbd_mr *mr, *tmp;
 
 	cancel_work_sync(&info->mr_recovery_work);
 	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
 		if (mr->state == MR_INVALIDATED)
-			ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
+			ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
 				mr->sgt.nents, mr->dir);
 		ib_dereg_mr(mr->mr);
 		kfree(mr->sgt.sgl);
@@ -2119,6 +2140,7 @@ static void destroy_mr_list(struct smbd_connection *info)
  */
 static int allocate_mr_list(struct smbd_connection *info)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	int i;
 	struct smbd_mr *smbdirect_mr, *tmp;
 
@@ -2134,7 +2156,7 @@ static int allocate_mr_list(struct smbd_connection *info)
 		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
 		if (!smbdirect_mr)
 			goto cleanup_entries;
-		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+		smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
 					info->max_frmr_depth);
 		if (IS_ERR(smbdirect_mr->mr)) {
 			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
@@ -2179,20 +2201,20 @@ cleanup_entries:
  */
 static struct smbd_mr *get_mr(struct smbd_connection *info)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct smbd_mr *ret;
 	int rc;
 
 again:
 	rc = wait_event_interruptible(info->wait_mr,
 		atomic_read(&info->mr_ready_count) ||
-		info->transport_status != SMBD_CONNECTED);
+		sc->status != SMBDIRECT_SOCKET_CONNECTED);
 	if (rc) {
 		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
 		return NULL;
 	}
 
-	if (info->transport_status != SMBD_CONNECTED) {
-		log_rdma_mr(ERR, "info->transport_status=%x\n",
-			info->transport_status);
+	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+		log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
 		return NULL;
 	}
@@ -2245,6 +2267,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
 	struct iov_iter *iter,
 	bool writing, bool need_invalidate)
 {
+	struct smbdirect_socket *sc = &info->socket;
 	struct smbd_mr *smbdirect_mr;
 	int rc, num_pages;
 	enum dma_data_direction dir;
@@ -2274,7 +2297,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
 			num_pages, iov_iter_count(iter), info->max_frmr_depth);
 	smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
 
-	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
+	rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
 			   smbdirect_mr->sgt.nents, dir);
 	if (!rc) {
 		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
@@ -2310,7 +2333,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
 	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
 	 * on the next ib_post_send when we actually send I/O to remote peer
 	 */
-	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
+	rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL);
 	if (!rc)
 		return smbdirect_mr;
 
@@ -2319,7 +2342,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
 
 	/* If all failed, attempt to recover this MR by setting it MR_ERROR*/
 map_mr_error:
-	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
+	ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
 		smbdirect_mr->sgt.nents, smbdirect_mr->dir);
 
 dma_map_error:
@@ -2357,6 +2380,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 {
 	struct ib_send_wr *wr;
 	struct smbd_connection *info = smbdirect_mr->conn;
+	struct smbdirect_socket *sc = &info->socket;
 	int rc = 0;
 
 	if (smbdirect_mr->need_invalidate) {
@@ -2370,7 +2394,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 		wr->send_flags = IB_SEND_SIGNALED;
 
 		init_completion(&smbdirect_mr->invalidate_done);
-		rc = ib_post_send(info->id->qp, wr, NULL);
+		rc = ib_post_send(sc->ib.qp, wr, NULL);
 		if (rc) {
 			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
 			smbd_disconnect_rdma_connection(info);
@@ -2387,7 +2411,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 
 	if (smbdirect_mr->state == MR_INVALIDATED) {
 		ib_dma_unmap_sg(
-			info->id->device, smbdirect_mr->sgt.sgl,
+			sc->ib.dev, smbdirect_mr->sgt.sgl,
 			smbdirect_mr->sgt.nents, smbdirect_mr->dir);
 		smbdirect_mr->state = MR_READY;
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index 8561e19a23a3..904c4e5b9e5c 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -15,6 +15,8 @@
 #include
 #include
 
+#include "../common/smbdirect/smbdirect_socket.h"
+
 extern int rdma_readwrite_threshold;
 extern int smbd_max_frmr_depth;
 extern int smbd_keep_alive_interval;
@@ -50,14 +52,8 @@ enum smbd_connection_status {
  * 5. mempools for allocating packets
  */
 struct smbd_connection {
-	enum smbd_connection_status transport_status;
-
-	/* RDMA related */
-	struct rdma_cm_id *id;
-	struct ib_qp_init_attr qp_attr;
-	struct ib_pd *pd;
-	struct ib_cq *send_cq, *recv_cq;
-	struct ib_device_attr dev_attr;
+	struct smbdirect_socket socket;
+
 	int ri_rc;
 	struct completion ri_done;
 	wait_queue_head_t conn_wait;
-- 
2.25.1
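
Editor's note (not part of the patch): the common header this patch includes, fs/smb/common/smbdirect/smbdirect_socket.h, is not shown in the diff. The sketch below reconstructs the shape of the shared state purely from the fields the hunks above use (sc->status, sc->rdma.cm_id, sc->ib.dev/pd/qp/send_cq/recv_cq) and from the SMBDIRECT_SOCKET_* states they test; the real header may differ in member order, extra fields and additional states.

/*
 * Sketch only: reconstructed from the fields referenced in the diff above.
 * The actual fs/smb/common/smbdirect/smbdirect_socket.h may differ.
 */
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

enum smbdirect_socket_status {
	SMBDIRECT_SOCKET_CONNECTING,
	SMBDIRECT_SOCKET_CONNECTED,
	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
	SMBDIRECT_SOCKET_DISCONNECTING,
	SMBDIRECT_SOCKET_DISCONNECTED,
	SMBDIRECT_SOCKET_DESTROYED,
};

struct smbdirect_socket {
	enum smbdirect_socket_status status;

	/* RDMA CM state, replaces the old info->id */
	struct {
		struct rdma_cm_id *cm_id;
	} rdma;

	/*
	 * IB verbs objects, replace the old info->pd, info->send_cq and
	 * info->recv_cq; dev and qp mirror cm_id->device and cm_id->qp,
	 * which the patch assigns explicitly in smbd_ia_open() and
	 * _smbd_get_connection().
	 */
	struct {
		struct ib_device *dev;
		struct ib_pd *pd;
		struct ib_cq *send_cq;
		struct ib_cq *recv_cq;
		struct ib_qp *qp;
	} ib;
};

With a layout like this the conversion is mechanical: info->transport_status becomes sc->status, info->id becomes sc->rdma.cm_id, info->id->device becomes sc->ib.dev, and info->pd/send_cq/recv_cq become sc->ib.pd/send_cq/recv_cq, which is exactly the substitution the hunks above perform and what later allows the client and server smbdirect code to share one implementation.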